-rw-r--r--Documentation/DMA-mapping.txt8
-rw-r--r--Documentation/accounting/delay-accounting.txt110
-rw-r--r--Documentation/accounting/getdelays.c396
-rw-r--r--Documentation/accounting/taskstats.txt181
-rw-r--r--Documentation/feature-removal-schedule.txt16
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/memory-barriers.txt5
-rw-r--r--Documentation/ramdisk.txt12
-rw-r--r--Documentation/x86_64/boot-options.txt7
-rw-r--r--MAINTAINERS20
-rw-r--r--Makefile2
-rw-r--r--arch/i386/kernel/crash.c2
-rw-r--r--arch/i386/kernel/process.c4
-rw-r--r--arch/i386/kernel/setup.c5
-rw-r--r--arch/i386/kernel/time.c2
-rw-r--r--arch/i386/kernel/traps.c61
-rw-r--r--arch/i386/lib/usercopy.c2
-rw-r--r--arch/s390/defconfig44
-rw-r--r--arch/s390/kernel/head31.S4
-rw-r--r--arch/s390/kernel/head64.S4
-rw-r--r--arch/s390/kernel/setup.c46
-rw-r--r--arch/sparc/kernel/devices.c25
-rw-r--r--arch/sparc/kernel/irq.c2
-rw-r--r--arch/sparc/kernel/of_device.c34
-rw-r--r--arch/sparc/kernel/prom.c9
-rw-r--r--arch/sparc/kernel/smp.c96
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c1
-rw-r--r--arch/sparc/kernel/sun4d_smp.c103
-rw-r--r--arch/sparc/kernel/sys_sparc.c18
-rw-r--r--arch/sparc/kernel/time.c74
-rw-r--r--arch/sparc/mm/io-unit.c1
-rw-r--r--arch/sparc/prom/tree.c18
-rw-r--r--arch/sparc64/defconfig8
-rw-r--r--arch/sparc64/kernel/devices.c3
-rw-r--r--arch/sparc64/kernel/head.S13
-rw-r--r--arch/sparc64/kernel/of_device.c34
-rw-r--r--arch/sparc64/kernel/pci_psycho.c6
-rw-r--r--arch/sparc64/kernel/prom.c12
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c1
-rw-r--r--arch/sparc64/kernel/sys_sparc.c18
-rw-r--r--arch/sparc64/kernel/time.c2
-rw-r--r--arch/sparc64/mm/fault.c3
-rw-r--r--arch/sparc64/prom/tree.c85
-rw-r--r--arch/um/Makefile-x86_641
-rw-r--r--arch/um/include/longjmp.h4
-rw-r--r--arch/um/include/os.h68
-rw-r--r--arch/um/kernel/syscall.c22
-rw-r--r--arch/um/kernel/vmlinux.lds.S2
-rw-r--r--arch/um/os-Linux/process.c8
-rw-r--r--arch/um/os-Linux/skas/process.c16
-rw-r--r--arch/um/os-Linux/uaccess.c3
-rw-r--r--arch/x86_64/defconfig9
-rw-r--r--arch/x86_64/ia32/ia32entry.S2
-rw-r--r--arch/x86_64/kernel/pci-calgary.c77
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c5
-rw-r--r--arch/x86_64/kernel/tce.c4
-rw-r--r--arch/x86_64/kernel/time.c18
-rw-r--r--arch/x86_64/kernel/traps.c22
-rw-r--r--arch/x86_64/pci/k8-bus.c10
-rw-r--r--block/blktrace.c2
-rw-r--r--block/cfq-iosched.c2
-rw-r--r--block/ioctl.c4
-rw-r--r--drivers/block/Kconfig10
-rw-r--r--drivers/block/cciss.c86
-rw-r--r--drivers/block/cpqarray.c2
-rw-r--r--drivers/block/rd.c2
-rw-r--r--drivers/bluetooth/hci_usb.c25
-rw-r--r--drivers/char/nsc_gpio.c6
-rw-r--r--drivers/char/pc8736x_gpio.c15
-rw-r--r--drivers/char/pcmcia/synclink_cs.c14
-rw-r--r--drivers/char/scx200_gpio.c72
-rw-r--r--drivers/char/synclink.c14
-rw-r--r--drivers/char/synclink_gt.c14
-rw-r--r--drivers/char/synclinkmp.c14
-rw-r--r--drivers/char/tpm/tpm.c1
-rw-r--r--drivers/char/tpm/tpm_tis.c77
-rw-r--r--drivers/cpufreq/cpufreq.c40
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c2
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c20
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c3
-rw-r--r--drivers/crypto/padlock-aes.c9
-rw-r--r--drivers/dma/ioatdma.c2
-rw-r--r--drivers/fc4/fc.c4
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-dma.c2
-rw-r--r--drivers/ide/ide.c5
-rw-r--r--drivers/ide/pci/it821x.c11
-rw-r--r--drivers/infiniband/core/cm.c21
-rw-r--r--drivers/infiniband/core/cma.c22
-rw-r--r--drivers/infiniband/core/fmr_pool.c8
-rw-r--r--drivers/infiniband/core/mad.c22
-rw-r--r--drivers/infiniband/core/sa_query.c10
-rw-r--r--drivers/infiniband/core/user_mad.c87
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c42
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c76
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c15
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c8
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c3
-rw-r--r--drivers/leds/leds-net48xx.c7
-rw-r--r--drivers/message/fusion/Kconfig2
-rw-r--r--drivers/message/fusion/Makefile1
-rw-r--r--drivers/message/fusion/mptbase.c99
-rw-r--r--drivers/message/fusion/mptbase.h13
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptctl.h5
-rw-r--r--drivers/message/fusion/mptfc.c14
-rw-r--r--drivers/message/fusion/mptsas.c109
-rw-r--r--drivers/message/fusion/mptscsih.c118
-rw-r--r--drivers/message/fusion/mptspi.c10
-rw-r--r--drivers/net/dummy.c1
-rw-r--r--drivers/net/e1000/e1000.h3
-rw-r--r--drivers/net/e1000/e1000_main.c52
-rw-r--r--drivers/net/ifb.c1
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/skge.c5
-rw-r--r--drivers/net/sky2.c7
-rw-r--r--drivers/net/spider_net.c580
-rw-r--r--drivers/net/spider_net.h73
-rw-r--r--drivers/net/sunhme.c9
-rw-r--r--drivers/net/sunlance.c8
-rw-r--r--drivers/net/tg3.c116
-rw-r--r--drivers/net/via-velocity.c17
-rw-r--r--drivers/net/wan/c101.c4
-rw-r--r--drivers/net/wan/hdlc_ppp.c1
-rw-r--r--drivers/net/wan/hdlc_raw.c1
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c1
-rw-r--r--drivers/net/wan/hdlc_x25.c1
-rw-r--r--drivers/net/wan/n2.c3
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c2
-rw-r--r--drivers/net/wireless/orinoco.c4
-rw-r--r--drivers/net/wireless/zd1201.c2
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-isl1208.c591
-rw-r--r--drivers/s390/block/xpram.c17
-rw-r--r--drivers/s390/char/raw3270.c52
-rw-r--r--drivers/s390/char/tape_class.c10
-rw-r--r--drivers/s390/char/tape_core.c18
-rw-r--r--drivers/s390/cio/ccwgroup.c10
-rw-r--r--drivers/s390/cio/cmf.c1
-rw-r--r--drivers/s390/cio/device_fsm.c3
-rw-r--r--drivers/s390/net/ctcmain.c21
-rw-r--r--drivers/s390/net/qeth_main.c7
-rw-r--r--drivers/sbus/sbus.c2
-rw-r--r--drivers/scsi/53c7xx.c8
-rw-r--r--drivers/scsi/NCR53C9x.c18
-rw-r--r--drivers/scsi/NCR_D700.c14
-rw-r--r--drivers/scsi/aha152x.c43
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c21
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c1
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/ata_piix.c165
-rw-r--r--drivers/scsi/atari_NCR5380.c2
-rw-r--r--drivers/scsi/constants.c126
-rw-r--r--drivers/scsi/esp.c16
-rw-r--r--drivers/scsi/ibmvscsi/iseries_vscsi.c2
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c1
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libata-eh.c69
-rw-r--r--drivers/scsi/lpfc/lpfc.h9
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c59
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c64
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c55
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac53c94.c2
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/pluto.c2
-rw-r--r--drivers/scsi/qlogicpti.c4
-rw-r--r--drivers/scsi/sata_promise.c7
-rw-r--r--drivers/scsi/scsi.c11
-rw-r--r--drivers/scsi/scsi_debug.c72
-rw-r--r--drivers/scsi/scsi_error.c210
-rw-r--r--drivers/scsi/scsi_ioctl.c5
-rw-r--r--drivers/scsi/scsi_lib.c88
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_transport_sas.c64
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/seagate.c2
-rw-r--r--drivers/scsi/sr.c5
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/scsi/sun3_NCR5380.c2
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/wd33c93.c2
-rw-r--r--drivers/serial/sunsab.c7
-rw-r--r--drivers/serial/sunsu.c5
-rw-r--r--drivers/serial/sunzilog.c125
-rw-r--r--drivers/video/Kconfig20
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/mbx/Makefile4
-rw-r--r--drivers/video/mbx/mbxdebugfs.c188
-rw-r--r--drivers/video/mbx/mbxfb.c683
-rw-r--r--drivers/video/mbx/reg_bits.h418
-rw-r--r--drivers/video/mbx/regs.h195
-rw-r--r--fs/char_dev.c22
-rw-r--r--fs/jfs/jfs_txnmgr.c2
-rw-r--r--fs/jfs/namei.c33
-rw-r--r--fs/namei.c8
-rw-r--r--fs/proc/array.c6
-rw-r--r--fs/proc/base.c33
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/reiserfs/procfs.c25
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h4
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c7
-rw-r--r--fs/xfs/quota/xfs_qm_bhv.c19
-rw-r--r--fs/xfs/xfs_inode.c17
-rw-r--r--fs/xfs/xfs_log.c12
-rw-r--r--fs/xfs/xfs_vfsops.c2
-rw-r--r--include/asm-alpha/barrier.h3
-rw-r--r--include/asm-arm/system.h1
-rw-r--r--include/asm-arm26/system.h1
-rw-r--r--include/asm-cris/system.h1
-rw-r--r--include/asm-frv/system.h1
-rw-r--r--include/asm-generic/Kbuild.asm5
-rw-r--r--include/asm-h8300/system.h1
-rw-r--r--include/asm-i386/system.h2
-rw-r--r--include/asm-ia64/system.h3
-rw-r--r--include/asm-m32r/system.h1
-rw-r--r--include/asm-m68k/oplib.h5
-rw-r--r--include/asm-m68k/system.h1
-rw-r--r--include/asm-m68knommu/system.h1
-rw-r--r--include/asm-mips/system.h3
-rw-r--r--include/asm-parisc/system.h2
-rw-r--r--include/asm-powerpc/system.h1
-rw-r--r--include/asm-ppc/system.h1
-rw-r--r--include/asm-s390/system.h10
-rw-r--r--include/asm-s390/timex.h4
-rw-r--r--include/asm-sh/system.h1
-rw-r--r--include/asm-sh64/system.h1
-rw-r--r--include/asm-sparc/oplib.h5
-rw-r--r--include/asm-sparc/signal.h2
-rw-r--r--include/asm-sparc/system.h1
-rw-r--r--include/asm-sparc64/openprom.h2
-rw-r--r--include/asm-sparc64/oplib.h5
-rw-r--r--include/asm-sparc64/pgtable.h2
-rw-r--r--include/asm-sparc64/sfp-machine.h2
-rw-r--r--include/asm-sparc64/system.h2
-rw-r--r--include/asm-v850/system.h1
-rw-r--r--include/asm-x86_64/calgary.h5
-rw-r--r--include/asm-x86_64/page.h2
-rw-r--r--include/asm-x86_64/swiotlb.h2
-rw-r--r--include/asm-x86_64/system.h1
-rw-r--r--include/asm-xtensa/system.h1
-rw-r--r--include/linux/cpu.h6
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/delayacct.h119
-rw-r--r--include/linux/futex.h3
-rw-r--r--include/linux/i2c-id.h1
-rw-r--r--include/linux/ide.h1
-rw-r--r--include/linux/if_vlan.h5
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/libata.h4
-rw-r--r--include/linux/list.h11
-rw-r--r--include/linux/module.h10
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/netdevice.h6
-rw-r--r--include/linux/netfilter_bridge.h2
-rw-r--r--include/linux/nsc_gpio.h2
-rw-r--r--include/linux/sched.h56
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/linux/taskstats.h137
-rw-r--r--include/linux/taskstats_kern.h89
-rw-r--r--include/linux/time.h12
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/net/genetlink.h20
-rw-r--r--include/net/netdma.h2
-rw-r--r--include/net/pkt_sched.h18
-rw-r--r--include/net/sctp/structs.h11
-rw-r--r--include/net/sctp/user.h9
-rw-r--r--include/rdma/ib_addr.h10
-rw-r--r--include/rdma/ib_fmr_pool.h2
-rw-r--r--include/rdma/ib_mad.h7
-rw-r--r--include/scsi/scsi_cmnd.h9
-rw-r--r--include/scsi/scsi_transport_sas.h7
-rw-r--r--include/video/mbxfb.h28
-rw-r--r--init/Kconfig24
-rw-r--r--init/main.c4
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/acct.c4
-rw-r--r--kernel/cpu.c75
-rw-r--r--kernel/cpuset.c24
-rw-r--r--kernel/delayacct.c178
-rw-r--r--kernel/exit.c10
-rw-r--r--kernel/fork.c6
-rw-r--r--kernel/futex.c121
-rw-r--r--kernel/futex_compat.c34
-rw-r--r--kernel/kallsyms.c4
-rw-r--r--kernel/kthread.c24
-rw-r--r--kernel/module.c11
-rw-r--r--kernel/rtmutex-tester.c1
-rw-r--r--kernel/sched.c84
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/taskstats.c568
-rw-r--r--kernel/timer.c20
-rw-r--r--lib/Kconfig.debug10
-rw-r--r--lib/idr.c16
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/memory.c6
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/vmalloc.c7
-rw-r--r--net/8021q/vlan.c11
-rw-r--r--net/appletalk/ddp.c6
-rw-r--r--net/atm/br2684.c3
-rw-r--r--net/atm/clip.c3
-rw-r--r--net/atm/lec.c3
-rw-r--r--net/atm/mpc.c3
-rw-r--r--net/atm/pppoatm.c3
-rw-r--r--net/atm/resources.c3
-rw-r--r--net/ax25/sysctl_net_ax25.c4
-rw-r--r--net/bluetooth/rfcomm/core.c19
-rw-r--r--net/bridge/br_ioctl.c7
-rw-r--r--net/bridge/br_netfilter.c5
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/skbuff.c91
-rw-r--r--net/core/user_dma.c1
-rw-r--r--net/dccp/feat.h2
-rw-r--r--net/dccp/ipv4.c3
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/dccp/options.c2
-rw-r--r--net/decnet/dn_dev.c9
-rw-r--r--net/decnet/dn_fib.c3
-rw-r--r--net/decnet/dn_neigh.c3
-rw-r--r--net/decnet/dn_rules.c3
-rw-r--r--net/decnet/dn_table.c11
-rw-r--r--net/econet/af_econet.c3
-rw-r--r--net/ieee80211/Kconfig1
-rw-r--r--net/ieee80211/ieee80211_crypt.c3
-rw-r--r--net/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--net/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--net/ieee80211/ieee80211_wx.c7
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_auth.c28
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_io.c3
-rw-r--r--net/ipv4/ah4.c4
-rw-r--r--net/ipv4/arp.c3
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/esp4.c4
-rw-r--r--net/ipv4/fib_hash.c6
-rw-r--r--net/ipv4/fib_rules.c3
-rw-r--r--net/ipv4/fib_semantics.c15
-rw-r--r--net/ipv4/igmp.c12
-rw-r--r--net/ipv4/inet_diag.c3
-rw-r--r--net/ipv4/ip_gre.c1
-rw-r--r--net/ipv4/ip_input.c3
-rw-r--r--net/ipv4/ip_options.c1
-rw-r--r--net/ipv4/ipcomp.c3
-rw-r--r--net/ipv4/ipip.c1
-rw-r--r--net/ipv4/ipmr.c21
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c10
-rw-r--r--net/ipv4/ipvs/ip_vs_est.c3
-rw-r--r--net/ipv4/netfilter/ip_conntrack_helper_h323.c2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_standalone.c4
-rw-r--r--net/ipv4/netfilter/ip_nat_snmp_basic.c4
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c3
-rw-r--r--net/ipv4/raw.c1
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c1
-rw-r--r--net/ipv6/ip6_input.c2
-rw-r--r--net/ipv6/ip6_tunnel.c3
-rw-r--r--net/ipv6/raw.c1
-rw-r--r--net/ipv6/sit.c1
-rw-r--r--net/ipv6/xfrm6_tunnel.c140
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/irda/ircomm/ircomm_core.c4
-rw-r--r--net/irda/ircomm/ircomm_lmp.c4
-rw-r--r--net/irda/ircomm/ircomm_param.c2
-rw-r--r--net/irda/ircomm/ircomm_tty.c8
-rw-r--r--net/irda/irda_device.c4
-rw-r--r--net/irda/iriap.c9
-rw-r--r--net/irda/iriap_event.c2
-rw-r--r--net/irda/irias_object.c24
-rw-r--r--net/irda/irlan/irlan_common.c16
-rw-r--r--net/irda/irlan/irlan_provider.c2
-rw-r--r--net/irda/irlap.c8
-rw-r--r--net/irda/irlap_frame.c19
-rw-r--r--net/irda/irlmp.c11
-rw-r--r--net/irda/irnet/irnet_ppp.c3
-rw-r--r--net/irda/irttp.c20
-rw-r--r--net/lapb/lapb_iface.c4
-rw-r--r--net/llc/llc_core.c3
-rw-r--r--net/netfilter/Kconfig4
-rw-r--r--net/netfilter/nf_conntrack_standalone.c4
-rw-r--r--net/netfilter/nf_queue.c9
-rw-r--r--net/netfilter/xt_physdev.c15
-rw-r--r--net/netfilter/xt_pkttype.c12
-rw-r--r--net/netlink/af_netlink.c13
-rw-r--r--net/rxrpc/connection.c6
-rw-r--r--net/rxrpc/peer.c3
-rw-r--r--net/rxrpc/transport.c6
-rw-r--r--net/sched/act_api.c9
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c6
-rw-r--r--net/sched/cls_basic.c6
-rw-r--r--net/sched/cls_fw.c6
-rw-r--r--net/sched/cls_route.c9
-rw-r--r--net/sched/cls_rsvp.h9
-rw-r--r--net/sched/cls_tcindex.c12
-rw-r--r--net/sched/cls_u32.c15
-rw-r--r--net/sched/em_meta.c3
-rw-r--r--net/sched/ematch.c3
-rw-r--r--net/sched/estimator.c3
-rw-r--r--net/sched/sch_cbq.c3
-rw-r--r--net/sched/sch_generic.c3
-rw-r--r--net/sched/sch_gred.c3
-rw-r--r--net/sched/sch_hfsc.c3
-rw-r--r--net/sched/sch_htb.c7
-rw-r--r--net/sched/sch_netem.c4
-rw-r--r--net/sctp/associola.c27
-rw-r--r--net/sctp/bind_addr.c8
-rw-r--r--net/sctp/endpointola.c11
-rw-r--r--net/sctp/ipv6.c3
-rw-r--r--net/sctp/outqueue.c9
-rw-r--r--net/sctp/protocol.c7
-rw-r--r--net/sctp/sm_make_chunk.c14
-rw-r--r--net/sctp/sm_sideeffect.c12
-rw-r--r--net/sctp/sm_statefuns.c8
-rw-r--r--net/sctp/socket.c74
-rw-r--r--net/sctp/transport.c9
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c9
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c3
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c3
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c3
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c3
-rw-r--r--net/sunrpc/clnt.c3
-rw-r--r--net/sunrpc/stats.c7
-rw-r--r--net/sunrpc/svc.c6
-rw-r--r--net/sunrpc/svcsock.c3
-rw-r--r--net/sunrpc/xprt.c3
-rw-r--r--net/sunrpc/xprtsock.c6
-rw-r--r--net/tipc/bearer.c6
-rw-r--r--net/tipc/cluster.c8
-rw-r--r--net/tipc/discover.c2
-rw-r--r--net/tipc/link.c3
-rw-r--r--net/tipc/name_table.c16
-rw-r--r--net/tipc/net.c5
-rw-r--r--net/tipc/port.c5
-rw-r--r--net/tipc/ref.c2
-rw-r--r--net/tipc/subscr.c3
-rw-r--r--net/tipc/user_reg.c3
-rw-r--r--net/tipc/zone.c3
-rw-r--r--net/unix/af_unix.c3
-rw-r--r--net/wanrouter/af_wanpipe.c9
-rw-r--r--net/wanrouter/wanmain.c9
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--net/xfrm/xfrm_state.c3
-rw-r--r--security/selinux/hooks.c14
462 files changed, 7814 insertions, 3090 deletions
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt
index 7c717699032c..63392c9132b4 100644
--- a/Documentation/DMA-mapping.txt
+++ b/Documentation/DMA-mapping.txt
@@ -698,12 +698,12 @@ these interfaces. Remember that, as defined, consistent mappings are
698always going to be SAC addressable. 698always going to be SAC addressable.
699 699
700The first thing your driver needs to do is query the PCI platform 700The first thing your driver needs to do is query the PCI platform
701layer with your devices DAC addressing capabilities: 701layer if it is capable of handling your devices DAC addressing
702capabilities:
702 703
703 int pci_dac_set_dma_mask(struct pci_dev *pdev, u64 mask); 704 int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
704 705
705This routine behaves identically to pci_set_dma_mask. You may not 706You may not use the following interfaces if this routine fails.
706use the following interfaces if this routine fails.
707 707
708Next, DMA addresses using this API are kept track of using the 708Next, DMA addresses using this API are kept track of using the
709dma64_addr_t type. It is guaranteed to be big enough to hold any 709dma64_addr_t type. It is guaranteed to be big enough to hold any
diff --git a/Documentation/accounting/delay-accounting.txt b/Documentation/accounting/delay-accounting.txt
new file mode 100644
index 000000000000..be215e58423b
--- /dev/null
+++ b/Documentation/accounting/delay-accounting.txt
@@ -0,0 +1,110 @@
1Delay accounting
2----------------
3
4Tasks encounter delays in execution when they wait
5for some kernel resource to become available e.g. a
6runnable task may wait for a free CPU to run on.
7
8The per-task delay accounting functionality measures
9the delays experienced by a task while
10
11a) waiting for a CPU (while being runnable)
12b) waiting for the completion of synchronous block I/O initiated by the task
13c) swapping in pages
14
15and makes these statistics available to userspace through
16the taskstats interface.
17
18Such delays provide feedback for setting a task's cpu priority,
19io priority and rss limit values appropriately. Long delays for
20important tasks could be a trigger for raising their corresponding priorities.
21
22The functionality, through its use of the taskstats interface, also provides
23delay statistics aggregated for all tasks (or threads) belonging to a
24thread group (corresponding to a traditional Unix process). This is a commonly
25needed aggregation that is more efficiently done by the kernel.
26
27Userspace utilities, particularly resource management applications, can also
28aggregate delay statistics into arbitrary groups. To enable this, delay
29statistics of a task are available both during its lifetime and on its
30exit, ensuring continuous and complete monitoring can be done.
31
32
33Interface
34---------
35
36Delay accounting uses the taskstats interface which is described
37in detail in a separate document in this directory. Taskstats returns a
38generic data structure to userspace corresponding to per-pid and per-tgid
39statistics. The delay accounting functionality populates specific fields of
40this structure. See
41 include/linux/taskstats.h
42for a description of the fields pertaining to delay accounting.
43These generally take the form of counters giving the cumulative
44delay seen for cpu, sync block I/O, swapin etc.
45
46Taking the difference of two successive readings of a given
47counter (say cpu_delay_total) for a task will give the delay
48experienced by the task waiting for the corresponding resource
49in that interval.
50
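For illustration only (not part of this patch): a minimal sketch of how a
monitoring tool might turn two successive taskstats samples of the same task
into a per-interval figure. It assumes the samples were obtained through the
taskstats interface, as getdelays.c below does; see taskstats.h for the units
of each counter.

    #include <linux/taskstats.h>

    /*
     * The counters are cumulative, so the delay accrued during the
     * sampling interval is just the difference between the two readings.
     */
    static __u64 cpu_delay_in_interval(const struct taskstats *prev,
                                       const struct taskstats *cur)
    {
            return cur->cpu_delay_total - prev->cpu_delay_total;
    }

The same pattern applies to blkio_delay_total and swapin_delay_total.
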
51When a task exits, records containing the per-task statistics
52are sent to userspace without requiring a command. If it is the last exiting
53task of a thread group, the per-tgid statistics are also sent. More details
54are given in the taskstats interface description.
55
56The getdelays.c userspace utility in this directory allows simple commands to
57be run and the corresponding delay statistics to be displayed. It also serves
58as an example of using the taskstats interface.
59
60Usage
61-----
62
63Compile the kernel with
64 CONFIG_TASK_DELAY_ACCT=y
65 CONFIG_TASKSTATS=y
66
67Enable the accounting at boot time by adding
68the following to the kernel boot options
69 delayacct
70
71and after the system has booted up, use a utility
72similar to getdelays.c to access the delays
73seen by a given task or a task group (tgid).
74The utility also allows a given command to be
75executed and the corresponding delays to be
76seen.
77
78General format of the getdelays command
79
80getdelays [-t tgid] [-p pid] [-c cmd...]
81
82
83Get delays, since system boot, for pid 10
84# ./getdelays -p 10
85(output similar to next case)
86
87Get sum of delays, since system boot, for all pids with tgid 5
88# ./getdelays -t 5
89
90
91CPU count real total virtual total delay total
92 7876 92005750 100000000 24001500
93IO count delay total
94 0 0
95MEM count delay total
96 0 0
97
98Get delays seen in executing a given simple command
99# ./getdelays -c ls /
100
101bin data1 data3 data5 dev home media opt root srv sys usr
102boot data2 data4 data6 etc lib mnt proc sbin subdomain tmp var
103
104
105CPU count real total virtual total delay total
106 6 4000250 4000000 0
107IO count delay total
108 0 0
109MEM count delay total
110 0 0
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
new file mode 100644
index 000000000000..795ca3911cc5
--- /dev/null
+++ b/Documentation/accounting/getdelays.c
@@ -0,0 +1,396 @@
1/* getdelays.c
2 *
3 * Utility to get per-pid and per-tgid delay accounting statistics
4 * Also illustrates usage of the taskstats interface
5 *
6 * Copyright (C) Shailabh Nagar, IBM Corp. 2005
7 * Copyright (C) Balbir Singh, IBM Corp. 2006
8 * Copyright (c) Jay Lan, SGI. 2006
9 *
10 */
11
12#include <stdio.h>
13#include <stdlib.h>
14#include <errno.h>
15#include <unistd.h>
16#include <poll.h>
17#include <string.h>
18#include <fcntl.h>
19#include <sys/types.h>
20#include <sys/stat.h>
21#include <sys/socket.h>
22#include <sys/types.h>
23#include <signal.h>
24
25#include <linux/genetlink.h>
26#include <linux/taskstats.h>
27
28/*
29 * Generic macros for dealing with netlink sockets. Might be duplicated
30 * elsewhere. It is recommended that commercial grade applications use
31 * libnl or libnetlink and use the interfaces provided by the library
32 */
33#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
34#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
35#define NLA_DATA(na) ((void *)((char*)(na) + NLA_HDRLEN))
36#define NLA_PAYLOAD(len) (len - NLA_HDRLEN)
37
38#define err(code, fmt, arg...) do { printf(fmt, ##arg); exit(code); } while (0)
39int done = 0;
40int rcvbufsz=0;
41
42 char name[100];
43int dbg=0, print_delays=0;
44__u64 stime, utime;
45#define PRINTF(fmt, arg...) { \
46 if (dbg) { \
47 printf(fmt, ##arg); \
48 } \
49 }
50
51/* Maximum size of response requested or message sent */
52#define MAX_MSG_SIZE 256
53/* Maximum number of cpus expected to be specified in a cpumask */
54#define MAX_CPUS 32
55/* Maximum length of pathname to log file */
56#define MAX_FILENAME 256
57
58struct msgtemplate {
59 struct nlmsghdr n;
60 struct genlmsghdr g;
61 char buf[MAX_MSG_SIZE];
62};
63
64char cpumask[100+6*MAX_CPUS];
65
66/*
67 * Create a raw netlink socket and bind
68 */
69static int create_nl_socket(int protocol)
70{
71 int fd;
72 struct sockaddr_nl local;
73
74 fd = socket(AF_NETLINK, SOCK_RAW, protocol);
75 if (fd < 0)
76 return -1;
77
78 if (rcvbufsz)
79 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
80 &rcvbufsz, sizeof(rcvbufsz)) < 0) {
81 printf("Unable to set socket rcv buf size to %d\n",
82 rcvbufsz);
83 return -1;
84 }
85
86 memset(&local, 0, sizeof(local));
87 local.nl_family = AF_NETLINK;
88
89 if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0)
90 goto error;
91
92 return fd;
93error:
94 close(fd);
95 return -1;
96}
97
98
99int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
100 __u8 genl_cmd, __u16 nla_type,
101 void *nla_data, int nla_len)
102{
103 struct nlattr *na;
104 struct sockaddr_nl nladdr;
105 int r, buflen;
106 char *buf;
107
108 struct msgtemplate msg;
109
110 msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
111 msg.n.nlmsg_type = nlmsg_type;
112 msg.n.nlmsg_flags = NLM_F_REQUEST;
113 msg.n.nlmsg_seq = 0;
114 msg.n.nlmsg_pid = nlmsg_pid;
115 msg.g.cmd = genl_cmd;
116 msg.g.version = 0x1;
117 na = (struct nlattr *) GENLMSG_DATA(&msg);
118 na->nla_type = nla_type;
119 na->nla_len = nla_len + 1 + NLA_HDRLEN;
120 memcpy(NLA_DATA(na), nla_data, nla_len);
121 msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
122
123 buf = (char *) &msg;
124 buflen = msg.n.nlmsg_len ;
125 memset(&nladdr, 0, sizeof(nladdr));
126 nladdr.nl_family = AF_NETLINK;
127 while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
128 sizeof(nladdr))) < buflen) {
129 if (r > 0) {
130 buf += r;
131 buflen -= r;
132 } else if (errno != EAGAIN)
133 return -1;
134 }
135 return 0;
136}
137
138
139/*
140 * Probe the controller in genetlink to find the family id
141 * for the TASKSTATS family
142 */
143int get_family_id(int sd)
144{
145 struct {
146 struct nlmsghdr n;
147 struct genlmsghdr g;
148 char buf[256];
149 } ans;
150
151 int id, rc;
152 struct nlattr *na;
153 int rep_len;
154
155 strcpy(name, TASKSTATS_GENL_NAME);
156 rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
157 CTRL_ATTR_FAMILY_NAME, (void *)name,
158 strlen(TASKSTATS_GENL_NAME)+1);
159
160 rep_len = recv(sd, &ans, sizeof(ans), 0);
161 if (ans.n.nlmsg_type == NLMSG_ERROR ||
162 (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len))
163 return 0;
164
165 na = (struct nlattr *) GENLMSG_DATA(&ans);
166 na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
167 if (na->nla_type == CTRL_ATTR_FAMILY_ID) {
168 id = *(__u16 *) NLA_DATA(na);
169 }
170 return id;
171}
172
173void print_delayacct(struct taskstats *t)
174{
175 printf("\n\nCPU %15s%15s%15s%15s\n"
176 " %15llu%15llu%15llu%15llu\n"
177 "IO %15s%15s\n"
178 " %15llu%15llu\n"
179 "MEM %15s%15s\n"
180 " %15llu%15llu\n\n",
181 "count", "real total", "virtual total", "delay total",
182 t->cpu_count, t->cpu_run_real_total, t->cpu_run_virtual_total,
183 t->cpu_delay_total,
184 "count", "delay total",
185 t->blkio_count, t->blkio_delay_total,
186 "count", "delay total", t->swapin_count, t->swapin_delay_total);
187}
188
189int main(int argc, char *argv[])
190{
191 int c, rc, rep_len, aggr_len, len2, cmd_type;
192 __u16 id;
193 __u32 mypid;
194
195 struct nlattr *na;
196 int nl_sd = -1;
197 int len = 0;
198 pid_t tid = 0;
199 pid_t rtid = 0;
200
201 int fd = 0;
202 int count = 0;
203 int write_file = 0;
204 int maskset = 0;
205 char logfile[128];
206 int loop = 0;
207
208 struct msgtemplate msg;
209
210 while (1) {
211 c = getopt(argc, argv, "dw:r:m:t:p:v:l");
212 if (c < 0)
213 break;
214
215 switch (c) {
216 case 'd':
217 printf("print delayacct stats ON\n");
218 print_delays = 1;
219 break;
220 case 'w':
221 strncpy(logfile, optarg, MAX_FILENAME);
222 printf("write to file %s\n", logfile);
223 write_file = 1;
224 break;
225 case 'r':
226 rcvbufsz = atoi(optarg);
227 printf("receive buf size %d\n", rcvbufsz);
228 if (rcvbufsz < 0)
229 err(1, "Invalid rcv buf size\n");
230 break;
231 case 'm':
232 strncpy(cpumask, optarg, sizeof(cpumask));
233 maskset = 1;
234 printf("cpumask %s maskset %d\n", cpumask, maskset);
235 break;
236 case 't':
237 tid = atoi(optarg);
238 if (!tid)
239 err(1, "Invalid tgid\n");
240 cmd_type = TASKSTATS_CMD_ATTR_TGID;
241 print_delays = 1;
242 break;
243 case 'p':
244 tid = atoi(optarg);
245 if (!tid)
246 err(1, "Invalid pid\n");
247 cmd_type = TASKSTATS_CMD_ATTR_PID;
248 print_delays = 1;
249 break;
250 case 'v':
251 printf("debug on\n");
252 dbg = 1;
253 break;
254 case 'l':
255 printf("listen forever\n");
256 loop = 1;
257 break;
258 default:
259 printf("Unknown option %d\n", c);
260 exit(-1);
261 }
262 }
263
264 if (write_file) {
265 fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC,
266 S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
267 if (fd == -1) {
268 perror("Cannot open output file\n");
269 exit(1);
270 }
271 }
272
273 if ((nl_sd = create_nl_socket(NETLINK_GENERIC)) < 0)
274 err(1, "error creating Netlink socket\n");
275
276
277 mypid = getpid();
278 id = get_family_id(nl_sd);
279 if (!id) {
280 printf("Error getting family id, errno %d", errno);
281 goto err;
282 }
283 PRINTF("family id %d\n", id);
284
285 if (maskset) {
286 rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
287 TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
288 &cpumask, sizeof(cpumask));
289 PRINTF("Sent register cpumask, retval %d\n", rc);
290 if (rc < 0) {
291 printf("error sending register cpumask\n");
292 goto err;
293 }
294 }
295
296 if (tid) {
297 rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
298 cmd_type, &tid, sizeof(__u32));
299 PRINTF("Sent pid/tgid, retval %d\n", rc);
300 if (rc < 0) {
301 printf("error sending tid/tgid cmd\n");
302 goto done;
303 }
304 }
305
306 do {
307 int i;
308
309 rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
310 PRINTF("received %d bytes\n", rep_len);
311
312 if (rep_len < 0) {
313 printf("nonfatal reply error: errno %d\n", errno);
314 continue;
315 }
316 if (msg.n.nlmsg_type == NLMSG_ERROR ||
317 !NLMSG_OK((&msg.n), rep_len)) {
318 printf("fatal reply error, errno %d\n", errno);
319 goto done;
320 }
321
322 PRINTF("nlmsghdr size=%d, nlmsg_len=%d, rep_len=%d\n",
323 sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len);
324
325
326 rep_len = GENLMSG_PAYLOAD(&msg.n);
327
328 na = (struct nlattr *) GENLMSG_DATA(&msg);
329 len = 0;
330 i = 0;
331 while (len < rep_len) {
332 len += NLA_ALIGN(na->nla_len);
333 switch (na->nla_type) {
334 case TASKSTATS_TYPE_AGGR_TGID:
335 /* Fall through */
336 case TASKSTATS_TYPE_AGGR_PID:
337 aggr_len = NLA_PAYLOAD(na->nla_len);
338 len2 = 0;
339 /* For nested attributes, na follows */
340 na = (struct nlattr *) NLA_DATA(na);
341 done = 0;
342 while (len2 < aggr_len) {
343 switch (na->nla_type) {
344 case TASKSTATS_TYPE_PID:
345 rtid = *(int *) NLA_DATA(na);
346 if (print_delays)
347 printf("PID\t%d\n", rtid);
348 break;
349 case TASKSTATS_TYPE_TGID:
350 rtid = *(int *) NLA_DATA(na);
351 if (print_delays)
352 printf("TGID\t%d\n", rtid);
353 break;
354 case TASKSTATS_TYPE_STATS:
355 count++;
356 if (print_delays)
357 print_delayacct((struct taskstats *) NLA_DATA(na));
358 if (fd) {
359 if (write(fd, NLA_DATA(na), na->nla_len) < 0) {
360 err(1,"write error\n");
361 }
362 }
363 if (!loop)
364 goto done;
365 break;
366 default:
367 printf("Unknown nested nla_type %d\n", na->nla_type);
368 break;
369 }
370 len2 += NLA_ALIGN(na->nla_len);
371 na = (struct nlattr *) ((char *) na + len2);
372 }
373 break;
374
375 default:
376 printf("Unknown nla_type %d\n", na->nla_type);
377 break;
378 }
379 na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
380 }
381 } while (loop);
382done:
383 if (maskset) {
384 rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
385 TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
386 &cpumask, sizeof(cpumask));
387 printf("Sent deregister mask, retval %d\n", rc);
388 if (rc < 0)
389 err(rc, "error sending deregister cpumask\n");
390 }
391err:
392 close(nl_sd);
393 if (fd)
394 close(fd);
395 return 0;
396}
diff --git a/Documentation/accounting/taskstats.txt b/Documentation/accounting/taskstats.txt
new file mode 100644
index 000000000000..92ebf29e9041
--- /dev/null
+++ b/Documentation/accounting/taskstats.txt
@@ -0,0 +1,181 @@
1Per-task statistics interface
2-----------------------------
3
4
5Taskstats is a netlink-based interface for sending per-task and
6per-process statistics from the kernel to userspace.
7
8Taskstats was designed for the following benefits:
9
10- efficiently provide statistics during lifetime of a task and on its exit
11- unified interface for multiple accounting subsystems
12- extensibility for use by future accounting patches
13
14Terminology
15-----------
16
17"pid", "tid" and "task" are used interchangeably and refer to the standard
18Linux task defined by struct task_struct. per-pid stats are the same as
19per-task stats.
20
21"tgid", "process" and "thread group" are used interchangeably and refer to the
22tasks that share an mm_struct i.e. the traditional Unix process. Despite the
23use of tgid, there is no special treatment for the task that is thread group
24leader - a process is deemed alive as long as it has any task belonging to it.
25
26Usage
27-----
28
29To get statistics during a task's lifetime, userspace opens a unicast netlink
30socket (NETLINK_GENERIC family) and sends commands specifying a pid or a tgid.
31The response contains statistics for a task (if pid is specified) or the sum of
32statistics for all tasks of the process (if tgid is specified).
33
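As an illustration only, such a command could be issued by reusing the
send_cmd() helper from Documentation/accounting/getdelays.c (added by this
patch); sd is assumed to be the NETLINK_GENERIC socket and family_id the value
returned by get_family_id() in that utility:

    /* Request the per-pid stats of one task; the kernel replies with a
     * TASKSTATS_TYPE_AGGR_PID message carrying the pid and its stats. */
    static int request_pid_stats(int sd, __u16 family_id, __u32 pid)
    {
            return send_cmd(sd, family_id, getpid(), TASKSTATS_CMD_GET,
                            TASKSTATS_CMD_ATTR_PID, &pid, sizeof(pid));
    }

Substituting TASKSTATS_CMD_ATTR_TGID requests the summed per-tgid stats
instead.
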
34To obtain statistics for tasks which are exiting, the userspace listener
35sends a register command and specifies a cpumask. Whenever a task exits on
36one of the cpus in the cpumask, its per-pid statistics are sent to the
37registered listener. Using cpumasks limits the data received by one listener
38and assists in flow control over the netlink interface; this is explained in
39more detail below.
40
41If the exiting task is the last thread exiting its thread group,
42an additional record containing the per-tgid stats is also sent to userspace.
43The latter contains the sum of per-pid stats for all threads in the thread
44group, both past and present.
45
46getdelays.c is a simple utility demonstrating usage of the taskstats interface
47for reporting delay accounting statistics. Users can register cpumasks,
48send commands and process responses, listen for per-tid/tgid exit data,
49write the data received to a file and do basic flow control by increasing
50receive buffer sizes.
51
52Interface
53---------
54
55The user-kernel interface is encapsulated in include/linux/taskstats.h
56
57To avoid this documentation becoming obsolete as the interface evolves, only
58an outline of the current version is given. taskstats.h always overrides the
59description here.
60
61struct taskstats is the common accounting structure for both per-pid and
62per-tgid data. It is versioned and can be extended by each accounting subsystem
63that is added to the kernel. The fields and their semantics are defined in the
64taskstats.h file.
65
66The data exchanged between user and kernel space is a netlink message belonging
67to the NETLINK_GENERIC family and using the netlink attributes interface.
68The messages are in the format
69
70 +----------+- - -+-------------+-------------------+
71 | nlmsghdr | Pad | genlmsghdr | taskstats payload |
72 +----------+- - -+-------------+-------------------+
73
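For reference, the getdelays.c utility added by this patch models exactly this
layout with a plain struct (MAX_MSG_SIZE is its own buffer-size constant):

    struct msgtemplate {
            struct nlmsghdr n;              /* netlink header */
            struct genlmsghdr g;            /* generic netlink header */
            char buf[MAX_MSG_SIZE];         /* attributes: taskstats payload */
    };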
74
75The taskstats payload is one of the following three kinds:
76
771. Commands: Sent from user to kernel. Commands to get data on
78a pid/tgid consist of one attribute, of type TASKSTATS_CMD_ATTR_PID/TGID,
79containing a u32 pid or tgid in the attribute payload. The pid/tgid denotes
80the task/process for which userspace wants statistics.
81
82Commands to register/deregister interest in exit data from a set of cpus
83consist of one attribute, of type
84TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK and contain a cpumask in the
85attribute payload. The cpumask is specified as an ascii string of
86comma-separated cpu ranges e.g. to listen to exit data from cpus 1,2,3,5,7,8
87the cpumask would be "1-3,5,7-8". If userspace forgets to deregister interest
88in cpus before closing the listening socket, the kernel cleans up its interest
89set over time. However, for the sake of efficiency, an explicit deregistration
90is advisable.
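
As an illustration only, registration can be done with the send_cmd() helper
from getdelays.c (added by this patch); sd and family_id are assumed to be set
up as in that utility, and deregistration passes the same mask with
TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK:

    /* Ask for exit data from cpus 1, 2, 3 and 5 only. */
    static int register_cpumask(int sd, __u16 family_id)
    {
            char mask[] = "1-3,5";

            return send_cmd(sd, family_id, getpid(), TASKSTATS_CMD_GET,
                            TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
                            mask, sizeof(mask));
    }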
91
922. Response for a command: sent from the kernel in response to a userspace
93command. The payload is a series of three attributes of type:
94
95a) TASKSTATS_TYPE_AGGR_PID/TGID: attribute containing no payload, indicating
96that a pid/tgid will be followed by some stats.
97
98b) TASKSTATS_TYPE_PID/TGID: attribute whose payload is the pid/tgid whose stats
99are being returned.
100
101c) TASKSTATS_TYPE_STATS: attribute with a struct taskstats as payload. The
102same structure is used for both per-pid and per-tgid stats.
103
1043. New message sent by kernel whenever a task exits. The payload consists of a
105 series of attributes of the following type:
106
107a) TASKSTATS_TYPE_AGGR_PID: indicates next two attributes will be pid+stats
108b) TASKSTATS_TYPE_PID: contains exiting task's pid
109c) TASKSTATS_TYPE_STATS: contains the exiting task's per-pid stats
110d) TASKSTATS_TYPE_AGGR_TGID: indicates next two attributes will be tgid+stats
111e) TASKSTATS_TYPE_TGID: contains tgid of process to which task belongs
112f) TASKSTATS_TYPE_STATS: contains the per-tgid stats for exiting task's process
113
114
115per-tgid stats
116--------------
117
118Taskstats provides per-process stats, in addition to per-task stats, since
119resource management is often done at a process granularity and aggregating task
120stats in userspace alone is inefficient and potentially inaccurate (due to lack
121of atomicity).
122
123However, maintaining per-process, in addition to per-task stats, within the
124kernel has space and time overheads. To address this, the taskstats code
125accumulates each exiting task's statistics into a process-wide data structure.
126When the last task of a process exits, the process-level data accumulated also
127gets sent to userspace (along with the per-task data).
128
129When a user queries per-tgid data, the sum over all live threads in the group
130is added to the accumulated total for previously exited threads of the same
131thread group.
132
133Extending taskstats
134-------------------
135
136There are two ways to extend the taskstats interface to export more
137per-task/process stats as patches to collect them get added to the kernel
138in future:
139
1401. Adding more fields to the end of the existing struct taskstats. Backward
141 compatibility is ensured by the version number within the
142 structure. Userspace will use only the fields of the struct that correspond
143 to the version it is using (a version check is sketched after this list).
144
1452. Defining separate statistic structs and using the netlink attributes
146 interface to return them. Since userspace processes each netlink attribute
147 independently, it can always ignore attributes whose type it does not
148 understand (because it is using an older version of the interface).
149
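A minimal sketch of the version check behind option 1 (illustration only;
'added_in_version' is an arbitrary name for the taskstats version in which a
hypothetical new field first appeared):

    /* The kernel fills in t->version; a field appended in a later version
     * of struct taskstats may be read only if the running kernel reports
     * a version at least that new. */
    static int field_present(const struct taskstats *t, __u16 added_in_version)
    {
            return t->version >= added_in_version;
    }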
150
151Choosing between 1. and 2. is a matter of trading off flexibility and
152overhead. If only a few fields need to be added, then 1. is the preferable
153path since the kernel and userspace don't need to incur the overhead of
154processing new netlink attributes. But if the new fields expand the existing
155struct too much, requiring disparate userspace accounting utilities to
156unnecessarily receive large structures whose fields are of no interest, then
157extending the attributes structure would be worthwhile.
158
159Flow control for taskstats
160--------------------------
161
162When the rate of task exits becomes large, a listener may not be able to keep
163up with the kernel's rate of sending per-tid/tgid exit data leading to data
164loss. This possibility gets compounded when the taskstats structure gets
165extended and the number of cpus grows large.
166
167To avoid losing statistics, userspace should do one or more of the following:
168
169- increase the receive buffer sizes for the netlink sockets opened by
170listeners to receive exit data.
171
172- create more listeners and reduce the number of cpus being listened to by
173each listener. In the extreme case, there could be one listener for each cpu.
174Users may also consider setting the cpu affinity of the listener to the subset
175of cpus to which it listens, especially if they are listening to just one cpu.
176
177Despite these measures, if userspace receives ENOBUFS error messages
178indicating overflow of receive buffers, it should take measures to handle the
179loss of data.
180
181----
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 9d3a0775a11d..87851efb0228 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -258,3 +258,19 @@ Why: These drivers never compiled since they were added to the kernel
258Who: Jean Delvare <khali@linux-fr.org> 258Who: Jean Delvare <khali@linux-fr.org>
259 259
260--------------------------- 260---------------------------
261
262What: Bridge netfilter deferred IPv4/IPv6 output hook calling
263When: January 2007
264Why: The deferred output hooks are a layering violation causing unusual
265 and broken behaviour on bridge devices. Examples of things they
266 break include QoS classification using the MARK or CLASSIFY targets,
267 the IPsec policy match and connection tracking with VLANs on a
268 bridge. Their only use is to enable bridge output port filtering
269 within iptables with the physdev match, which can also be done by
270 combining iptables and ebtables using netfilter marks. Until it
271 is removed, the hook deferral is disabled by default and is
272 only enabled when needed.
273
274Who: Patrick McHardy <kaber@trash.net>
275
276---------------------------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 149f62ba14a5..e11f7728ec6f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -448,6 +448,8 @@ running once the system is up.
448 Format: <area>[,<node>] 448 Format: <area>[,<node>]
449 See also Documentation/networking/decnet.txt. 449 See also Documentation/networking/decnet.txt.
450 450
451 delayacct [KNL] Enable per-task delay accounting
452
451 dhash_entries= [KNL] 453 dhash_entries= [KNL]
452 Set number of hash buckets for dentry cache. 454 Set number of hash buckets for dentry cache.
453 455
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 28d1bc3edb1c..46b9b389df35 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1015,10 +1015,9 @@ CPU from reordering them.
1015There are some more advanced barrier functions: 1015There are some more advanced barrier functions:
1016 1016
1017 (*) set_mb(var, value) 1017 (*) set_mb(var, value)
1018 (*) set_wmb(var, value)
1019 1018
1020 These assign the value to the variable and then insert at least a write 1019 This assigns the value to the variable and then inserts at least a write
1021 barrier after it, depending on the function. They aren't guaranteed to 1020 barrier after it, depending on the function. It isn't guaranteed to
1022 insert anything more than a compiler barrier in a UP compilation. 1021 insert anything more than a compiler barrier in a UP compilation.
1023 1022
1024 1023
diff --git a/Documentation/ramdisk.txt b/Documentation/ramdisk.txt
index 7c25584e082c..52f75b7d51c2 100644
--- a/Documentation/ramdisk.txt
+++ b/Documentation/ramdisk.txt
@@ -6,7 +6,7 @@ Contents:
6 1) Overview 6 1) Overview
7 2) Kernel Command Line Parameters 7 2) Kernel Command Line Parameters
8 3) Using "rdev -r" 8 3) Using "rdev -r"
9 4) An Example of Creating a Compressed RAM Disk 9 4) An Example of Creating a Compressed RAM Disk
10 10
11 11
121) Overview 121) Overview
@@ -34,7 +34,7 @@ make it clearer. The original "ramdisk=<ram_size>" has been kept around for
34compatibility reasons, but it may be removed in the future. 34compatibility reasons, but it may be removed in the future.
35 35
36The new RAM disk also has the ability to load compressed RAM disk images, 36The new RAM disk also has the ability to load compressed RAM disk images,
37allowing one to squeeze more programs onto an average installation or 37allowing one to squeeze more programs onto an average installation or
38rescue floppy disk. 38rescue floppy disk.
39 39
40 40
@@ -51,7 +51,7 @@ default is 4096 (4 MB) (8192 (8 MB) on S390).
51 =================== 51 ===================
52 52
53This parameter tells the RAM disk driver how many bytes to use per block. The 53This parameter tells the RAM disk driver how many bytes to use per block. The
54default is 512. 54default is 1024 (BLOCK_SIZE).
55 55
56 56
573) Using "rdev -r" 573) Using "rdev -r"
@@ -70,7 +70,7 @@ These numbers are no magical secrets, as seen below:
70./arch/i386/kernel/setup.c:#define RAMDISK_PROMPT_FLAG 0x8000 70./arch/i386/kernel/setup.c:#define RAMDISK_PROMPT_FLAG 0x8000
71./arch/i386/kernel/setup.c:#define RAMDISK_LOAD_FLAG 0x4000 71./arch/i386/kernel/setup.c:#define RAMDISK_LOAD_FLAG 0x4000
72 72
73Consider a typical two floppy disk setup, where you will have the 73Consider a typical two floppy disk setup, where you will have the
74kernel on disk one, and have already put a RAM disk image onto disk #2. 74kernel on disk one, and have already put a RAM disk image onto disk #2.
75 75
76Hence you want to set bits 0 to 13 as 0, meaning that your RAM disk 76Hence you want to set bits 0 to 13 as 0, meaning that your RAM disk
@@ -97,12 +97,12 @@ Since the default start = 0 and the default prompt = 1, you could use:
97 append = "load_ramdisk=1" 97 append = "load_ramdisk=1"
98 98
99 99
1004) An Example of Creating a Compressed RAM Disk 1004) An Example of Creating a Compressed RAM Disk
101---------------------------------------------- 101----------------------------------------------
102 102
103To create a RAM disk image, you will need a spare block device to 103To create a RAM disk image, you will need a spare block device to
104construct it on. This can be the RAM disk device itself, or an 104construct it on. This can be the RAM disk device itself, or an
105unused disk partition (such as an unmounted swap partition). For this 105unused disk partition (such as an unmounted swap partition). For this
106example, we will use the RAM disk device, "/dev/ram0". 106example, we will use the RAM disk device, "/dev/ram0".
107 107
108Note: This technique should not be done on a machine with less than 8 MB 108Note: This technique should not be done on a machine with less than 8 MB
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 6887d44d2661..6da24e7a56cb 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -238,6 +238,13 @@ Debugging
238 pagefaulttrace Dump all page faults. Only useful for extreme debugging 238 pagefaulttrace Dump all page faults. Only useful for extreme debugging
239 and will create a lot of output. 239 and will create a lot of output.
240 240
241 call_trace=[old|both|newfallback|new]
242 old: use old inexact backtracer
243 new: use new exact dwarf2 unwinder
244 both: print entries from both
245 newfallback: use new unwinder but fall back to old if it gets
246 stuck (default)
247
241Misc 248Misc
242 249
243 noreplacement Don't replace instructions with more appropriate ones 250 noreplacement Don't replace instructions with more appropriate ones
diff --git a/MAINTAINERS b/MAINTAINERS
index a3462c3414cd..b2afc7ae965b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -771,6 +771,7 @@ M: aliakc@web.de
771P: Jamie Lenehan 771P: Jamie Lenehan
772M: lenehan@twibble.org 772M: lenehan@twibble.org
773W: http://twibble.org/dist/dc395x/ 773W: http://twibble.org/dist/dc395x/
774L: dc395x@twibble.org
774L: http://lists.twibble.org/mailman/listinfo/dc395x/ 775L: http://lists.twibble.org/mailman/listinfo/dc395x/
775S: Maintained 776S: Maintained
776 777
@@ -1501,6 +1502,7 @@ P: Yi Zhu
1501M: yi.zhu@intel.com 1502M: yi.zhu@intel.com
1502P: James Ketrenos 1503P: James Ketrenos
1503M: jketreno@linux.intel.com 1504M: jketreno@linux.intel.com
1505L: ipw2100-devel@lists.sourceforge.net
1504L: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel 1506L: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel
1505W: http://ipw2100.sourceforge.net 1507W: http://ipw2100.sourceforge.net
1506S: Supported 1508S: Supported
@@ -1510,6 +1512,7 @@ P: Yi Zhu
1510M: yi.zhu@intel.com 1512M: yi.zhu@intel.com
1511P: James Ketrenos 1513P: James Ketrenos
1512M: jketreno@linux.intel.com 1514M: jketreno@linux.intel.com
1515L: ipw2100-devel@lists.sourceforge.net
1513L: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel 1516L: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel
1514W: http://ipw2200.sourceforge.net 1517W: http://ipw2200.sourceforge.net
1515S: Supported 1518S: Supported
@@ -1673,10 +1676,8 @@ L: linux-kernel@vger.kernel.org
1673S: Maintained 1676S: Maintained
1674 1677
1675LAPB module 1678LAPB module
1676P: Henner Eisen
1677M: eis@baty.hanse.de
1678L: linux-x25@vger.kernel.org 1679L: linux-x25@vger.kernel.org
1679S: Maintained 1680S: Orphan
1680 1681
1681LASI 53c700 driver for PARISC 1682LASI 53c700 driver for PARISC
1682P: James E.J. Bottomley 1683P: James E.J. Bottomley
@@ -2226,6 +2227,7 @@ S: Maintained
2226 2227
2227PCMCIA SUBSYSTEM 2228PCMCIA SUBSYSTEM
2228P: Linux PCMCIA Team 2229P: Linux PCMCIA Team
2230L: linux-pcmcia@lists.infradead.org
2229L: http://lists.infradead.org/mailman/listinfo/linux-pcmcia 2231L: http://lists.infradead.org/mailman/listinfo/linux-pcmcia
2230T: git kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git 2232T: git kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git
2231S: Maintained 2233S: Maintained
@@ -2236,6 +2238,12 @@ M: tsbogend@alpha.franken.de
2236L: netdev@vger.kernel.org 2238L: netdev@vger.kernel.org
2237S: Maintained 2239S: Maintained
2238 2240
2241PER-TASK DELAY ACCOUNTING
2242P: Shailabh Nagar
2243M: nagar@watson.ibm.com
2244L: linux-kernel@vger.kernel.org
2245S: Maintained
2246
2239PERSONALITY HANDLING 2247PERSONALITY HANDLING
2240P: Christoph Hellwig 2248P: Christoph Hellwig
2241M: hch@infradead.org 2249M: hch@infradead.org
@@ -2763,6 +2771,12 @@ P: Deepak Saxena
2763M: dsaxena@plexity.net 2771M: dsaxena@plexity.net
2764S: Maintained 2772S: Maintained
2765 2773
2774TASKSTATS STATISTICS INTERFACE
2775P: Shailabh Nagar
2776M: nagar@watson.ibm.com
2777L: linux-kernel@vger.kernel.org
2778S: Maintained
2779
2766TI PARALLEL LINK CABLE DRIVER 2780TI PARALLEL LINK CABLE DRIVER
2767P: Romain Lievin 2781P: Romain Lievin
2768M: roms@lpg.ticalc.org 2782M: roms@lpg.ticalc.org
diff --git a/Makefile b/Makefile
index 7c010f3325a9..1dd58d35d72c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 18 3SUBLEVEL = 18
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc2
5NAME=Crazed Snow-Weasel 5NAME=Crazed Snow-Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 48f0f62f781c..5b96f038367f 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -90,7 +90,7 @@ static void crash_save_self(struct pt_regs *regs)
90 crash_save_this_cpu(regs, cpu); 90 crash_save_this_cpu(regs, cpu);
91} 91}
92 92
93#ifdef CONFIG_SMP 93#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
94static atomic_t waiting_for_crash_ipi; 94static atomic_t waiting_for_crash_ipi;
95 95
96static int crash_nmi_callback(struct pt_regs *regs, int cpu) 96static int crash_nmi_callback(struct pt_regs *regs, int cpu)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 923bb292f47f..8657c739656a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
690 /* 690 /*
691 * Now maybe handle debug registers and/or IO bitmaps 691 * Now maybe handle debug registers and/or IO bitmaps
692 */ 692 */
693 if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)) 693 if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
694 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) 694 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
695 __switch_to_xtra(next_p, tss); 695 __switch_to_xtra(next_p, tss);
696 696
697 disable_tsc(prev_p, next_p); 697 disable_tsc(prev_p, next_p);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 7864395c1441..f1682206d304 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1327,7 +1327,10 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
1327 res->start = e820.map[i].addr; 1327 res->start = e820.map[i].addr;
1328 res->end = res->start + e820.map[i].size - 1; 1328 res->end = res->start + e820.map[i].size - 1;
1329 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 1329 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
1330 request_resource(&iomem_resource, res); 1330 if (request_resource(&iomem_resource, res)) {
1331 kfree(res);
1332 continue;
1333 }
1331 if (e820.map[i].type == E820_RAM) { 1334 if (e820.map[i].type == E820_RAM) {
1332 /* 1335 /*
1333 * We don't know which RAM region contains kernel data, 1336 * We don't know which RAM region contains kernel data,
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 8705c0f05788..edd00f6cee37 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs)
135{ 135{
136 unsigned long pc = instruction_pointer(regs); 136 unsigned long pc = instruction_pointer(regs);
137 137
138 if (in_lock_functions(pc)) 138 if (!user_mode_vm(regs) && in_lock_functions(pc))
139 return *(unsigned long *)(regs->ebp + 4); 139 return *(unsigned long *)(regs->ebp + 4);
140 140
141 return pc; 141 return pc;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 5cfd4f42eeba..021f8fdc7512 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
187 if (unwind_init_blocked(&info, task) == 0) 187 if (unwind_init_blocked(&info, task) == 0)
188 unw_ret = show_trace_unwind(&info, log_lvl); 188 unw_ret = show_trace_unwind(&info, log_lvl);
189 } 189 }
190 if (unw_ret > 0) { 190 if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
191 if (call_trace > 0) 191#ifdef CONFIG_STACK_UNWIND
192 print_symbol("DWARF2 unwinder stuck at %s\n",
193 UNW_PC(&info));
194 if (call_trace == 1) {
195 printk("Leftover inexact backtrace:\n");
196 if (UNW_SP(&info))
197 stack = (void *)UNW_SP(&info);
198 } else if (call_trace > 1)
192 return; 199 return;
193 printk("%sLegacy call trace:\n", log_lvl); 200 else
201 printk("Full inexact backtrace again:\n");
202#else
203 printk("Inexact backtrace:\n");
204#endif
194 } 205 }
195 } 206 }
196 207
@@ -324,35 +335,35 @@ void show_registers(struct pt_regs *regs)
324 335
325static void handle_BUG(struct pt_regs *regs) 336static void handle_BUG(struct pt_regs *regs)
326{ 337{
338 unsigned long eip = regs->eip;
327 unsigned short ud2; 339 unsigned short ud2;
328 unsigned short line;
329 char *file;
330 char c;
331 unsigned long eip;
332
333 eip = regs->eip;
334 340
335 if (eip < PAGE_OFFSET) 341 if (eip < PAGE_OFFSET)
336 goto no_bug; 342 return;
337 if (__get_user(ud2, (unsigned short __user *)eip)) 343 if (__get_user(ud2, (unsigned short __user *)eip))
338 goto no_bug; 344 return;
339 if (ud2 != 0x0b0f) 345 if (ud2 != 0x0b0f)
340 goto no_bug; 346 return;
341 if (__get_user(line, (unsigned short __user *)(eip + 2)))
342 goto bug;
343 if (__get_user(file, (char * __user *)(eip + 4)) ||
344 (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
345 file = "<bad filename>";
346 347
347 printk(KERN_EMERG "------------[ cut here ]------------\n"); 348 printk(KERN_EMERG "------------[ cut here ]------------\n");
348 printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
349 349
350no_bug: 350#ifdef CONFIG_DEBUG_BUGVERBOSE
351 return; 351 do {
352 unsigned short line;
353 char *file;
354 char c;
355
356 if (__get_user(line, (unsigned short __user *)(eip + 2)))
357 break;
358 if (__get_user(file, (char * __user *)(eip + 4)) ||
359 (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
360 file = "<bad filename>";
352 361
353 /* Here we know it was a BUG but file-n-line is unavailable */ 362 printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
354bug: 363 return;
355 printk(KERN_EMERG "Kernel BUG\n"); 364 } while (0);
365#endif
366 printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
356} 367}
357 368
358/* This is gone through when something in the kernel 369/* This is gone through when something in the kernel
@@ -1238,8 +1249,10 @@ static int __init call_trace_setup(char *s)
1238 call_trace = -1; 1249 call_trace = -1;
1239 else if (strcmp(s, "both") == 0) 1250 else if (strcmp(s, "both") == 0)
1240 call_trace = 0; 1251 call_trace = 0;
1241 else if (strcmp(s, "new") == 0) 1252 else if (strcmp(s, "newfallback") == 0)
1242 call_trace = 1; 1253 call_trace = 1;
 1254 else if (strcmp(s, "new") == 0)
1255 call_trace = 2;
1243 return 1; 1256 return 1;
1244} 1257}
1245__setup("call_trace=", call_trace_setup); 1258__setup("call_trace=", call_trace_setup);
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 4b75212ab6dd..efc7e7d5f4d0 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -843,7 +843,6 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
843unsigned long 843unsigned long
844copy_to_user(void __user *to, const void *from, unsigned long n) 844copy_to_user(void __user *to, const void *from, unsigned long n)
845{ 845{
846 might_sleep();
847 BUG_ON((long) n < 0); 846 BUG_ON((long) n < 0);
848 if (access_ok(VERIFY_WRITE, to, n)) 847 if (access_ok(VERIFY_WRITE, to, n))
849 n = __copy_to_user(to, from, n); 848 n = __copy_to_user(to, from, n);
@@ -870,7 +869,6 @@ EXPORT_SYMBOL(copy_to_user);
870unsigned long 869unsigned long
871copy_from_user(void *to, const void __user *from, unsigned long n) 870copy_from_user(void *to, const void __user *from, unsigned long n)
872{ 871{
873 might_sleep();
874 BUG_ON((long) n < 0); 872 BUG_ON((long) n < 0);
875 if (access_ok(VERIFY_READ, from, n)) 873 if (access_ok(VERIFY_READ, from, n))
876 n = __copy_from_user(to, from, n); 874 n = __copy_from_user(to, from, n);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f4dfc10026d2..f1d4591eddbb 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,13 +1,16 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-rc1 3# Linux kernel version: 2.6.18-rc2
4# Mon Apr 3 14:34:15 2006 4# Thu Jul 27 13:51:07 2006
5# 5#
6CONFIG_MMU=y 6CONFIG_MMU=y
7CONFIG_LOCKDEP_SUPPORT=y
8CONFIG_STACKTRACE_SUPPORT=y
7CONFIG_RWSEM_XCHGADD_ALGORITHM=y 9CONFIG_RWSEM_XCHGADD_ALGORITHM=y
8CONFIG_GENERIC_HWEIGHT=y 10CONFIG_GENERIC_HWEIGHT=y
9CONFIG_GENERIC_CALIBRATE_DELAY=y 11CONFIG_GENERIC_CALIBRATE_DELAY=y
10CONFIG_S390=y 12CONFIG_S390=y
13CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
11 14
12# 15#
13# Code maturity level options 16# Code maturity level options
@@ -25,6 +28,7 @@ CONFIG_SWAP=y
25CONFIG_SYSVIPC=y 28CONFIG_SYSVIPC=y
26CONFIG_POSIX_MQUEUE=y 29CONFIG_POSIX_MQUEUE=y
27# CONFIG_BSD_PROCESS_ACCT is not set 30# CONFIG_BSD_PROCESS_ACCT is not set
31# CONFIG_TASKSTATS is not set
28CONFIG_SYSCTL=y 32CONFIG_SYSCTL=y
29CONFIG_AUDIT=y 33CONFIG_AUDIT=y
30# CONFIG_AUDITSYSCALL is not set 34# CONFIG_AUDITSYSCALL is not set
@@ -43,10 +47,12 @@ CONFIG_PRINTK=y
43CONFIG_BUG=y 47CONFIG_BUG=y
44CONFIG_ELF_CORE=y 48CONFIG_ELF_CORE=y
45CONFIG_BASE_FULL=y 49CONFIG_BASE_FULL=y
50CONFIG_RT_MUTEXES=y
46CONFIG_FUTEX=y 51CONFIG_FUTEX=y
47CONFIG_EPOLL=y 52CONFIG_EPOLL=y
48CONFIG_SHMEM=y 53CONFIG_SHMEM=y
49CONFIG_SLAB=y 54CONFIG_SLAB=y
55CONFIG_VM_EVENT_COUNTERS=y
50# CONFIG_TINY_SHMEM is not set 56# CONFIG_TINY_SHMEM is not set
51CONFIG_BASE_SMALL=0 57CONFIG_BASE_SMALL=0
52# CONFIG_SLOB is not set 58# CONFIG_SLOB is not set
@@ -94,7 +100,6 @@ CONFIG_HOTPLUG_CPU=y
94CONFIG_DEFAULT_MIGRATION_COST=1000000 100CONFIG_DEFAULT_MIGRATION_COST=1000000
95CONFIG_COMPAT=y 101CONFIG_COMPAT=y
96CONFIG_SYSVIPC_COMPAT=y 102CONFIG_SYSVIPC_COMPAT=y
97CONFIG_BINFMT_ELF32=y
98 103
99# 104#
100# Code generation options 105# Code generation options
@@ -115,6 +120,7 @@ CONFIG_FLATMEM=y
115CONFIG_FLAT_NODE_MEM_MAP=y 120CONFIG_FLAT_NODE_MEM_MAP=y
116# CONFIG_SPARSEMEM_STATIC is not set 121# CONFIG_SPARSEMEM_STATIC is not set
117CONFIG_SPLIT_PTLOCK_CPUS=4 122CONFIG_SPLIT_PTLOCK_CPUS=4
123CONFIG_RESOURCES_64BIT=y
118 124
119# 125#
120# I/O subsystem configuration 126# I/O subsystem configuration
@@ -142,6 +148,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y
142# CONFIG_APPLDATA_BASE is not set 148# CONFIG_APPLDATA_BASE is not set
143CONFIG_NO_IDLE_HZ=y 149CONFIG_NO_IDLE_HZ=y
144CONFIG_NO_IDLE_HZ_INIT=y 150CONFIG_NO_IDLE_HZ_INIT=y
151CONFIG_S390_HYPFS_FS=y
145CONFIG_KEXEC=y 152CONFIG_KEXEC=y
146 153
147# 154#
@@ -174,6 +181,8 @@ CONFIG_IP_FIB_HASH=y
174# CONFIG_INET_IPCOMP is not set 181# CONFIG_INET_IPCOMP is not set
175# CONFIG_INET_XFRM_TUNNEL is not set 182# CONFIG_INET_XFRM_TUNNEL is not set
176# CONFIG_INET_TUNNEL is not set 183# CONFIG_INET_TUNNEL is not set
184CONFIG_INET_XFRM_MODE_TRANSPORT=y
185CONFIG_INET_XFRM_MODE_TUNNEL=y
177CONFIG_INET_DIAG=y 186CONFIG_INET_DIAG=y
178CONFIG_INET_TCP_DIAG=y 187CONFIG_INET_TCP_DIAG=y
179# CONFIG_TCP_CONG_ADVANCED is not set 188# CONFIG_TCP_CONG_ADVANCED is not set
@@ -186,7 +195,10 @@ CONFIG_IPV6=y
186# CONFIG_INET6_IPCOMP is not set 195# CONFIG_INET6_IPCOMP is not set
187# CONFIG_INET6_XFRM_TUNNEL is not set 196# CONFIG_INET6_XFRM_TUNNEL is not set
188# CONFIG_INET6_TUNNEL is not set 197# CONFIG_INET6_TUNNEL is not set
198CONFIG_INET6_XFRM_MODE_TRANSPORT=y
199CONFIG_INET6_XFRM_MODE_TUNNEL=y
189# CONFIG_IPV6_TUNNEL is not set 200# CONFIG_IPV6_TUNNEL is not set
201# CONFIG_NETWORK_SECMARK is not set
190# CONFIG_NETFILTER is not set 202# CONFIG_NETFILTER is not set
191 203
192# 204#
@@ -263,6 +275,7 @@ CONFIG_NET_ESTIMATOR=y
263# Network testing 275# Network testing
264# 276#
265# CONFIG_NET_PKTGEN is not set 277# CONFIG_NET_PKTGEN is not set
278# CONFIG_NET_TCPPROBE is not set
266# CONFIG_HAMRADIO is not set 279# CONFIG_HAMRADIO is not set
267# CONFIG_IRDA is not set 280# CONFIG_IRDA is not set
268# CONFIG_BT is not set 281# CONFIG_BT is not set
@@ -276,6 +289,7 @@ CONFIG_STANDALONE=y
276CONFIG_PREVENT_FIRMWARE_BUILD=y 289CONFIG_PREVENT_FIRMWARE_BUILD=y
277# CONFIG_FW_LOADER is not set 290# CONFIG_FW_LOADER is not set
278# CONFIG_DEBUG_DRIVER is not set 291# CONFIG_DEBUG_DRIVER is not set
292CONFIG_SYS_HYPERVISOR=y
279 293
280# 294#
281# Connector - unified userspace <-> kernelspace linker 295# Connector - unified userspace <-> kernelspace linker
@@ -334,6 +348,7 @@ CONFIG_BLK_DEV_NBD=m
334CONFIG_BLK_DEV_RAM=y 348CONFIG_BLK_DEV_RAM=y
335CONFIG_BLK_DEV_RAM_COUNT=16 349CONFIG_BLK_DEV_RAM_COUNT=16
336CONFIG_BLK_DEV_RAM_SIZE=4096 350CONFIG_BLK_DEV_RAM_SIZE=4096
351CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
337CONFIG_BLK_DEV_INITRD=y 352CONFIG_BLK_DEV_INITRD=y
338# CONFIG_CDROM_PKTCDVD is not set 353# CONFIG_CDROM_PKTCDVD is not set
339 354
@@ -359,9 +374,7 @@ CONFIG_MD_LINEAR=m
359CONFIG_MD_RAID0=m 374CONFIG_MD_RAID0=m
360CONFIG_MD_RAID1=m 375CONFIG_MD_RAID1=m
361# CONFIG_MD_RAID10 is not set 376# CONFIG_MD_RAID10 is not set
362CONFIG_MD_RAID5=m 377# CONFIG_MD_RAID456 is not set
363# CONFIG_MD_RAID5_RESHAPE is not set
364# CONFIG_MD_RAID6 is not set
365CONFIG_MD_MULTIPATH=m 378CONFIG_MD_MULTIPATH=m
366# CONFIG_MD_FAULTY is not set 379# CONFIG_MD_FAULTY is not set
367CONFIG_BLK_DEV_DM=y 380CONFIG_BLK_DEV_DM=y
@@ -419,7 +432,8 @@ CONFIG_S390_TAPE_34XX=m
419# 432#
420# Cryptographic devices 433# Cryptographic devices
421# 434#
422CONFIG_Z90CRYPT=m 435CONFIG_ZCRYPT=m
436# CONFIG_ZCRYPT_MONOLITHIC is not set
423 437
424# 438#
425# Network device support 439# Network device support
@@ -509,6 +523,7 @@ CONFIG_FS_MBCACHE=y
509# CONFIG_MINIX_FS is not set 523# CONFIG_MINIX_FS is not set
510# CONFIG_ROMFS_FS is not set 524# CONFIG_ROMFS_FS is not set
511CONFIG_INOTIFY=y 525CONFIG_INOTIFY=y
526CONFIG_INOTIFY_USER=y
512# CONFIG_QUOTA is not set 527# CONFIG_QUOTA is not set
513CONFIG_DNOTIFY=y 528CONFIG_DNOTIFY=y
514# CONFIG_AUTOFS_FS is not set 529# CONFIG_AUTOFS_FS is not set
@@ -614,26 +629,36 @@ CONFIG_MSDOS_PARTITION=y
614# Instrumentation Support 629# Instrumentation Support
615# 630#
616# CONFIG_PROFILING is not set 631# CONFIG_PROFILING is not set
617# CONFIG_STATISTICS is not set 632CONFIG_STATISTICS=y
633CONFIG_KPROBES=y
618 634
619# 635#
620# Kernel hacking 636# Kernel hacking
621# 637#
638CONFIG_TRACE_IRQFLAGS_SUPPORT=y
622# CONFIG_PRINTK_TIME is not set 639# CONFIG_PRINTK_TIME is not set
623CONFIG_MAGIC_SYSRQ=y 640CONFIG_MAGIC_SYSRQ=y
641# CONFIG_UNUSED_SYMBOLS is not set
624CONFIG_DEBUG_KERNEL=y 642CONFIG_DEBUG_KERNEL=y
625CONFIG_LOG_BUF_SHIFT=17 643CONFIG_LOG_BUF_SHIFT=17
626# CONFIG_DETECT_SOFTLOCKUP is not set 644# CONFIG_DETECT_SOFTLOCKUP is not set
627# CONFIG_SCHEDSTATS is not set 645# CONFIG_SCHEDSTATS is not set
628# CONFIG_DEBUG_SLAB is not set 646# CONFIG_DEBUG_SLAB is not set
629CONFIG_DEBUG_PREEMPT=y 647CONFIG_DEBUG_PREEMPT=y
630CONFIG_DEBUG_MUTEXES=y 648# CONFIG_DEBUG_RT_MUTEXES is not set
649# CONFIG_RT_MUTEX_TESTER is not set
631CONFIG_DEBUG_SPINLOCK=y 650CONFIG_DEBUG_SPINLOCK=y
651CONFIG_DEBUG_MUTEXES=y
652# CONFIG_DEBUG_RWSEMS is not set
653# CONFIG_DEBUG_LOCK_ALLOC is not set
654# CONFIG_PROVE_LOCKING is not set
632CONFIG_DEBUG_SPINLOCK_SLEEP=y 655CONFIG_DEBUG_SPINLOCK_SLEEP=y
656# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
633# CONFIG_DEBUG_KOBJECT is not set 657# CONFIG_DEBUG_KOBJECT is not set
634# CONFIG_DEBUG_INFO is not set 658# CONFIG_DEBUG_INFO is not set
635CONFIG_DEBUG_FS=y 659CONFIG_DEBUG_FS=y
636# CONFIG_DEBUG_VM is not set 660# CONFIG_DEBUG_VM is not set
661# CONFIG_FRAME_POINTER is not set
637# CONFIG_UNWIND_INFO is not set 662# CONFIG_UNWIND_INFO is not set
638CONFIG_FORCED_INLINING=y 663CONFIG_FORCED_INLINING=y
639# CONFIG_RCU_TORTURE_TEST is not set 664# CONFIG_RCU_TORTURE_TEST is not set
@@ -688,3 +713,4 @@ CONFIG_CRYPTO=y
688# CONFIG_CRC16 is not set 713# CONFIG_CRC16 is not set
689CONFIG_CRC32=m 714CONFIG_CRC32=m
690# CONFIG_LIBCRC32C is not set 715# CONFIG_LIBCRC32C is not set
716CONFIG_PLIST=y
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index d00de17b3778..a4dc61f3285e 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -273,7 +273,7 @@ startup_continue:
273.Lbss_end: .long _end 273.Lbss_end: .long _end
274.Lparmaddr: .long PARMAREA 274.Lparmaddr: .long PARMAREA
275.Lsccbaddr: .long .Lsccb 275.Lsccbaddr: .long .Lsccb
276 .align 4096 276 .org 0x12000
277.Lsccb: 277.Lsccb:
278 .hword 0x1000 # length, one page 278 .hword 0x1000 # length, one page
279 .byte 0x00,0x00,0x00 279 .byte 0x00,0x00,0x00
@@ -290,7 +290,7 @@ startup_continue:
290.Lscpincr2: 290.Lscpincr2:
291 .quad 0x00 291 .quad 0x00
292 .fill 3984,1,0 292 .fill 3984,1,0
293 .align 4096 293 .org 0x13000
294 294
295#ifdef CONFIG_SHARED_KERNEL 295#ifdef CONFIG_SHARED_KERNEL
296 .org 0x100000 296 .org 0x100000
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 47744fcca930..9d80c5b1ef95 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -268,7 +268,7 @@ startup_continue:
268.Lparmaddr: 268.Lparmaddr:
269 .quad PARMAREA 269 .quad PARMAREA
270 270
271 .align 4096 271 .org 0x12000
272.Lsccb: 272.Lsccb:
273 .hword 0x1000 # length, one page 273 .hword 0x1000 # length, one page
274 .byte 0x00,0x00,0x00 274 .byte 0x00,0x00,0x00
@@ -285,7 +285,7 @@ startup_continue:
285.Lscpincr2: 285.Lscpincr2:
286 .quad 0x00 286 .quad 0x00
287 .fill 3984,1,0 287 .fill 3984,1,0
288 .align 4096 288 .org 0x13000
289 289
290#ifdef CONFIG_SHARED_KERNEL 290#ifdef CONFIG_SHARED_KERNEL
291 .org 0x100000 291 .org 0x100000
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1ca34f54ea8a..c902f059c7aa 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -877,31 +877,57 @@ static struct bin_attribute ipl_scp_data_attr = {
877 877
878static decl_subsys(ipl, NULL, NULL); 878static decl_subsys(ipl, NULL, NULL);
879 879
880static int ipl_register_fcp_files(void)
881{
882 int rc;
883
884 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
885 &ipl_fcp_attr_group);
886 if (rc)
887 goto out;
888 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
889 &ipl_parameter_attr);
890 if (rc)
891 goto out_ipl_parm;
892 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
893 &ipl_scp_data_attr);
894 if (!rc)
895 goto out;
896
897 sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
898
899out_ipl_parm:
900 sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
901out:
902 return rc;
903}
904
880static int __init 905static int __init
881ipl_device_sysfs_register(void) { 906ipl_device_sysfs_register(void) {
882 int rc; 907 int rc;
883 908
884 rc = firmware_register(&ipl_subsys); 909 rc = firmware_register(&ipl_subsys);
885 if (rc) 910 if (rc)
886 return rc; 911 goto out;
887 912
888 switch (get_ipl_type()) { 913 switch (get_ipl_type()) {
889 case ipl_type_ccw: 914 case ipl_type_ccw:
890 sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group); 915 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
916 &ipl_ccw_attr_group);
891 break; 917 break;
892 case ipl_type_fcp: 918 case ipl_type_fcp:
893 sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group); 919 rc = ipl_register_fcp_files();
894 sysfs_create_bin_file(&ipl_subsys.kset.kobj,
895 &ipl_parameter_attr);
896 sysfs_create_bin_file(&ipl_subsys.kset.kobj,
897 &ipl_scp_data_attr);
898 break; 920 break;
899 default: 921 default:
900 sysfs_create_group(&ipl_subsys.kset.kobj, 922 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
901 &ipl_unknown_attr_group); 923 &ipl_unknown_attr_group);
902 break; 924 break;
903 } 925 }
904 return 0; 926
927 if (rc)
928 firmware_unregister(&ipl_subsys);
929out:
930 return rc;
905} 931}
906 932
907__initcall(ipl_device_sysfs_register); 933__initcall(ipl_device_sysfs_register);
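
The reworked ipl sysfs registration above checks every sysfs_create_*() return value and rolls back, in reverse order, whatever was already created. A condensed sketch of that register-or-unwind idiom (the group and bin attribute names are hypothetical):

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct attribute_group example_attr_group;	/* hypothetical, defined elsewhere */
extern struct bin_attribute example_bin_attr;		/* hypothetical, defined elsewhere */

static int register_example_files(struct kobject *kobj)
{
	int rc;

	rc = sysfs_create_group(kobj, &example_attr_group);
	if (rc)
		return rc;

	rc = sysfs_create_bin_file(kobj, &example_bin_attr);
	if (rc)
		goto out_group;		/* undo the group created above */

	return 0;

out_group:
	sysfs_remove_group(kobj, &example_attr_group);
	return rc;
}
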
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index adba9dfee35e..af90a5f9ab57 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -15,6 +15,7 @@
15 15
16#include <asm/page.h> 16#include <asm/page.h>
17#include <asm/oplib.h> 17#include <asm/oplib.h>
18#include <asm/prom.h>
18#include <asm/smp.h> 19#include <asm/smp.h>
19#include <asm/system.h> 20#include <asm/system.h>
20#include <asm/cpudata.h> 21#include <asm/cpudata.h>
@@ -34,12 +35,6 @@ static int check_cpu_node(int nd, int *cur_inst,
34 int (*compare)(int, int, void *), void *compare_arg, 35 int (*compare)(int, int, void *), void *compare_arg,
35 int *prom_node, int *mid) 36 int *prom_node, int *mid)
36{ 37{
37 char node_str[128];
38
39 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
40 if (strcmp(node_str, "cpu"))
41 return -ENODEV;
42
43 if (!compare(nd, *cur_inst, compare_arg)) { 38 if (!compare(nd, *cur_inst, compare_arg)) {
44 if (prom_node) 39 if (prom_node)
45 *prom_node = nd; 40 *prom_node = nd;
@@ -59,20 +54,14 @@ static int check_cpu_node(int nd, int *cur_inst,
59static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg, 54static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
60 int *prom_node, int *mid) 55 int *prom_node, int *mid)
61{ 56{
62 int nd, cur_inst, err; 57 struct device_node *dp;
58 int cur_inst;
63 59
64 nd = prom_root_node;
65 cur_inst = 0; 60 cur_inst = 0;
66 61 for_each_node_by_type(dp, "cpu") {
67 err = check_cpu_node(nd, &cur_inst, compare, compare_arg, 62 int err = check_cpu_node(dp->node, &cur_inst,
68 prom_node, mid); 63 compare, compare_arg,
69 if (!err) 64 prom_node, mid);
70 return 0;
71
72 nd = prom_getchild(nd);
73 while ((nd = prom_getsibling(nd)) != 0) {
74 err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
75 prom_node, mid);
76 if (!err) 65 if (!err)
77 return 0; 66 return 0;
78 } 67 }
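
The rewritten __cpu_find_by() above walks the unified device tree with for_each_node_by_type() instead of chasing raw PROM node handles by hand. A minimal sketch of that iteration (the counting helper is hypothetical):

#include <asm/prom.h>

static int count_cpu_nodes(void)
{
	struct device_node *dp;
	int n = 0;

	/* visits every node whose device_type is "cpu" */
	for_each_node_by_type(dp, "cpu")
		n++;

	return n;
}
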
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index cde73327ca96..72f0201051a0 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -329,7 +329,7 @@ void handler_irq(int irq, struct pt_regs * regs)
329 disable_pil_irq(irq); 329 disable_pil_irq(irq);
330#ifdef CONFIG_SMP 330#ifdef CONFIG_SMP
331 /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */ 331 /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
332 if(irq < 10) 332 if((sparc_cpu_model==sun4m) && (irq < 10))
333 smp4m_irq_rotate(cpu); 333 smp4m_irq_rotate(cpu);
334#endif 334#endif
335 action = sparc_irq[irq].action; 335 action = sparc_irq[irq].action;
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 5a2faad5d043..97bf87e8cdde 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -596,14 +596,41 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
596 static int pil_to_sbus[] = { 596 static int pil_to_sbus[] = {
597 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, 597 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
598 }; 598 };
599 struct device_node *busp = dp->parent; 599 struct device_node *io_unit, *sbi = dp->parent;
600 struct linux_prom_registers *regs; 600 struct linux_prom_registers *regs;
601 int board = of_getintprop_default(busp, "board#", 0); 601 int board, slot;
602 int slot; 602
603 while (sbi) {
604 if (!strcmp(sbi->name, "sbi"))
605 break;
606
607 sbi = sbi->parent;
608 }
609 if (!sbi)
610 goto build_resources;
603 611
604 regs = of_get_property(dp, "reg", NULL); 612 regs = of_get_property(dp, "reg", NULL);
613 if (!regs)
614 goto build_resources;
615
605 slot = regs->which_io; 616 slot = regs->which_io;
606 617
618 /* If SBI's parent is not io-unit or the io-unit lacks
619 * a "board#" property, something is very wrong.
620 */
621 if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
622 printk("%s: Error, parent is not io-unit.\n",
623 sbi->full_name);
624 goto build_resources;
625 }
626 io_unit = sbi->parent;
627 board = of_getintprop_default(io_unit, "board#", -1);
628 if (board == -1) {
629 printk("%s: Error, lacks board# property.\n",
630 io_unit->full_name);
631 goto build_resources;
632 }
633
607 for (i = 0; i < op->num_irqs; i++) { 634 for (i = 0; i < op->num_irqs; i++) {
608 int this_irq = op->irqs[i]; 635 int this_irq = op->irqs[i];
609 int sbusl = pil_to_sbus[this_irq]; 636 int sbusl = pil_to_sbus[this_irq];
@@ -617,6 +644,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
617 } 644 }
618 } 645 }
619 646
647build_resources:
620 build_device_resources(op, parent); 648 build_device_resources(op, parent);
621 649
622 op->dev.parent = parent; 650 op->dev.parent = parent;
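
The SBUS interrupt fix above climbs the parent chain until it reaches the "sbi" node and falls back gracefully when the tree does not look as expected. In isolation, and with a hypothetical helper name, that walk is roughly:

#include <linux/string.h>
#include <asm/prom.h>

/* climb from dp towards the root until a node named "sbi" is found */
static struct device_node *find_sbi_parent(struct device_node *dp)
{
	struct device_node *p = dp->parent;

	while (p) {
		if (!strcmp(p->name, "sbi"))
			return p;
		p = p->parent;
	}
	return NULL;	/* not under an SBI; caller uses defaults */
}
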
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index 4b06dcb00ebd..4ca9e5fc97f4 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -444,6 +444,7 @@ static struct property * __init build_one_prop(phandle node, char *prev, char *s
444 static struct property *tmp = NULL; 444 static struct property *tmp = NULL;
445 struct property *p; 445 struct property *p;
446 int len; 446 int len;
447 const char *name;
447 448
448 if (tmp) { 449 if (tmp) {
449 p = tmp; 450 p = tmp;
@@ -456,19 +457,21 @@ static struct property * __init build_one_prop(phandle node, char *prev, char *s
456 457
457 p->name = (char *) (p + 1); 458 p->name = (char *) (p + 1);
458 if (special_name) { 459 if (special_name) {
460 strcpy(p->name, special_name);
459 p->length = special_len; 461 p->length = special_len;
460 p->value = prom_early_alloc(special_len); 462 p->value = prom_early_alloc(special_len);
461 memcpy(p->value, special_val, special_len); 463 memcpy(p->value, special_val, special_len);
462 } else { 464 } else {
463 if (prev == NULL) { 465 if (prev == NULL) {
464 prom_firstprop(node, p->name); 466 name = prom_firstprop(node, NULL);
465 } else { 467 } else {
466 prom_nextprop(node, prev, p->name); 468 name = prom_nextprop(node, prev, NULL);
467 } 469 }
468 if (strlen(p->name) == 0) { 470 if (strlen(name) == 0) {
469 tmp = p; 471 tmp = p;
470 return NULL; 472 return NULL;
471 } 473 }
474 strcpy(p->name, name);
472 p->length = prom_getproplen(node, p->name); 475 p->length = prom_getproplen(node, p->name);
473 if (p->length <= 0) { 476 if (p->length <= 0) {
474 p->length = 0; 477 p->length = 0;
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index 6135d4faeeeb..e311ade1b490 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -87,6 +87,7 @@ void __cpuinit smp_store_cpu_info(int id)
87void __init smp_cpus_done(unsigned int max_cpus) 87void __init smp_cpus_done(unsigned int max_cpus)
88{ 88{
89 extern void smp4m_smp_done(void); 89 extern void smp4m_smp_done(void);
90 extern void smp4d_smp_done(void);
90 unsigned long bogosum = 0; 91 unsigned long bogosum = 0;
91 int cpu, num; 92 int cpu, num;
92 93
@@ -100,8 +101,34 @@ void __init smp_cpus_done(unsigned int max_cpus)
100 num, bogosum/(500000/HZ), 101 num, bogosum/(500000/HZ),
101 (bogosum/(5000/HZ))%100); 102 (bogosum/(5000/HZ))%100);
102 103
103 BUG_ON(sparc_cpu_model != sun4m); 104 switch(sparc_cpu_model) {
104 smp4m_smp_done(); 105 case sun4:
106 printk("SUN4\n");
107 BUG();
108 break;
109 case sun4c:
110 printk("SUN4C\n");
111 BUG();
112 break;
113 case sun4m:
114 smp4m_smp_done();
115 break;
116 case sun4d:
117 smp4d_smp_done();
118 break;
119 case sun4e:
120 printk("SUN4E\n");
121 BUG();
122 break;
123 case sun4u:
124 printk("SUN4U\n");
125 BUG();
126 break;
127 default:
128 printk("UNKNOWN!\n");
129 BUG();
130 break;
131 };
105} 132}
106 133
107void cpu_panic(void) 134void cpu_panic(void)
@@ -267,9 +294,9 @@ int setup_profiling_timer(unsigned int multiplier)
267void __init smp_prepare_cpus(unsigned int max_cpus) 294void __init smp_prepare_cpus(unsigned int max_cpus)
268{ 295{
269 extern void smp4m_boot_cpus(void); 296 extern void smp4m_boot_cpus(void);
297 extern void smp4d_boot_cpus(void);
270 int i, cpuid, extra; 298 int i, cpuid, extra;
271 299
272 BUG_ON(sparc_cpu_model != sun4m);
273 printk("Entering SMP Mode...\n"); 300 printk("Entering SMP Mode...\n");
274 301
275 extra = 0; 302 extra = 0;
@@ -283,7 +310,34 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
283 310
284 smp_store_cpu_info(boot_cpu_id); 311 smp_store_cpu_info(boot_cpu_id);
285 312
286 smp4m_boot_cpus(); 313 switch(sparc_cpu_model) {
314 case sun4:
315 printk("SUN4\n");
316 BUG();
317 break;
318 case sun4c:
319 printk("SUN4C\n");
320 BUG();
321 break;
322 case sun4m:
323 smp4m_boot_cpus();
324 break;
325 case sun4d:
326 smp4d_boot_cpus();
327 break;
328 case sun4e:
329 printk("SUN4E\n");
330 BUG();
331 break;
332 case sun4u:
333 printk("SUN4U\n");
334 BUG();
335 break;
336 default:
337 printk("UNKNOWN!\n");
338 BUG();
339 break;
340 };
287} 341}
288 342
289/* Set this up early so that things like the scheduler can init 343/* Set this up early so that things like the scheduler can init
@@ -323,9 +377,37 @@ void __init smp_prepare_boot_cpu(void)
323int __cpuinit __cpu_up(unsigned int cpu) 377int __cpuinit __cpu_up(unsigned int cpu)
324{ 378{
325 extern int smp4m_boot_one_cpu(int); 379 extern int smp4m_boot_one_cpu(int);
326 int ret; 380 extern int smp4d_boot_one_cpu(int);
327 381 int ret=0;
328 ret = smp4m_boot_one_cpu(cpu); 382
383 switch(sparc_cpu_model) {
384 case sun4:
385 printk("SUN4\n");
386 BUG();
387 break;
388 case sun4c:
389 printk("SUN4C\n");
390 BUG();
391 break;
392 case sun4m:
393 ret = smp4m_boot_one_cpu(cpu);
394 break;
395 case sun4d:
396 ret = smp4d_boot_one_cpu(cpu);
397 break;
398 case sun4e:
399 printk("SUN4E\n");
400 BUG();
401 break;
402 case sun4u:
403 printk("SUN4U\n");
404 BUG();
405 break;
406 default:
407 printk("UNKNOWN!\n");
408 BUG();
409 break;
410 };
329 411
330 if (!ret) { 412 if (!ret) {
331 cpu_set(cpu, smp_commenced_mask); 413 cpu_set(cpu, smp_commenced_mask);
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 5fb987fc3d63..4d441a554d35 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -237,7 +237,6 @@ EXPORT_SYMBOL(prom_node_has_property);
237EXPORT_SYMBOL(prom_setprop); 237EXPORT_SYMBOL(prom_setprop);
238EXPORT_SYMBOL(saved_command_line); 238EXPORT_SYMBOL(saved_command_line);
239EXPORT_SYMBOL(prom_apply_obio_ranges); 239EXPORT_SYMBOL(prom_apply_obio_ranges);
240EXPORT_SYMBOL(prom_getname);
241EXPORT_SYMBOL(prom_feval); 240EXPORT_SYMBOL(prom_feval);
242EXPORT_SYMBOL(prom_getbool); 241EXPORT_SYMBOL(prom_getbool);
243EXPORT_SYMBOL(prom_getstring); 242EXPORT_SYMBOL(prom_getstring);
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index b141b7ee6717..ba843f6a2832 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -43,15 +43,10 @@ extern ctxd_t *srmmu_ctx_table_phys;
43extern void calibrate_delay(void); 43extern void calibrate_delay(void);
44 44
45extern volatile int smp_processors_ready; 45extern volatile int smp_processors_ready;
46extern int smp_num_cpus;
47static int smp_highest_cpu; 46static int smp_highest_cpu;
48extern volatile unsigned long cpu_callin_map[NR_CPUS]; 47extern volatile unsigned long cpu_callin_map[NR_CPUS];
49extern cpuinfo_sparc cpu_data[NR_CPUS]; 48extern cpuinfo_sparc cpu_data[NR_CPUS];
50extern unsigned char boot_cpu_id; 49extern unsigned char boot_cpu_id;
51extern int smp_activated;
52extern volatile int __cpu_number_map[NR_CPUS];
53extern volatile int __cpu_logical_map[NR_CPUS];
54extern volatile unsigned long ipi_count;
55extern volatile int smp_process_available; 50extern volatile int smp_process_available;
56 51
57extern cpumask_t smp_commenced_mask; 52extern cpumask_t smp_commenced_mask;
@@ -144,6 +139,8 @@ void __init smp4d_callin(void)
144 spin_lock_irqsave(&sun4d_imsk_lock, flags); 139 spin_lock_irqsave(&sun4d_imsk_lock, flags);
145 cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ 140 cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
146 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 141 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
142 cpu_set(cpuid, cpu_online_map);
143
147} 144}
148 145
149extern void init_IRQ(void); 146extern void init_IRQ(void);
@@ -160,51 +157,24 @@ extern unsigned long trapbase_cpu3[];
160 157
161void __init smp4d_boot_cpus(void) 158void __init smp4d_boot_cpus(void)
162{ 159{
163 int cpucount = 0;
164 int i, mid;
165
166 printk("Entering SMP Mode...\n");
167
168 if (boot_cpu_id) 160 if (boot_cpu_id)
169 current_set[0] = NULL; 161 current_set[0] = NULL;
170
171 local_irq_enable();
172 cpus_clear(cpu_present_map);
173
174 /* XXX This whole thing has to go. See sparc64. */
175 for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
176 cpu_set(mid, cpu_present_map);
177 SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
178 for(i=0; i < NR_CPUS; i++)
179 __cpu_number_map[i] = -1;
180 for(i=0; i < NR_CPUS; i++)
181 __cpu_logical_map[i] = -1;
182 __cpu_number_map[boot_cpu_id] = 0;
183 __cpu_logical_map[0] = boot_cpu_id;
184 current_thread_info()->cpu = boot_cpu_id;
185 smp_store_cpu_info(boot_cpu_id);
186 smp_setup_percpu_timer(); 162 smp_setup_percpu_timer();
187 local_flush_cache_all(); 163 local_flush_cache_all();
188 if (cpu_find_by_instance(1, NULL, NULL)) 164}
189 return; /* Not an MP box. */ 165
190 SMP_PRINTK(("Iterating over CPUs\n")); 166int smp4d_boot_one_cpu(int i)
191 for(i = 0; i < NR_CPUS; i++) { 167{
192 if(i == boot_cpu_id)
193 continue;
194
195 if (cpu_isset(i, cpu_present_map)) {
196 extern unsigned long sun4d_cpu_startup; 168 extern unsigned long sun4d_cpu_startup;
197 unsigned long *entry = &sun4d_cpu_startup; 169 unsigned long *entry = &sun4d_cpu_startup;
198 struct task_struct *p; 170 struct task_struct *p;
199 int timeout; 171 int timeout;
200 int no; 172 int cpu_node;
201 173
174 cpu_find_by_instance(i, &cpu_node,NULL);
202 /* Cook up an idler for this guy. */ 175 /* Cook up an idler for this guy. */
203 p = fork_idle(i); 176 p = fork_idle(i);
204 cpucount++;
205 current_set[i] = task_thread_info(p); 177 current_set[i] = task_thread_info(p);
206 for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
207 && mid != i; no++) ;
208 178
209 /* 179 /*
210 * Initialize the contexts table 180 * Initialize the contexts table
@@ -216,9 +186,9 @@ void __init smp4d_boot_cpus(void)
216 smp_penguin_ctable.reg_size = 0; 186 smp_penguin_ctable.reg_size = 0;
217 187
218 /* whirrr, whirrr, whirrrrrrrrr... */ 188 /* whirrr, whirrr, whirrrrrrrrr... */
219 SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node)); 189 SMP_PRINTK(("Starting CPU %d at %p \n", i, entry));
220 local_flush_cache_all(); 190 local_flush_cache_all();
221 prom_startcpu(cpu_data(no).prom_node, 191 prom_startcpu(cpu_node,
222 &smp_penguin_ctable, 0, (char *)entry); 192 &smp_penguin_ctable, 0, (char *)entry);
223 193
224 SMP_PRINTK(("prom_startcpu returned :)\n")); 194 SMP_PRINTK(("prom_startcpu returned :)\n"));
@@ -230,39 +200,30 @@ void __init smp4d_boot_cpus(void)
230 udelay(200); 200 udelay(200);
231 } 201 }
232 202
233 if(cpu_callin_map[i]) { 203 if (!(cpu_callin_map[i])) {
234 /* Another "Red Snapper". */ 204 printk("Processor %d is stuck.\n", i);
235 __cpu_number_map[i] = cpucount; 205 return -ENODEV;
236 __cpu_logical_map[cpucount] = i; 206
237 } else {
238 cpucount--;
239 printk("Processor %d is stuck.\n", i);
240 }
241 }
242 if(!(cpu_callin_map[i])) {
243 cpu_clear(i, cpu_present_map);
244 __cpu_number_map[i] = -1;
245 }
246 } 207 }
247 local_flush_cache_all(); 208 local_flush_cache_all();
248 if(cpucount == 0) { 209 return 0;
249 printk("Error: only one Processor found.\n"); 210}
250 cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id()); 211
251 } else { 212void __init smp4d_smp_done(void)
252 unsigned long bogosum = 0; 213{
253 214 int i, first;
254 for_each_present_cpu(i) { 215 int *prev;
255 bogosum += cpu_data(i).udelay_val; 216
256 smp_highest_cpu = i; 217 /* setup cpu list for irq rotation */
218 first = 0;
219 prev = &first;
220 for (i = 0; i < NR_CPUS; i++)
221 if (cpu_online(i)) {
222 *prev = i;
223 prev = &cpu_data(i).next;
257 } 224 }
258 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); 225 *prev = first;
259 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", 226 local_flush_cache_all();
260 cpucount + 1,
261 bogosum/(500000/HZ),
262 (bogosum/(5000/HZ))%100);
263 smp_activated = 1;
264 smp_num_cpus = cpucount + 1;
265 }
266 227
267 /* Free unneeded trap tables */ 228 /* Free unneeded trap tables */
268 ClearPageReserved(virt_to_page(trapbase_cpu1)); 229 ClearPageReserved(virt_to_page(trapbase_cpu1));
@@ -334,7 +295,7 @@ void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
334 register int i; 295 register int i;
335 296
336 mask = cpumask_of_cpu(hard_smp4d_processor_id()); 297 mask = cpumask_of_cpu(hard_smp4d_processor_id());
337 cpus_andnot(mask, cpu_present_map, mask); 298 cpus_andnot(mask, cpu_online_map, mask);
338 for(i = 0; i <= high; i++) { 299 for(i = 0; i <= high; i++) {
339 if (cpu_isset(i, mask)) { 300 if (cpu_isset(i, mask)) {
340 ccall_info.processors_in[i] = 0; 301 ccall_info.processors_in[i] = 0;
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
index 0cdfc9d294b4..a41c8a5c2007 100644
--- a/arch/sparc/kernel/sys_sparc.c
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -465,21 +465,21 @@ sys_rt_sigaction(int sig,
465 465
466asmlinkage int sys_getdomainname(char __user *name, int len) 466asmlinkage int sys_getdomainname(char __user *name, int len)
467{ 467{
468 int nlen; 468 int nlen, err;
469 int err = -EFAULT;
470 469
470 if (len < 0 || len > __NEW_UTS_LEN)
471 return -EINVAL;
472
471 down_read(&uts_sem); 473 down_read(&uts_sem);
472 474
473 nlen = strlen(system_utsname.domainname) + 1; 475 nlen = strlen(system_utsname.domainname) + 1;
474
475 if (nlen < len) 476 if (nlen < len)
476 len = nlen; 477 len = nlen;
477 if (len > __NEW_UTS_LEN) 478
478 goto done; 479 err = -EFAULT;
479 if (copy_to_user(name, system_utsname.domainname, len)) 480 if (!copy_to_user(name, system_utsname.domainname, len))
480 goto done; 481 err = 0;
481 err = 0; 482
482done:
483 up_read(&uts_sem); 483 up_read(&uts_sem);
484 return err; 484 return err;
485} 485}
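
The getdomainname() rewrite above validates len first, clamps it to the stored string, and maps a partial copy_to_user() to -EFAULT. Reduced to its essentials, the clamp-then-copy shape looks like this (helper name and src_max bound are hypothetical):

#include <linux/errno.h>
#include <linux/string.h>
#include <asm/uaccess.h>

static int copy_bounded_string(char __user *dst, int len,
			       const char *src, int src_max)
{
	int nlen;

	if (len < 0 || len > src_max)
		return -EINVAL;

	nlen = strlen(src) + 1;		/* include the terminating NUL */
	if (nlen < len)
		len = nlen;

	/* copy_to_user() returns the number of bytes NOT copied */
	return copy_to_user(dst, src, len) ? -EFAULT : 0;
}
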
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 04eb1eab6e3e..845081b01267 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -225,6 +225,32 @@ static __inline__ int has_low_battery(void)
225 return (data1 == data2); /* Was the write blocked? */ 225 return (data1 == data2); /* Was the write blocked? */
226} 226}
227 227
228static void __init mostek_set_system_time(void)
229{
230 unsigned int year, mon, day, hour, min, sec;
231 struct mostek48t02 *mregs;
232
233 mregs = (struct mostek48t02 *)mstk48t02_regs;
234 if(!mregs) {
235 prom_printf("Something wrong, clock regs not mapped yet.\n");
236 prom_halt();
237 }
238 spin_lock_irq(&mostek_lock);
239 mregs->creg |= MSTK_CREG_READ;
240 sec = MSTK_REG_SEC(mregs);
241 min = MSTK_REG_MIN(mregs);
242 hour = MSTK_REG_HOUR(mregs);
243 day = MSTK_REG_DOM(mregs);
244 mon = MSTK_REG_MONTH(mregs);
245 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
246 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
247 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
248 set_normalized_timespec(&wall_to_monotonic,
249 -xtime.tv_sec, -xtime.tv_nsec);
250 mregs->creg &= ~MSTK_CREG_READ;
251 spin_unlock_irq(&mostek_lock);
252}
253
228/* Probe for the real time clock chip on Sun4 */ 254/* Probe for the real time clock chip on Sun4 */
229static __inline__ void sun4_clock_probe(void) 255static __inline__ void sun4_clock_probe(void)
230{ 256{
@@ -273,6 +299,7 @@ static __inline__ void sun4_clock_probe(void)
273#endif 299#endif
274} 300}
275 301
302#ifndef CONFIG_SUN4
276static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) 303static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
277{ 304{
278 struct device_node *dp = op->node; 305 struct device_node *dp = op->node;
@@ -307,6 +334,8 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id
307 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP) 334 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
308 kick_start_clock(); 335 kick_start_clock();
309 336
337 mostek_set_system_time();
338
310 return 0; 339 return 0;
311} 340}
312 341
@@ -325,56 +354,37 @@ static struct of_platform_driver clock_driver = {
325 354
326 355
327/* Probe for the mostek real time clock chip. */ 356/* Probe for the mostek real time clock chip. */
328static void clock_init(void) 357static int __init clock_init(void)
329{ 358{
330 of_register_driver(&clock_driver, &of_bus_type); 359 return of_register_driver(&clock_driver, &of_bus_type);
331} 360}
332 361
362/* Must be after subsys_initcall() so that busses are probed. Must
363 * be before device_initcall() because things like the RTC driver
364 * need to see the clock registers.
365 */
366fs_initcall(clock_init);
367#endif /* !CONFIG_SUN4 */
368
333void __init sbus_time_init(void) 369void __init sbus_time_init(void)
334{ 370{
335 unsigned int year, mon, day, hour, min, sec;
336 struct mostek48t02 *mregs;
337
338#ifdef CONFIG_SUN4
339 int temp;
340 struct intersil *iregs;
341#endif
342 371
343 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM); 372 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
344 btfixup(); 373 btfixup();
345 374
346 if (ARCH_SUN4) 375 if (ARCH_SUN4)
347 sun4_clock_probe(); 376 sun4_clock_probe();
348 else
349 clock_init();
350 377
351 sparc_init_timers(timer_interrupt); 378 sparc_init_timers(timer_interrupt);
352 379
353#ifdef CONFIG_SUN4 380#ifdef CONFIG_SUN4
354 if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) { 381 if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) {
355#endif 382 mostek_set_system_time();
356 mregs = (struct mostek48t02 *)mstk48t02_regs;
357 if(!mregs) {
358 prom_printf("Something wrong, clock regs not mapped yet.\n");
359 prom_halt();
360 }
361 spin_lock_irq(&mostek_lock);
362 mregs->creg |= MSTK_CREG_READ;
363 sec = MSTK_REG_SEC(mregs);
364 min = MSTK_REG_MIN(mregs);
365 hour = MSTK_REG_HOUR(mregs);
366 day = MSTK_REG_DOM(mregs);
367 mon = MSTK_REG_MONTH(mregs);
368 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
369 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
370 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
371 set_normalized_timespec(&wall_to_monotonic,
372 -xtime.tv_sec, -xtime.tv_nsec);
373 mregs->creg &= ~MSTK_CREG_READ;
374 spin_unlock_irq(&mostek_lock);
375#ifdef CONFIG_SUN4
376 } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) { 383 } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) {
377 /* initialise the intersil on sun4 */ 384 /* initialise the intersil on sun4 */
385 unsigned int year, mon, day, hour, min, sec;
386 int temp;
387 struct intersil *iregs;
378 388
379 iregs=intersil_clock; 389 iregs=intersil_clock;
380 if(!iregs) { 390 if(!iregs) {
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 42c1c700c0a7..2bb1309003dd 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -64,6 +64,7 @@ iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
64 64
65 sbus->iommu = (struct iommu_struct *)iounit; 65 sbus->iommu = (struct iommu_struct *)iounit;
66 iounit->page_table = xpt; 66 iounit->page_table = xpt;
67 spin_lock_init(&iounit->lock);
67 68
68 for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); 69 for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
69 xpt < xptend;) 70 xpt < xptend;)
diff --git a/arch/sparc/prom/tree.c b/arch/sparc/prom/tree.c
index 2bf03ee8cde5..5ec246573a98 100644
--- a/arch/sparc/prom/tree.c
+++ b/arch/sparc/prom/tree.c
@@ -205,24 +205,6 @@ int prom_searchsiblings(int node_start, char *nodename)
205 return 0; 205 return 0;
206} 206}
207 207
208/* Gets name in the form prom v2+ uses it (name@x,yyyyy or name (if no reg)) */
209int prom_getname (int node, char *buffer, int len)
210{
211 int i;
212 struct linux_prom_registers reg[PROMREG_MAX];
213
214 i = prom_getproperty (node, "name", buffer, len);
215 if (i <= 0) return -1;
216 buffer [i] = 0;
217 len -= i;
218 i = prom_getproperty (node, "reg", (char *)reg, sizeof (reg));
219 if (i <= 0) return 0;
220 if (len < 11) return -1;
221 buffer = strchr (buffer, 0);
222 sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
223 return 0;
224}
225
226/* Interal version of nextprop that does not alter return values. */ 208/* Interal version of nextprop that does not alter return values. */
227char * __prom_nextprop(int node, char * oprop) 209char * __prom_nextprop(int node, char * oprop)
228{ 210{
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 38353621069e..43d9229fca07 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.18-rc1 3# Linux kernel version: 2.6.18-rc2
4# Wed Jul 12 14:00:58 2006 4# Fri Jul 21 14:19:24 2006
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -36,6 +36,7 @@ CONFIG_SWAP=y
36CONFIG_SYSVIPC=y 36CONFIG_SYSVIPC=y
37CONFIG_POSIX_MQUEUE=y 37CONFIG_POSIX_MQUEUE=y
38# CONFIG_BSD_PROCESS_ACCT is not set 38# CONFIG_BSD_PROCESS_ACCT is not set
39# CONFIG_TASKSTATS is not set
39CONFIG_SYSCTL=y 40CONFIG_SYSCTL=y
40# CONFIG_AUDIT is not set 41# CONFIG_AUDIT is not set
41# CONFIG_IKCONFIG is not set 42# CONFIG_IKCONFIG is not set
@@ -1120,7 +1121,7 @@ CONFIG_USB_HIDDEV=y
1120# CONFIG_USB_LEGOTOWER is not set 1121# CONFIG_USB_LEGOTOWER is not set
1121# CONFIG_USB_LCD is not set 1122# CONFIG_USB_LCD is not set
1122# CONFIG_USB_LED is not set 1123# CONFIG_USB_LED is not set
1123# CONFIG_USB_CY7C63 is not set 1124# CONFIG_USB_CYPRESS_CY7C63 is not set
1124# CONFIG_USB_CYTHERM is not set 1125# CONFIG_USB_CYTHERM is not set
1125# CONFIG_USB_PHIDGETKIT is not set 1126# CONFIG_USB_PHIDGETKIT is not set
1126# CONFIG_USB_PHIDGETSERVO is not set 1127# CONFIG_USB_PHIDGETSERVO is not set
@@ -1279,7 +1280,6 @@ CONFIG_RAMFS=y
1279# CONFIG_NFSD is not set 1280# CONFIG_NFSD is not set
1280# CONFIG_SMB_FS is not set 1281# CONFIG_SMB_FS is not set
1281# CONFIG_CIFS is not set 1282# CONFIG_CIFS is not set
1282# CONFIG_CIFS_DEBUG2 is not set
1283# CONFIG_NCP_FS is not set 1283# CONFIG_NCP_FS is not set
1284# CONFIG_CODA_FS is not set 1284# CONFIG_CODA_FS is not set
1285# CONFIG_AFS_FS is not set 1285# CONFIG_AFS_FS is not set
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index f8ef2f2b9b37..ec10f7edcf86 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -66,9 +66,6 @@ static int check_cpu_node(struct device_node *dp, int *cur_inst,
66 void *compare_arg, 66 void *compare_arg,
67 struct device_node **dev_node, int *mid) 67 struct device_node **dev_node, int *mid)
68{ 68{
69 if (strcmp(dp->type, "cpu"))
70 return -ENODEV;
71
72 if (!compare(dp, *cur_inst, compare_arg)) { 69 if (!compare(dp, *cur_inst, compare_arg)) {
73 if (dev_node) 70 if (dev_node)
74 *dev_node = dp; 71 *dev_node = dp;
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 75684b56767e..c8e9dc9d68a9 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -551,9 +551,10 @@ setup_trap_table:
551 save %sp, -192, %sp 551 save %sp, -192, %sp
552 552
553 /* Force interrupts to be disabled. */ 553 /* Force interrupts to be disabled. */
554 rdpr %pstate, %o1 554 rdpr %pstate, %l0
555 andn %o1, PSTATE_IE, %o1 555 andn %l0, PSTATE_IE, %o1
556 wrpr %o1, 0x0, %pstate 556 wrpr %o1, 0x0, %pstate
557 rdpr %pil, %l1
557 wrpr %g0, 15, %pil 558 wrpr %g0, 15, %pil
558 559
559 /* Make the firmware call to jump over to the Linux trap table. */ 560 /* Make the firmware call to jump over to the Linux trap table. */
@@ -622,11 +623,9 @@ setup_trap_table:
622 call init_irqwork_curcpu 623 call init_irqwork_curcpu
623 nop 624 nop
624 625
625 /* Now we can turn interrupts back on. */ 626 /* Now we can restore interrupt state. */
626 rdpr %pstate, %o1 627 wrpr %l0, 0, %pstate
627 or %o1, PSTATE_IE, %o1 628 wrpr %l1, 0x0, %pil
628 wrpr %o1, 0, %pstate
629 wrpr %g0, 0x0, %pil
630 629
631 ret 630 ret
632 restore 631 restore
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 7064cee290ae..238bbf6de07d 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -542,9 +542,17 @@ static void __init build_device_resources(struct of_device *op,
542 /* Convert to num-cells. */ 542 /* Convert to num-cells. */
543 num_reg /= 4; 543 num_reg /= 4;
544 544
545 /* Conver to num-entries. */ 545 /* Convert to num-entries. */
546 num_reg /= na + ns; 546 num_reg /= na + ns;
547 547
548 /* Prevent overruning the op->resources[] array. */
549 if (num_reg > PROMREG_MAX) {
550 printk(KERN_WARNING "%s: Too many regs (%d), "
551 "limiting to %d.\n",
552 op->node->full_name, num_reg, PROMREG_MAX);
553 num_reg = PROMREG_MAX;
554 }
555
548 for (index = 0; index < num_reg; index++) { 556 for (index = 0; index < num_reg; index++) {
549 struct resource *r = &op->resource[index]; 557 struct resource *r = &op->resource[index];
550 u32 addr[OF_MAX_ADDR_CELLS]; 558 u32 addr[OF_MAX_ADDR_CELLS];
@@ -650,8 +658,22 @@ apply_interrupt_map(struct device_node *dp, struct device_node *pp,
650 next: 658 next:
651 imap += (na + 3); 659 imap += (na + 3);
652 } 660 }
653 if (i == imlen) 661 if (i == imlen) {
662 /* Psycho and Sabre PCI controllers can have 'interrupt-map'
663 * properties that do not include the on-board device
664 * interrupts. Instead, the device's 'interrupts' property
665 * is already a fully specified INO value.
666 *
667 * Handle this by deciding that, if we didn't get a
668 * match in the parent's 'interrupt-map', and the
669 * parent is an IRQ translater, then use the parent as
670 * our IRQ controller.
671 */
672 if (pp->irq_trans)
673 return pp;
674
654 return NULL; 675 return NULL;
676 }
655 677
656 *irq_p = irq; 678 *irq_p = irq;
657 cp = of_find_node_by_phandle(handle); 679 cp = of_find_node_by_phandle(handle);
@@ -803,6 +825,14 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
803 op->num_irqs = 0; 825 op->num_irqs = 0;
804 } 826 }
805 827
828 /* Prevent overruning the op->irqs[] array. */
829 if (op->num_irqs > PROMINTR_MAX) {
830 printk(KERN_WARNING "%s: Too many irqs (%d), "
831 "limiting to %d.\n",
832 dp->full_name, op->num_irqs, PROMINTR_MAX);
833 op->num_irqs = PROMINTR_MAX;
834 }
835
806 build_device_resources(op, parent); 836 build_device_resources(op, parent);
807 for (i = 0; i < op->num_irqs; i++) 837 for (i = 0; i < op->num_irqs; i++)
808 op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]); 838 op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]);
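
Both new checks in of_device.c cap a firmware-supplied count before it is used to fill a fixed-size array. Stripped of the OF specifics, the guard amounts to this (bound and names hypothetical):

#include <linux/kernel.h>

#define MAX_ENTRIES	64	/* hypothetical bound, stands in for PROMREG_MAX/PROMINTR_MAX */

static int clamp_fw_count(const char *node_name, int count)
{
	/* never trust a count read from firmware to fit a static array */
	if (count > MAX_ENTRIES) {
		printk(KERN_WARNING "%s: too many entries (%d), limiting to %d\n",
		       node_name, count, MAX_ENTRIES);
		count = MAX_ENTRIES;
	}
	return count;
}
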
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 197a7ffd57ee..1ec0aab68c08 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1099,9 +1099,6 @@ static void pbm_register_toplevel_resources(struct pci_controller_info *p,
1099{ 1099{
1100 char *name = pbm->name; 1100 char *name = pbm->name;
1101 1101
1102 sprintf(name, "PSYCHO%d PBM%c",
1103 p->index,
1104 (pbm == &p->pbm_A ? 'A' : 'B'));
1105 pbm->io_space.name = pbm->mem_space.name = name; 1102 pbm->io_space.name = pbm->mem_space.name = name;
1106 1103
1107 request_resource(&ioport_resource, &pbm->io_space); 1104 request_resource(&ioport_resource, &pbm->io_space);
@@ -1203,12 +1200,13 @@ static void psycho_pbm_init(struct pci_controller_info *p,
1203 pbm->io_space.flags = IORESOURCE_IO; 1200 pbm->io_space.flags = IORESOURCE_IO;
1204 pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE; 1201 pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
1205 pbm->mem_space.flags = IORESOURCE_MEM; 1202 pbm->mem_space.flags = IORESOURCE_MEM;
1206 pbm_register_toplevel_resources(p, pbm);
1207 1203
1208 pbm->parent = p; 1204 pbm->parent = p;
1209 pbm->prom_node = dp; 1205 pbm->prom_node = dp;
1210 pbm->name = dp->full_name; 1206 pbm->name = dp->full_name;
1211 1207
1208 pbm_register_toplevel_resources(p, pbm);
1209
1212 printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n", 1210 printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n",
1213 pbm->name, 1211 pbm->name,
1214 pbm->chip_version, pbm->chip_revision); 1212 pbm->chip_version, pbm->chip_revision);
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index c86007a2aa3f..5cc5ab63293f 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -344,10 +344,12 @@ static unsigned long __psycho_onboard_imap_off[] = {
344/*0x2f*/ PSYCHO_IMAP_CE, 344/*0x2f*/ PSYCHO_IMAP_CE,
345/*0x30*/ PSYCHO_IMAP_A_ERR, 345/*0x30*/ PSYCHO_IMAP_A_ERR,
346/*0x31*/ PSYCHO_IMAP_B_ERR, 346/*0x31*/ PSYCHO_IMAP_B_ERR,
347/*0x32*/ PSYCHO_IMAP_PMGMT 347/*0x32*/ PSYCHO_IMAP_PMGMT,
348/*0x33*/ PSYCHO_IMAP_GFX,
349/*0x34*/ PSYCHO_IMAP_EUPA,
348}; 350};
349#define PSYCHO_ONBOARD_IRQ_BASE 0x20 351#define PSYCHO_ONBOARD_IRQ_BASE 0x20
350#define PSYCHO_ONBOARD_IRQ_LAST 0x32 352#define PSYCHO_ONBOARD_IRQ_LAST 0x34
351#define psycho_onboard_imap_offset(__ino) \ 353#define psycho_onboard_imap_offset(__ino) \
352 __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE] 354 __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
353 355
@@ -529,6 +531,10 @@ static unsigned long __sabre_onboard_imap_off[] = {
529/*0x2e*/ SABRE_IMAP_UE, 531/*0x2e*/ SABRE_IMAP_UE,
530/*0x2f*/ SABRE_IMAP_CE, 532/*0x2f*/ SABRE_IMAP_CE,
531/*0x30*/ SABRE_IMAP_PCIERR, 533/*0x30*/ SABRE_IMAP_PCIERR,
534/*0x31*/ 0 /* reserved */,
535/*0x32*/ 0 /* reserved */,
536/*0x33*/ SABRE_IMAP_GFX,
537/*0x34*/ SABRE_IMAP_EUPA,
532}; 538};
533#define SABRE_ONBOARD_IRQ_BASE 0x20 539#define SABRE_ONBOARD_IRQ_BASE 0x20
534#define SABRE_ONBOARD_IRQ_LAST 0x30 540#define SABRE_ONBOARD_IRQ_LAST 0x30
@@ -895,6 +901,8 @@ static unsigned long sysio_irq_offsets[] = {
895 SYSIO_IMAP_CE, 901 SYSIO_IMAP_CE,
896 SYSIO_IMAP_SBERR, 902 SYSIO_IMAP_SBERR,
897 SYSIO_IMAP_PMGMT, 903 SYSIO_IMAP_PMGMT,
904 SYSIO_IMAP_GFX,
905 SYSIO_IMAP_EUPA,
898}; 906};
899 907
900#undef bogon 908#undef bogon
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 237524d87cab..beffc82a1e85 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -254,7 +254,6 @@ EXPORT_SYMBOL(prom_getproperty);
254EXPORT_SYMBOL(prom_node_has_property); 254EXPORT_SYMBOL(prom_node_has_property);
255EXPORT_SYMBOL(prom_setprop); 255EXPORT_SYMBOL(prom_setprop);
256EXPORT_SYMBOL(saved_command_line); 256EXPORT_SYMBOL(saved_command_line);
257EXPORT_SYMBOL(prom_getname);
258EXPORT_SYMBOL(prom_finddevice); 257EXPORT_SYMBOL(prom_finddevice);
259EXPORT_SYMBOL(prom_feval); 258EXPORT_SYMBOL(prom_feval);
260EXPORT_SYMBOL(prom_getbool); 259EXPORT_SYMBOL(prom_getbool);
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 51c056df528e..054d0abdb7ee 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -701,21 +701,21 @@ extern void check_pending(int signum);
701 701
702asmlinkage long sys_getdomainname(char __user *name, int len) 702asmlinkage long sys_getdomainname(char __user *name, int len)
703{ 703{
704 int nlen; 704 int nlen, err;
705 int err = -EFAULT; 705
706 if (len < 0 || len > __NEW_UTS_LEN)
707 return -EINVAL;
706 708
707 down_read(&uts_sem); 709 down_read(&uts_sem);
708 710
709 nlen = strlen(system_utsname.domainname) + 1; 711 nlen = strlen(system_utsname.domainname) + 1;
710
711 if (nlen < len) 712 if (nlen < len)
712 len = nlen; 713 len = nlen;
713 if (len > __NEW_UTS_LEN) 714
714 goto done; 715 err = -EFAULT;
715 if (copy_to_user(name, system_utsname.domainname, len)) 716 if (!copy_to_user(name, system_utsname.domainname, len))
716 goto done; 717 err = 0;
717 err = 0; 718
718done:
719 up_read(&uts_sem); 719 up_read(&uts_sem);
720 return err; 720 return err;
721} 721}
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index b43de647ba73..094d3e35be18 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -928,8 +928,6 @@ static void sparc64_start_timers(void)
928 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 928 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
929 : /* no outputs */ 929 : /* no outputs */
930 : "r" (pstate)); 930 : "r" (pstate));
931
932 local_irq_enable();
933} 931}
934 932
935struct freq_table { 933struct freq_table {
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 1605967cce91..55ae802dc0ad 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/kprobes.h> 21#include <linux/kprobes.h>
22#include <linux/kallsyms.h>
22 23
23#include <asm/page.h> 24#include <asm/page.h>
24#include <asm/pgtable.h> 25#include <asm/pgtable.h>
@@ -132,6 +133,8 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
132 133
133 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", 134 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
134 regs->tpc); 135 regs->tpc);
136 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
137 print_symbol("RPC: <%s>\n", regs->u_regs[15]);
135 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); 138 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
136 __asm__("mov %%sp, %0" : "=r" (ksp)); 139 __asm__("mov %%sp, %0" : "=r" (ksp));
137 show_stack(current, ksp); 140 show_stack(current, ksp);
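
The fault.c hunk above prints the saved return address (u_regs[15], i.e. %i7) both raw and resolved through kallsyms. print_symbol() takes a format string containing a single %s plus a kernel text address; a small sketch (helper name hypothetical):

#include <linux/kernel.h>
#include <linux/kallsyms.h>

static void report_caller(unsigned long retpc)
{
	printk(KERN_CRIT "caller at [%016lx]\n", retpc);
	/* resolved to symbol+offset/size when kallsyms is available */
	print_symbol("caller: <%s>\n", retpc);
}
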
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index 49075abd7cbc..500f05e2cfcb 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -193,91 +193,6 @@ prom_searchsiblings(int node_start, const char *nodename)
193 return 0; 193 return 0;
194} 194}
195 195
196/* Gets name in the {name@x,yyyyy|name (if no reg)} form */
197int
198prom_getname (int node, char *buffer, int len)
199{
200 int i, sbus = 0;
201 int pci = 0, ebus = 0, ide = 0;
202 struct linux_prom_registers *reg;
203 struct linux_prom64_registers reg64[PROMREG_MAX];
204
205 for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) {
206 i = prom_getproperty (sbus, "name", buffer, len);
207 if (i > 0) {
208 buffer [i] = 0;
209 if (!strcmp (buffer, "sbus"))
210 goto getit;
211 }
212 }
213 if ((pci = prom_getparent (node))) {
214 i = prom_getproperty (pci, "name", buffer, len);
215 if (i > 0) {
216 buffer [i] = 0;
217 if (!strcmp (buffer, "pci"))
218 goto getit;
219 }
220 pci = 0;
221 }
222 if ((ebus = prom_getparent (node))) {
223 i = prom_getproperty (ebus, "name", buffer, len);
224 if (i > 0) {
225 buffer[i] = 0;
226 if (!strcmp (buffer, "ebus"))
227 goto getit;
228 }
229 ebus = 0;
230 }
231 if ((ide = prom_getparent (node))) {
232 i = prom_getproperty (ide, "name", buffer, len);
233 if (i > 0) {
234 buffer [i] = 0;
235 if (!strcmp (buffer, "ide"))
236 goto getit;
237 }
238 ide = 0;
239 }
240getit:
241 i = prom_getproperty (node, "name", buffer, len);
242 if (i <= 0) {
243 buffer [0] = 0;
244 return -1;
245 }
246 buffer [i] = 0;
247 len -= i;
248 i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64));
249 if (i <= 0) return 0;
250 if (len < 16) return -1;
251 buffer = strchr (buffer, 0);
252 if (sbus) {
253 reg = (struct linux_prom_registers *)reg64;
254 sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
255 } else if (pci) {
256 int dev, fn;
257 reg = (struct linux_prom_registers *)reg64;
258 fn = (reg[0].which_io >> 8) & 0x07;
259 dev = (reg[0].which_io >> 11) & 0x1f;
260 if (fn)
261 sprintf (buffer, "@%x,%x", dev, fn);
262 else
263 sprintf (buffer, "@%x", dev);
264 } else if (ebus) {
265 reg = (struct linux_prom_registers *)reg64;
266 sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
267 } else if (ide) {
268 reg = (struct linux_prom_registers *)reg64;
269 sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
270 } else if (i == 4) { /* Happens on 8042's children on Ultra/PCI. */
271 reg = (struct linux_prom_registers *)reg64;
272 sprintf (buffer, "@%x", reg[0].which_io);
273 } else {
274 sprintf (buffer, "@%x,%x",
275 (unsigned int)(reg64[0].phys_addr >> 36),
276 (unsigned int)(reg64[0].phys_addr));
277 }
278 return 0;
279}
280
281/* Return the first property type for node 'node'. 196/* Return the first property type for node 'node'.
282 * buffer should be at least 32B in length 197 * buffer should be at least 32B in length
283 */ 198 */
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64
index dffd1184c956..9558a7cf34d5 100644
--- a/arch/um/Makefile-x86_64
+++ b/arch/um/Makefile-x86_64
@@ -11,6 +11,7 @@ USER_CFLAGS += -fno-builtin -m64
11CHECKFLAGS += -m64 11CHECKFLAGS += -m64
12AFLAGS += -m64 12AFLAGS += -m64
13LDFLAGS += -m elf_x86_64 13LDFLAGS += -m elf_x86_64
14CPPFLAGS += -m64
14 15
15ELF_ARCH := i386:x86-64 16ELF_ARCH := i386:x86-64
16ELF_FORMAT := elf64-x86-64 17ELF_FORMAT := elf64-x86-64
diff --git a/arch/um/include/longjmp.h b/arch/um/include/longjmp.h
index 8e7053013f7b..1b5c0131a12e 100644
--- a/arch/um/include/longjmp.h
+++ b/arch/um/include/longjmp.h
@@ -8,8 +8,8 @@
8 longjmp(*buf, val); \ 8 longjmp(*buf, val); \
9} while(0) 9} while(0)
10 10
11#define UML_SETJMP(buf, enable) ({ \ 11#define UML_SETJMP(buf) ({ \
12 int n; \ 12 int n, enable; \
13 enable = get_signals(); \ 13 enable = get_signals(); \
14 n = setjmp(*buf); \ 14 n = setjmp(*buf); \
15 if(n != 0) \ 15 if(n != 0) \
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index b6c52496e15a..5316e8a4a4fd 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -15,9 +15,9 @@
15#include "irq_user.h" 15#include "irq_user.h"
16#include "sysdep/tls.h" 16#include "sysdep/tls.h"
17 17
18#define OS_TYPE_FILE 1 18#define OS_TYPE_FILE 1
19#define OS_TYPE_DIR 2 19#define OS_TYPE_DIR 2
20#define OS_TYPE_SYMLINK 3 20#define OS_TYPE_SYMLINK 3
21#define OS_TYPE_CHARDEV 4 21#define OS_TYPE_CHARDEV 4
22#define OS_TYPE_BLOCKDEV 5 22#define OS_TYPE_BLOCKDEV 5
23#define OS_TYPE_FIFO 6 23#define OS_TYPE_FIFO 6
@@ -61,68 +61,68 @@ struct openflags {
61}; 61};
62 62
63#define OPENFLAGS() ((struct openflags) { .r = 0, .w = 0, .s = 0, .c = 0, \ 63#define OPENFLAGS() ((struct openflags) { .r = 0, .w = 0, .s = 0, .c = 0, \
64 .t = 0, .a = 0, .e = 0, .cl = 0 }) 64 .t = 0, .a = 0, .e = 0, .cl = 0 })
65 65
66static inline struct openflags of_read(struct openflags flags) 66static inline struct openflags of_read(struct openflags flags)
67{ 67{
68 flags.r = 1; 68 flags.r = 1;
69 return(flags); 69 return flags;
70} 70}
71 71
72static inline struct openflags of_write(struct openflags flags) 72static inline struct openflags of_write(struct openflags flags)
73{ 73{
74 flags.w = 1; 74 flags.w = 1;
75 return(flags); 75 return flags;
76} 76}
77 77
78static inline struct openflags of_rdwr(struct openflags flags) 78static inline struct openflags of_rdwr(struct openflags flags)
79{ 79{
80 return(of_read(of_write(flags))); 80 return of_read(of_write(flags));
81} 81}
82 82
83static inline struct openflags of_set_rw(struct openflags flags, int r, int w) 83static inline struct openflags of_set_rw(struct openflags flags, int r, int w)
84{ 84{
85 flags.r = r; 85 flags.r = r;
86 flags.w = w; 86 flags.w = w;
87 return(flags); 87 return flags;
88} 88}
89 89
90static inline struct openflags of_sync(struct openflags flags) 90static inline struct openflags of_sync(struct openflags flags)
91{ 91{
92 flags.s = 1; 92 flags.s = 1;
93 return(flags); 93 return flags;
94} 94}
95 95
96static inline struct openflags of_create(struct openflags flags) 96static inline struct openflags of_create(struct openflags flags)
97{ 97{
98 flags.c = 1; 98 flags.c = 1;
99 return(flags); 99 return flags;
100} 100}
101 101
102static inline struct openflags of_trunc(struct openflags flags) 102static inline struct openflags of_trunc(struct openflags flags)
103{ 103{
104 flags.t = 1; 104 flags.t = 1;
105 return(flags); 105 return flags;
106} 106}
107 107
108static inline struct openflags of_append(struct openflags flags) 108static inline struct openflags of_append(struct openflags flags)
109{ 109{
110 flags.a = 1; 110 flags.a = 1;
111 return(flags); 111 return flags;
112} 112}
113 113
114static inline struct openflags of_excl(struct openflags flags) 114static inline struct openflags of_excl(struct openflags flags)
115{ 115{
116 flags.e = 1; 116 flags.e = 1;
117 return(flags); 117 return flags;
118} 118}
119 119
120static inline struct openflags of_cloexec(struct openflags flags) 120static inline struct openflags of_cloexec(struct openflags flags)
121{ 121{
122 flags.cl = 1; 122 flags.cl = 1;
123 return(flags); 123 return flags;
124} 124}
125 125
126/* file.c */ 126/* file.c */
127extern int os_stat_file(const char *file_name, struct uml_stat *buf); 127extern int os_stat_file(const char *file_name, struct uml_stat *buf);
128extern int os_stat_fd(const int fd, struct uml_stat *buf); 128extern int os_stat_fd(const int fd, struct uml_stat *buf);
@@ -204,7 +204,7 @@ extern int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr);
204 204
205extern int os_map_memory(void *virt, int fd, unsigned long long off, 205extern int os_map_memory(void *virt, int fd, unsigned long long off,
206 unsigned long len, int r, int w, int x); 206 unsigned long len, int r, int w, int x);
207extern int os_protect_memory(void *addr, unsigned long len, 207extern int os_protect_memory(void *addr, unsigned long len,
208 int r, int w, int x); 208 int r, int w, int x);
209extern int os_unmap_memory(void *addr, int len); 209extern int os_unmap_memory(void *addr, int len);
210extern int os_drop_memory(void *addr, int length); 210extern int os_drop_memory(void *addr, int length);
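
The of_*() helpers above build an open-flags value by composition; a hypothetical usage sketch using only names defined in this header:

/* open read-write, creating and truncating the target */
struct openflags flags = of_trunc(of_create(of_rdwr(OPENFLAGS())));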
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index abf14aaf905f..48cf88dd02d4 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -110,7 +110,7 @@ long sys_uname(struct old_utsname __user * name)
110 if (!name) 110 if (!name)
111 return -EFAULT; 111 return -EFAULT;
112 down_read(&uts_sem); 112 down_read(&uts_sem);
113 err = copy_to_user(name, utsname(), sizeof (*name)); 113 err = copy_to_user(name, &system_utsname, sizeof (*name));
114 up_read(&uts_sem); 114 up_read(&uts_sem);
115 return err?-EFAULT:0; 115 return err?-EFAULT:0;
116} 116}
@@ -126,21 +126,21 @@ long sys_olduname(struct oldold_utsname __user * name)
126 126
127 down_read(&uts_sem); 127 down_read(&uts_sem);
128 128
129 error = __copy_to_user(&name->sysname, &utsname()->sysname, 129 error = __copy_to_user(&name->sysname,&system_utsname.sysname,
130 __OLD_UTS_LEN); 130 __OLD_UTS_LEN);
131 error |= __put_user(0, name->sysname + __OLD_UTS_LEN); 131 error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
132 error |= __copy_to_user(&name->nodename, &utsname()->nodename, 132 error |= __copy_to_user(&name->nodename,&system_utsname.nodename,
133 __OLD_UTS_LEN); 133 __OLD_UTS_LEN);
134 error |= __put_user(0, name->nodename + __OLD_UTS_LEN); 134 error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
135 error |= __copy_to_user(&name->release, &utsname()->release, 135 error |= __copy_to_user(&name->release,&system_utsname.release,
136 __OLD_UTS_LEN); 136 __OLD_UTS_LEN);
137 error |= __put_user(0, name->release + __OLD_UTS_LEN); 137 error |= __put_user(0,name->release+__OLD_UTS_LEN);
138 error |= __copy_to_user(&name->version, &utsname()->version, 138 error |= __copy_to_user(&name->version,&system_utsname.version,
139 __OLD_UTS_LEN); 139 __OLD_UTS_LEN);
140 error |= __put_user(0, name->version + __OLD_UTS_LEN); 140 error |= __put_user(0,name->version+__OLD_UTS_LEN);
141 error |= __copy_to_user(&name->machine, &utsname()->machine, 141 error |= __copy_to_user(&name->machine,&system_utsname.machine,
142 __OLD_UTS_LEN); 142 __OLD_UTS_LEN);
143 error |= __put_user(0, name->machine + __OLD_UTS_LEN); 143 error |= __put_user(0,name->machine+__OLD_UTS_LEN);
144 144
145 up_read(&uts_sem); 145 up_read(&uts_sem);
146 146
diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S
index 72acdce205e0..f8aeb448aab6 100644
--- a/arch/um/kernel/vmlinux.lds.S
+++ b/arch/um/kernel/vmlinux.lds.S
@@ -1,5 +1,3 @@
1/* in case the preprocessor is a 32bit one */
2#undef i386
3#ifdef CONFIG_LD_SCRIPT_STATIC 1#ifdef CONFIG_LD_SCRIPT_STATIC
4#include "uml.lds.S" 2#include "uml.lds.S"
5#else 3#else
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index b1cda818f5b5..b98d3ca2cd1b 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -273,12 +273,12 @@ void init_new_thread_signals(void)
273int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr) 273int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr)
274{ 274{
275 jmp_buf buf; 275 jmp_buf buf;
276 int n, enable; 276 int n;
277 277
278 *jmp_ptr = &buf; 278 *jmp_ptr = &buf;
279 n = UML_SETJMP(&buf, enable); 279 n = UML_SETJMP(&buf);
280 if(n != 0) 280 if(n != 0)
281 return(n); 281 return n;
282 (*fn)(arg); 282 (*fn)(arg);
283 return(0); 283 return 0;
284} 284}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index bf35572d9cfa..7baf90fda58b 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -435,7 +435,6 @@ void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
435{ 435{
436 unsigned long flags; 436 unsigned long flags;
437 jmp_buf switch_buf, fork_buf; 437 jmp_buf switch_buf, fork_buf;
438 int enable;
439 438
440 *switch_buf_ptr = &switch_buf; 439 *switch_buf_ptr = &switch_buf;
441 *fork_buf_ptr = &fork_buf; 440 *fork_buf_ptr = &fork_buf;
@@ -450,7 +449,7 @@ void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
450 */ 449 */
451 flags = get_signals(); 450 flags = get_signals();
452 block_signals(); 451 block_signals();
453 if(UML_SETJMP(&fork_buf, enable) == 0) 452 if(UML_SETJMP(&fork_buf) == 0)
454 new_thread_proc(stack, handler); 453 new_thread_proc(stack, handler);
455 454
456 remove_sigstack(); 455 remove_sigstack();
@@ -467,21 +466,19 @@ void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
467void thread_wait(void *sw, void *fb) 466void thread_wait(void *sw, void *fb)
468{ 467{
469 jmp_buf buf, **switch_buf = sw, *fork_buf; 468 jmp_buf buf, **switch_buf = sw, *fork_buf;
470 int enable;
471 469
472 *switch_buf = &buf; 470 *switch_buf = &buf;
473 fork_buf = fb; 471 fork_buf = fb;
474 if(UML_SETJMP(&buf, enable) == 0) 472 if(UML_SETJMP(&buf) == 0)
475 siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK); 473 siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK);
476} 474}
477 475
478void switch_threads(void *me, void *next) 476void switch_threads(void *me, void *next)
479{ 477{
480 jmp_buf my_buf, **me_ptr = me, *next_buf = next; 478 jmp_buf my_buf, **me_ptr = me, *next_buf = next;
481 int enable;
482 479
483 *me_ptr = &my_buf; 480 *me_ptr = &my_buf;
484 if(UML_SETJMP(&my_buf, enable) == 0) 481 if(UML_SETJMP(&my_buf) == 0)
485 UML_LONGJMP(next_buf, 1); 482 UML_LONGJMP(next_buf, 1);
486} 483}
487 484
@@ -495,14 +492,14 @@ static jmp_buf *cb_back;
495int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr) 492int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
496{ 493{
497 jmp_buf **switch_buf = switch_buf_ptr; 494 jmp_buf **switch_buf = switch_buf_ptr;
498 int n, enable; 495 int n;
499 496
500 set_handler(SIGWINCH, (__sighandler_t) sig_handler, 497 set_handler(SIGWINCH, (__sighandler_t) sig_handler,
501 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM, 498 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM,
502 SIGVTALRM, -1); 499 SIGVTALRM, -1);
503 500
504 *fork_buf_ptr = &initial_jmpbuf; 501 *fork_buf_ptr = &initial_jmpbuf;
505 n = UML_SETJMP(&initial_jmpbuf, enable); 502 n = UML_SETJMP(&initial_jmpbuf);
506 switch(n){ 503 switch(n){
507 case INIT_JMP_NEW_THREAD: 504 case INIT_JMP_NEW_THREAD:
508 new_thread_proc((void *) stack, new_thread_handler); 505 new_thread_proc((void *) stack, new_thread_handler);
@@ -529,14 +526,13 @@ int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
529void initial_thread_cb_skas(void (*proc)(void *), void *arg) 526void initial_thread_cb_skas(void (*proc)(void *), void *arg)
530{ 527{
531 jmp_buf here; 528 jmp_buf here;
532 int enable;
533 529
534 cb_proc = proc; 530 cb_proc = proc;
535 cb_arg = arg; 531 cb_arg = arg;
536 cb_back = &here; 532 cb_back = &here;
537 533
538 block_signals(); 534 block_signals();
539 if(UML_SETJMP(&here, enable) == 0) 535 if(UML_SETJMP(&here) == 0)
540 UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK); 536 UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
541 unblock_signals(); 537 unblock_signals();
542 538
diff --git a/arch/um/os-Linux/uaccess.c b/arch/um/os-Linux/uaccess.c
index e523719330b2..865f6a6a2590 100644
--- a/arch/um/os-Linux/uaccess.c
+++ b/arch/um/os-Linux/uaccess.c
@@ -14,11 +14,10 @@ unsigned long __do_user_copy(void *to, const void *from, int n,
14 int n), int *faulted_out) 14 int n), int *faulted_out)
15{ 15{
16 unsigned long *faddrp = (unsigned long *) fault_addr, ret; 16 unsigned long *faddrp = (unsigned long *) fault_addr, ret;
17 int enable;
18 17
19 jmp_buf jbuf; 18 jmp_buf jbuf;
20 *fault_catcher = &jbuf; 19 *fault_catcher = &jbuf;
21 if(UML_SETJMP(&jbuf, enable) == 0){ 20 if(UML_SETJMP(&jbuf) == 0){
22 (*op)(to, from, n); 21 (*op)(to, from, n);
23 ret = 0; 22 ret = 0;
24 *faulted_out = 0; 23 *faulted_out = 0;
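
The hunk cuts off mid-function; the fault-catching idiom in __do_user_copy() reads more clearly in full. A minimal sketch, assuming (as elsewhere in UML) that the SIGSEGV handler longjmps through *fault_catcher and stores the faulting address behind fault_addr:

	jmp_buf jbuf;

	*fault_catcher = &jbuf;
	if (UML_SETJMP(&jbuf) == 0) {
		(*op)(to, from, n);	/* the actual copy; may fault */
		ret = 0;
		*faulted_out = 0;
	} else {
		ret = *faddrp;		/* faulting address recorded by the handler */
		*faulted_out = 1;
	}
	*fault_catcher = NULL;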
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 83d389b8ebd8..840d5d93d5cc 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-git22 3# Linux kernel version: 2.6.18-rc2
4# Tue Jul 4 14:24:40 2006 4# Tue Jul 18 17:13:20 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -37,6 +37,7 @@ CONFIG_SWAP=y
37CONFIG_SYSVIPC=y 37CONFIG_SYSVIPC=y
38CONFIG_POSIX_MQUEUE=y 38CONFIG_POSIX_MQUEUE=y
39# CONFIG_BSD_PROCESS_ACCT is not set 39# CONFIG_BSD_PROCESS_ACCT is not set
40# CONFIG_TASKSTATS is not set
40CONFIG_SYSCTL=y 41CONFIG_SYSCTL=y
41# CONFIG_AUDIT is not set 42# CONFIG_AUDIT is not set
42CONFIG_IKCONFIG=y 43CONFIG_IKCONFIG=y
@@ -413,6 +414,7 @@ CONFIG_BLK_DEV_LOOP=y
413CONFIG_BLK_DEV_RAM=y 414CONFIG_BLK_DEV_RAM=y
414CONFIG_BLK_DEV_RAM_COUNT=16 415CONFIG_BLK_DEV_RAM_COUNT=16
415CONFIG_BLK_DEV_RAM_SIZE=4096 416CONFIG_BLK_DEV_RAM_SIZE=4096
417CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
416CONFIG_BLK_DEV_INITRD=y 418CONFIG_BLK_DEV_INITRD=y
417# CONFIG_CDROM_PKTCDVD is not set 419# CONFIG_CDROM_PKTCDVD is not set
418# CONFIG_ATA_OVER_ETH is not set 420# CONFIG_ATA_OVER_ETH is not set
@@ -1195,7 +1197,7 @@ CONFIG_USB_MON=y
1195# CONFIG_USB_LEGOTOWER is not set 1197# CONFIG_USB_LEGOTOWER is not set
1196# CONFIG_USB_LCD is not set 1198# CONFIG_USB_LCD is not set
1197# CONFIG_USB_LED is not set 1199# CONFIG_USB_LED is not set
1198# CONFIG_USB_CY7C63 is not set 1200# CONFIG_USB_CYPRESS_CY7C63 is not set
1199# CONFIG_USB_CYTHERM is not set 1201# CONFIG_USB_CYTHERM is not set
1200# CONFIG_USB_PHIDGETKIT is not set 1202# CONFIG_USB_PHIDGETKIT is not set
1201# CONFIG_USB_PHIDGETSERVO is not set 1203# CONFIG_USB_PHIDGETSERVO is not set
@@ -1373,7 +1375,6 @@ CONFIG_SUNRPC=y
1373# CONFIG_RPCSEC_GSS_SPKM3 is not set 1375# CONFIG_RPCSEC_GSS_SPKM3 is not set
1374# CONFIG_SMB_FS is not set 1376# CONFIG_SMB_FS is not set
1375# CONFIG_CIFS is not set 1377# CONFIG_CIFS is not set
1376# CONFIG_CIFS_DEBUG2 is not set
1377# CONFIG_NCP_FS is not set 1378# CONFIG_NCP_FS is not set
1378# CONFIG_CODA_FS is not set 1379# CONFIG_CODA_FS is not set
1379# CONFIG_AFS_FS is not set 1380# CONFIG_AFS_FS is not set
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 9b5bb413a6e9..5d4a7d125ed0 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -103,7 +103,7 @@ ENTRY(ia32_sysenter_target)
103 pushq %rax 103 pushq %rax
104 CFI_ADJUST_CFA_OFFSET 8 104 CFI_ADJUST_CFA_OFFSET 8
105 cld 105 cld
106 SAVE_ARGS 0,0,1 106 SAVE_ARGS 0,0,0
107 /* no need to do an access_ok check here because rbp has been 107 /* no need to do an access_ok check here because rbp has been
108 32bit zero extended */ 108 32bit zero extended */
1091: movl (%rbp),%r9d 1091: movl (%rbp),%r9d
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index e71ed53b08fb..146924ba5df5 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -85,7 +85,8 @@
85#define CSR_AGENT_MASK 0xffe0ffff 85#define CSR_AGENT_MASK 0xffe0ffff
86 86
87#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */ 87#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
88#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */ 88#define MAX_NUM_CHASSIS 8 /* max number of chassis */
89#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2) /* max dev->bus->number */
89#define PHBS_PER_CALGARY 4 90#define PHBS_PER_CALGARY 4
90 91
91/* register offsets in Calgary's internal register space */ 92/* register offsets in Calgary's internal register space */
@@ -110,7 +111,8 @@ static const unsigned long phb_offsets[] = {
110 0xB000 /* PHB3 */ 111 0xB000 /* PHB3 */
111}; 112};
112 113
113void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES]; 114static char bus_to_phb[MAX_PHB_BUS_NUM];
115void* tce_table_kva[MAX_PHB_BUS_NUM];
114unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; 116unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
115static int translate_empty_slots __read_mostly = 0; 117static int translate_empty_slots __read_mostly = 0;
116static int calgary_detected __read_mostly = 0; 118static int calgary_detected __read_mostly = 0;
@@ -119,7 +121,7 @@ static int calgary_detected __read_mostly = 0;
119 * the bitmap of PHBs the user requested that we disable 121 * the bitmap of PHBs the user requested that we disable
120 * translation on. 122 * translation on.
121 */ 123 */
122static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM); 124static DECLARE_BITMAP(translation_disabled, MAX_PHB_BUS_NUM);
123 125
124static void tce_cache_blast(struct iommu_table *tbl); 126static void tce_cache_blast(struct iommu_table *tbl);
125 127
@@ -452,7 +454,7 @@ static struct dma_mapping_ops calgary_dma_ops = {
452 454
453static inline int busno_to_phbid(unsigned char num) 455static inline int busno_to_phbid(unsigned char num)
454{ 456{
455 return bus_to_phb(num) % PHBS_PER_CALGARY; 457 return bus_to_phb[num];
456} 458}
457 459
458static inline unsigned long split_queue_offset(unsigned char num) 460static inline unsigned long split_queue_offset(unsigned char num)
@@ -812,7 +814,7 @@ static int __init calgary_init(void)
812 int i, ret = -ENODEV; 814 int i, ret = -ENODEV;
813 struct pci_dev *dev = NULL; 815 struct pci_dev *dev = NULL;
814 816
815 for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) { 817 for (i = 0; i < MAX_PHB_BUS_NUM; i++) {
816 dev = pci_get_device(PCI_VENDOR_ID_IBM, 818 dev = pci_get_device(PCI_VENDOR_ID_IBM,
817 PCI_DEVICE_ID_IBM_CALGARY, 819 PCI_DEVICE_ID_IBM_CALGARY,
818 dev); 820 dev);
@@ -822,7 +824,7 @@ static int __init calgary_init(void)
822 calgary_init_one_nontraslated(dev); 824 calgary_init_one_nontraslated(dev);
823 continue; 825 continue;
824 } 826 }
825 if (!tce_table_kva[i] && !translate_empty_slots) { 827 if (!tce_table_kva[dev->bus->number] && !translate_empty_slots) {
826 pci_dev_put(dev); 828 pci_dev_put(dev);
827 continue; 829 continue;
828 } 830 }
@@ -842,7 +844,7 @@ error:
842 pci_dev_put(dev); 844 pci_dev_put(dev);
843 continue; 845 continue;
844 } 846 }
845 if (!tce_table_kva[i] && !translate_empty_slots) 847 if (!tce_table_kva[dev->bus->number] && !translate_empty_slots)
846 continue; 848 continue;
847 calgary_disable_translation(dev); 849 calgary_disable_translation(dev);
848 calgary_free_tar(dev); 850 calgary_free_tar(dev);
@@ -876,9 +878,10 @@ static inline int __init determine_tce_table_size(u64 ram)
876void __init detect_calgary(void) 878void __init detect_calgary(void)
877{ 879{
878 u32 val; 880 u32 val;
879 int bus, table_idx; 881 int bus;
880 void *tbl; 882 void *tbl;
881 int detected = 0; 883 int calgary_found = 0;
884 int phb = -1;
882 885
883 /* 886 /*
884 * if the user specified iommu=off or iommu=soft or we found 887 * if the user specified iommu=off or iommu=soft or we found
@@ -889,38 +892,46 @@ void __init detect_calgary(void)
889 892
890 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE); 893 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
891 894
892 for (bus = 0, table_idx = 0; 895 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
893 bus <= num_online_nodes() * MAX_PHB_BUS_NUM; 896 int dev;
894 bus++) { 897
895 BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM); 898 tce_table_kva[bus] = NULL;
899 bus_to_phb[bus] = -1;
900
896 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY) 901 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
897 continue; 902 continue;
903
904 /*
 905 * There are 4 PHBs per Calgary chip. Set phb to the PHB number
 906 * (0-3) this bus is connected to, relative to the Calgary chip.
907 */
908 phb = (phb + 1) % PHBS_PER_CALGARY;
909
898 if (test_bit(bus, translation_disabled)) { 910 if (test_bit(bus, translation_disabled)) {
899 printk(KERN_INFO "Calgary: translation is disabled for " 911 printk(KERN_INFO "Calgary: translation is disabled for "
900 "PHB 0x%x\n", bus); 912 "PHB 0x%x\n", bus);
901 /* skip this phb, don't allocate a tbl for it */ 913 /* skip this phb, don't allocate a tbl for it */
902 tce_table_kva[table_idx] = NULL;
903 table_idx++;
904 continue; 914 continue;
905 } 915 }
906 /* 916 /*
907 * scan the first slot of the PCI bus to see if there 917 * Scan the slots of the PCI bus to see if there is a device present.
 908 * are any devices present 918 * The parent bus will be the zeroth device, so start at 1.
909 */ 919 */
910 val = read_pci_config(bus, 1, 0, 0); 920 for (dev = 1; dev < 8; dev++) {
911 if (val != 0xffffffff || translate_empty_slots) { 921 val = read_pci_config(bus, dev, 0, 0);
912 tbl = alloc_tce_table(); 922 if (val != 0xffffffff || translate_empty_slots) {
913 if (!tbl) 923 tbl = alloc_tce_table();
914 goto cleanup; 924 if (!tbl)
915 detected = 1; 925 goto cleanup;
916 } else 926 tce_table_kva[bus] = tbl;
917 tbl = NULL; 927 bus_to_phb[bus] = phb;
918 928 calgary_found = 1;
919 tce_table_kva[table_idx] = tbl; 929 break;
920 table_idx++; 930 }
931 }
921 } 932 }
922 933
923 if (detected) { 934 if (calgary_found) {
924 iommu_detected = 1; 935 iommu_detected = 1;
925 calgary_detected = 1; 936 calgary_detected = 1;
926 printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. " 937 printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
@@ -929,9 +940,9 @@ void __init detect_calgary(void)
929 return; 940 return;
930 941
931cleanup: 942cleanup:
932 for (--table_idx; table_idx >= 0; --table_idx) 943 for (--bus; bus >= 0; --bus)
933 if (tce_table_kva[table_idx]) 944 if (tce_table_kva[bus])
934 free_tce_table(tce_table_kva[table_idx]); 945 free_tce_table(tce_table_kva[bus]);
935} 946}
936 947
937int __init calgary_iommu_init(void) 948int __init calgary_iommu_init(void)
@@ -1002,7 +1013,7 @@ static int __init calgary_parse_options(char *p)
1002 if (p == endp) 1013 if (p == endp)
1003 break; 1014 break;
1004 1015
1005 if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) { 1016 if (bridge < MAX_PHB_BUS_NUM) {
1006 printk(KERN_INFO "Calgary: disabling " 1017 printk(KERN_INFO "Calgary: disabling "
1007 "translation for PHB 0x%x\n", bridge); 1018 "translation for PHB 0x%x\n", bridge);
1008 set_bit(bridge, translation_disabled); 1019 set_bit(bridge, translation_disabled);
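
Two details of the Calgary rework are easy to miss in the hunks: the enlarged bus-number space and the direct per-bus PHB lookup. A short sketch, using the constants added above:

/* MAX_PHB_BUS_NUM = MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2 = 8 * 8 * 2 = 128,
 * so tce_table_kva[] and bus_to_phb[] are now indexed directly by
 * dev->bus->number rather than by a separately maintained table index. */
static inline int busno_to_phbid(unsigned char num)
{
	return bus_to_phb[num];	/* filled in by detect_calgary(); -1 when unused */
}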
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index ebdb77fe2057..6a55f87ba97f 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -31,9 +31,10 @@ struct dma_mapping_ops swiotlb_dma_ops = {
31void pci_swiotlb_init(void) 31void pci_swiotlb_init(void)
32{ 32{
33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
34 if (!iommu_detected && !no_iommu && 34 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
35 (end_pfn > MAX_DMA32_PFN || force_iommu))
36 swiotlb = 1; 35 swiotlb = 1;
36 if (swiotlb_force)
37 swiotlb = 1;
37 if (swiotlb) { 38 if (swiotlb) {
38 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); 39 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
39 swiotlb_init(); 40 swiotlb_init();
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
index d3a9e79e954c..5530dda3f27a 100644
--- a/arch/x86_64/kernel/tce.c
+++ b/arch/x86_64/kernel/tce.c
@@ -96,7 +96,6 @@ static inline unsigned int table_size_to_number_of_entries(unsigned char size)
96static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) 96static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
97{ 97{
98 unsigned int bitmapsz; 98 unsigned int bitmapsz;
99 unsigned int tce_table_index;
100 unsigned long bmppages; 99 unsigned long bmppages;
101 int ret; 100 int ret;
102 101
@@ -105,8 +104,7 @@ static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
105 /* set the tce table size - measured in entries */ 104 /* set the tce table size - measured in entries */
106 tbl->it_size = table_size_to_number_of_entries(specified_table_size); 105 tbl->it_size = table_size_to_number_of_entries(specified_table_size);
107 106
108 tce_table_index = bus_to_phb(tbl->it_busno); 107 tbl->it_base = (unsigned long)tce_table_kva[dev->bus->number];
109 tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
110 if (!tbl->it_base) { 108 if (!tbl->it_base) {
111 printk(KERN_ERR "Calgary: iommu_table_setparms: " 109 printk(KERN_ERR "Calgary: iommu_table_setparms: "
112 "no table allocated?!\n"); 110 "no table allocated?!\n");
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index b9ff75992c16..7a9b18224182 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -28,6 +28,7 @@
28#include <linux/acpi.h> 28#include <linux/acpi.h>
29#ifdef CONFIG_ACPI 29#ifdef CONFIG_ACPI
30#include <acpi/achware.h> /* for PM timer frequency */ 30#include <acpi/achware.h> /* for PM timer frequency */
31#include <acpi/acpi_bus.h>
31#endif 32#endif
32#include <asm/8253pit.h> 33#include <asm/8253pit.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
@@ -193,7 +194,7 @@ unsigned long profile_pc(struct pt_regs *regs)
193 is just accounted to the spinlock function. 194 is just accounted to the spinlock function.
194 Better would be to write these functions in assembler again 195 Better would be to write these functions in assembler again
195 and check exactly. */ 196 and check exactly. */
196 if (in_lock_functions(pc)) { 197 if (!user_mode(regs) && in_lock_functions(pc)) {
197 char *v = *(char **)regs->rsp; 198 char *v = *(char **)regs->rsp;
198 if ((v >= _stext && v <= _etext) || 199 if ((v >= _stext && v <= _etext) ||
199 (v >= _sinittext && v <= _einittext) || 200 (v >= _sinittext && v <= _einittext) ||
@@ -953,11 +954,18 @@ __cpuinit int unsynchronized_tsc(void)
953#ifdef CONFIG_SMP 954#ifdef CONFIG_SMP
954 if (apic_is_clustered_box()) 955 if (apic_is_clustered_box())
955 return 1; 956 return 1;
956 /* Intel systems are normally all synchronized. Exceptions
957 are handled in the check above. */
958 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
959 return 0;
960#endif 957#endif
 958 /* Most Intel systems have synchronized TSCs except for
 959 multi-node systems */
 958 /* Most Intel systems have synchronized TSCs except for
960 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
961#ifdef CONFIG_ACPI
962 /* But TSC doesn't tick in C3 so don't use it there */
963 if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 100)
964 return 1;
965#endif
966 return 0;
967 }
968
961 /* Assume multi socket systems are not synchronized */ 969 /* Assume multi socket systems are not synchronized */
962 return num_present_cpus() > 1; 970 return num_present_cpus() > 1;
963} 971}
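
The reworked unsynchronized_tsc() folds an ACPI check into the Intel fast path; as far as these hunks show, the resulting policy is:

/* decision order after this patch (sketch):
 *   clustered-APIC box                          -> return 1 (unsynchronized)
 *   Intel CPU and FADT C3 latency < 100 us      -> return 1 (TSC stops in C3)
 *   Intel CPU otherwise                         -> return 0 (synchronized)
 *   everything else                             -> num_present_cpus() > 1
 */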
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index eb39a2775236..f7a9d1421078 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -254,7 +254,6 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
254{ 254{
255 const unsigned cpu = safe_smp_processor_id(); 255 const unsigned cpu = safe_smp_processor_id();
256 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; 256 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
257 int i = 11;
258 unsigned used = 0; 257 unsigned used = 0;
259 258
260 printk("\nCall Trace:\n"); 259 printk("\nCall Trace:\n");
@@ -275,11 +274,20 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
275 if (unwind_init_blocked(&info, tsk) == 0) 274 if (unwind_init_blocked(&info, tsk) == 0)
276 unw_ret = show_trace_unwind(&info, NULL); 275 unw_ret = show_trace_unwind(&info, NULL);
277 } 276 }
278 if (unw_ret > 0) { 277 if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
279 if (call_trace > 0) 278#ifdef CONFIG_STACK_UNWIND
279 unsigned long rip = info.regs.rip;
280 print_symbol("DWARF2 unwinder stuck at %s\n", rip);
281 if (call_trace == 1) {
282 printk("Leftover inexact backtrace:\n");
283 stack = (unsigned long *)info.regs.rsp;
284 } else if (call_trace > 1)
280 return; 285 return;
281 printk("Legacy call trace:"); 286 else
282 i = 18; 287 printk("Full inexact backtrace again:\n");
288#else
289 printk("Inexact backtrace:\n");
290#endif
283 } 291 }
284 } 292 }
285 293
@@ -1118,8 +1126,10 @@ static int __init call_trace_setup(char *s)
1118 call_trace = -1; 1126 call_trace = -1;
1119 else if (strcmp(s, "both") == 0) 1127 else if (strcmp(s, "both") == 0)
1120 call_trace = 0; 1128 call_trace = 0;
1121 else if (strcmp(s, "new") == 0) 1129 else if (strcmp(s, "newfallback") == 0)
1122 call_trace = 1; 1130 call_trace = 1;
1131 else if (strcmp(s, "new") == 0)
1132 call_trace = 2;
1123 return 1; 1133 return 1;
1124} 1134}
1125__setup("call_trace=", call_trace_setup); 1135__setup("call_trace=", call_trace_setup);
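
Collecting the two traps.c hunks, the call_trace= boot option now maps as follows (described only as far as these hunks show it):

/* call_trace= values after this patch:
 *   "old"         -> -1  legacy dump only (that path is not shown in this hunk)
 *   "both"        ->  0  DWARF2 unwinder plus the full inexact backtrace
 *   "newfallback" ->  1  unwinder; leftover inexact backtrace only if it gets stuck
 *   "new"         ->  2  unwinder only; stop after reporting if it gets stuck
 */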
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index b50a7c7c47f8..3acf60ded2a0 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -2,7 +2,6 @@
2#include <linux/pci.h> 2#include <linux/pci.h>
3#include <asm/mpspec.h> 3#include <asm/mpspec.h>
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <asm/k8.h>
6 5
7/* 6/*
8 * This discovers the pcibus <-> node mapping on AMD K8. 7 * This discovers the pcibus <-> node mapping on AMD K8.
@@ -19,6 +18,7 @@
19#define NR_LDT_BUS_NUMBER_REGISTERS 3 18#define NR_LDT_BUS_NUMBER_REGISTERS 3
20#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF) 19#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
21#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF) 20#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
21#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
22 22
23/** 23/**
24 * fill_mp_bus_to_cpumask() 24 * fill_mp_bus_to_cpumask()
@@ -28,7 +28,8 @@
28__init static int 28__init static int
29fill_mp_bus_to_cpumask(void) 29fill_mp_bus_to_cpumask(void)
30{ 30{
31 int i, j, k; 31 struct pci_dev *nb_dev = NULL;
32 int i, j;
32 u32 ldtbus, nid; 33 u32 ldtbus, nid;
33 static int lbnr[3] = { 34 static int lbnr[3] = {
34 LDT_BUS_NUMBER_REGISTER_0, 35 LDT_BUS_NUMBER_REGISTER_0,
@@ -36,9 +37,8 @@ fill_mp_bus_to_cpumask(void)
36 LDT_BUS_NUMBER_REGISTER_2 37 LDT_BUS_NUMBER_REGISTER_2
37 }; 38 };
38 39
39 cache_k8_northbridges(); 40 while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
40 for (k = 0; k < num_k8_northbridges; k++) { 41 PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) {
41 struct pci_dev *nb_dev = k8_northbridges[k];
42 pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid); 42 pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
43 43
44 for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) { 44 for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
diff --git a/block/blktrace.c b/block/blktrace.c
index b8c0702777ff..265f7a830619 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
80#define trace_sync_bit(rw) \ 80#define trace_sync_bit(rw) \
81 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1)) 81 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
82#define trace_ahead_bit(rw) \ 82#define trace_ahead_bit(rw) \
83 (((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0)) 83 (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
84 84
85/* 85/*
86 * The worker for the various blk_add_trace*() types. Fills out a 86 * The worker for the various blk_add_trace*() types. Fills out a
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 102ebc2c5c34..aae3123bf3ee 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
936 * seeks. so allow a little bit of time for him to submit a new rq 936 * seeks. so allow a little bit of time for him to submit a new rq
937 */ 937 */
938 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) 938 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
939 sl = 2; 939 sl = min(sl, msecs_to_jiffies(2));
940 940
941 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 941 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
942 return 1; 942 return 1;
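
The cfq-iosched change matters because the old value was a raw jiffy count, whose real duration depends on HZ; a quick worked comparison (the HZ values are just examples):

/* old: sl = 2 jiffies                 -> 20 ms at HZ=100, 2 ms at HZ=1000
 * new: sl = min(sl, msecs_to_jiffies(2))
 *      msecs_to_jiffies(2) is 1 jiffy at HZ=100 or HZ=250 and 2 jiffies at
 *      HZ=1000, so the idle window for seeky processes is capped at roughly
 *      2 ms of real time (rounded up to whole jiffies) instead of growing
 *      with a coarser HZ. */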
diff --git a/block/ioctl.c b/block/ioctl.c
index 9cfa2e1ecb24..309760b7e37f 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -72,7 +72,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
72 bdevp = bdget_disk(disk, part); 72 bdevp = bdget_disk(disk, part);
73 if (!bdevp) 73 if (!bdevp)
74 return -ENOMEM; 74 return -ENOMEM;
75 mutex_lock(&bdevp->bd_mutex); 75 mutex_lock_nested(&bdevp->bd_mutex, BD_MUTEX_PARTITION);
76 if (bdevp->bd_openers) { 76 if (bdevp->bd_openers) {
77 mutex_unlock(&bdevp->bd_mutex); 77 mutex_unlock(&bdevp->bd_mutex);
78 bdput(bdevp); 78 bdput(bdevp);
@@ -82,7 +82,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
82 fsync_bdev(bdevp); 82 fsync_bdev(bdevp);
83 invalidate_bdev(bdevp, 0); 83 invalidate_bdev(bdevp, 0);
84 84
85 mutex_lock(&bdev->bd_mutex); 85 mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_WHOLE);
86 delete_partition(disk, part); 86 delete_partition(disk, part);
87 mutex_unlock(&bdev->bd_mutex); 87 mutex_unlock(&bdev->bd_mutex);
88 mutex_unlock(&bdevp->bd_mutex); 88 mutex_unlock(&bdevp->bd_mutex);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 93d94749310b..b5382cedf0c0 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -400,6 +400,16 @@ config BLK_DEV_RAM_SIZE
400 what are you doing. If you are using IBM S/390, then set this to 400 what are you doing. If you are using IBM S/390, then set this to
401 8192. 401 8192.
402 402
403config BLK_DEV_RAM_BLOCKSIZE
404 int "Default RAM disk block size (bytes)"
405 depends on BLK_DEV_RAM
406 default "1024"
407 help
 408 The default value is 1024 bytes. PAGE_SIZE would be a much more
 409 efficient choice, but the default is kept so that existing initrd
 410 setups keep working - the rd_load_image routine assumes the
 411 filesystem in the image uses a 1024-byte blocksize.
412
403config BLK_DEV_INITRD 413config BLK_DEV_INITRD
404 bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" 414 bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
405 depends on BROKEN || !FRV 415 depends on BROKEN || !FRV
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c4df22dfd2a..7b0eca703a67 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status)
1233 } 1233 }
1234} 1234}
1235 1235
1236static void cciss_check_queues(ctlr_info_t *h)
1237{
1238 int start_queue = h->next_to_run;
1239 int i;
1240
1241 /* check to see if we have maxed out the number of commands that can
1242 * be placed on the queue. If so then exit. We do this check here
1243 * in case the interrupt we serviced was from an ioctl and did not
1244 * free any new commands.
1245 */
1246 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1247 return;
1248
1249 /* We have room on the queue for more commands. Now we need to queue
1250 * them up. We will also keep track of the next queue to run so
1251 * that every queue gets a chance to be started first.
1252 */
1253 for (i = 0; i < h->highest_lun + 1; i++) {
1254 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1255 /* make sure the disk has been added and the drive is real
1256 * because this can be called from the middle of init_one.
1257 */
1258 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1259 continue;
1260 blk_start_queue(h->gendisk[curr_queue]->queue);
1261
1262 /* check to see if we have maxed out the number of commands
1263 * that can be placed on the queue.
1264 */
1265 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
1266 if (curr_queue == start_queue) {
1267 h->next_to_run =
1268 (start_queue + 1) % (h->highest_lun + 1);
1269 break;
1270 } else {
1271 h->next_to_run = curr_queue;
1272 break;
1273 }
1274 } else {
1275 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1276 }
1277 }
1278}
1279
1236static void cciss_softirq_done(struct request *rq) 1280static void cciss_softirq_done(struct request *rq)
1237{ 1281{
1238 CommandList_struct *cmd = rq->completion_data; 1282 CommandList_struct *cmd = rq->completion_data;
@@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq)
1264 spin_lock_irqsave(&h->lock, flags); 1308 spin_lock_irqsave(&h->lock, flags);
1265 end_that_request_last(rq, rq->errors); 1309 end_that_request_last(rq, rq->errors);
1266 cmd_free(h, cmd, 1); 1310 cmd_free(h, cmd, 1);
1311 cciss_check_queues(h);
1267 spin_unlock_irqrestore(&h->lock, flags); 1312 spin_unlock_irqrestore(&h->lock, flags);
1268} 1313}
1269 1314
@@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2528 CommandList_struct *c; 2573 CommandList_struct *c;
2529 unsigned long flags; 2574 unsigned long flags;
2530 __u32 a, a1, a2; 2575 __u32 a, a1, a2;
2531 int j;
2532 int start_queue = h->next_to_run;
2533 2576
2534 if (interrupt_not_for_us(h)) 2577 if (interrupt_not_for_us(h))
2535 return IRQ_NONE; 2578 return IRQ_NONE;
@@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2588 } 2631 }
2589 } 2632 }
2590 2633
2591 /* check to see if we have maxed out the number of commands that can
2592 * be placed on the queue. If so then exit. We do this check here
2593 * in case the interrupt we serviced was from an ioctl and did not
2594 * free any new commands.
2595 */
2596 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2597 goto cleanup;
2598
2599 /* We have room on the queue for more commands. Now we need to queue
2600 * them up. We will also keep track of the next queue to run so
2601 * that every queue gets a chance to be started first.
2602 */
2603 for (j = 0; j < h->highest_lun + 1; j++) {
2604 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2605 /* make sure the disk has been added and the drive is real
2606 * because this can be called from the middle of init_one.
2607 */
2608 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
2609 continue;
2610 blk_start_queue(h->gendisk[curr_queue]->queue);
2611
2612 /* check to see if we have maxed out the number of commands
2613 * that can be placed on the queue.
2614 */
2615 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
2616 if (curr_queue == start_queue) {
2617 h->next_to_run =
2618 (start_queue + 1) % (h->highest_lun + 1);
2619 goto cleanup;
2620 } else {
2621 h->next_to_run = curr_queue;
2622 goto cleanup;
2623 }
2624 } else {
2625 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2626 }
2627 }
2628
2629 cleanup:
2630 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2634 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2631 return IRQ_HANDLED; 2635 return IRQ_HANDLED;
2632} 2636}
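
The block moved into cciss_check_queues() is the same round-robin restart that used to live in the interrupt handler; it now runs from the softirq completion path instead. Stripped to its core, assuming nr = h->highest_lun + 1:

/* round-robin sketch: start at h->next_to_run so the same drive is not
 * always restarted first, and remember where to resume once the command
 * pool fills up again */
for (i = 0; i < nr; i++) {
	int q = (start + i) % nr;
	/* skip drives that are not fully set up, start queue q, and stop
	 * early (recording h->next_to_run) when no commands are free */
}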
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 757f42dd8e86..78082edc14b4 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1739,8 +1739,6 @@ static void getgeometry(int ctlr)
1739 (log_index < id_ctlr_buf->nr_drvs) 1739 (log_index < id_ctlr_buf->nr_drvs)
1740 && (log_unit < NWD); 1740 && (log_unit < NWD);
1741 log_unit++) { 1741 log_unit++) {
1742 struct gendisk *disk = ida_gendisk[ctlr][log_unit];
1743
1744 size = sizeof(sense_log_drv_stat_t); 1742 size = sizeof(sense_log_drv_stat_t);
1745 1743
1746 /* 1744 /*
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 3cf246abb5ec..a3f64bfe6b58 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -84,7 +84,7 @@ int rd_size = CONFIG_BLK_DEV_RAM_SIZE; /* Size of the RAM disks */
84 * behaviour. The default is still BLOCK_SIZE (needed by rd_load_image that 84 * behaviour. The default is still BLOCK_SIZE (needed by rd_load_image that
85 * supposes the filesystem in the image uses a BLOCK_SIZE blocksize). 85 * supposes the filesystem in the image uses a BLOCK_SIZE blocksize).
86 */ 86 */
87static int rd_blocksize = BLOCK_SIZE; /* blocksize of the RAM disks */ 87static int rd_blocksize = CONFIG_BLK_DEV_RAM_BLOCKSIZE;
88 88
89/* 89/*
90 * Copyright (C) 2000 Linus Torvalds. 90 * Copyright (C) 2000 Linus Torvalds.
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 6a0c2230f82f..e2d4beac7420 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -67,6 +67,8 @@ static int ignore = 0;
67static int ignore_dga = 0; 67static int ignore_dga = 0;
68static int ignore_csr = 0; 68static int ignore_csr = 0;
69static int ignore_sniffer = 0; 69static int ignore_sniffer = 0;
70static int disable_scofix = 0;
71static int force_scofix = 0;
70static int reset = 0; 72static int reset = 0;
71 73
72#ifdef CONFIG_BT_HCIUSB_SCO 74#ifdef CONFIG_BT_HCIUSB_SCO
@@ -107,9 +109,12 @@ static struct usb_device_id blacklist_ids[] = {
107 { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE }, 109 { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
108 110
109 /* Broadcom BCM2035 */ 111 /* Broadcom BCM2035 */
110 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_BROKEN_ISOC }, 112 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
111 { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, 113 { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
112 114
115 /* IBM/Lenovo ThinkPad with Broadcom chip */
116 { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
117
113 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */ 118 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */
114 { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET }, 119 { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
115 120
@@ -119,11 +124,13 @@ static struct usb_device_id blacklist_ids[] = {
119 /* ISSC Bluetooth Adapter v3.1 */ 124 /* ISSC Bluetooth Adapter v3.1 */
120 { USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET }, 125 { USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
121 126
122 /* RTX Telecom based adapter with buggy SCO support */ 127 /* RTX Telecom based adapters with buggy SCO support */
123 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC }, 128 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
129 { USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC },
124 130
125 /* Belkin F8T012 */ 131 /* Belkin F8T012 and F8T013 devices */
126 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU }, 132 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
133 { USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_WRONG_SCO_MTU },
127 134
128 /* Digianswer devices */ 135 /* Digianswer devices */
129 { USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER }, 136 { USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
@@ -990,8 +997,10 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
990 if (reset || id->driver_info & HCI_RESET) 997 if (reset || id->driver_info & HCI_RESET)
991 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); 998 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
992 999
993 if (id->driver_info & HCI_WRONG_SCO_MTU) 1000 if (force_scofix || id->driver_info & HCI_WRONG_SCO_MTU) {
994 set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); 1001 if (!disable_scofix)
1002 set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
1003 }
995 1004
996 if (id->driver_info & HCI_SNIFFER) { 1005 if (id->driver_info & HCI_SNIFFER) {
997 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) 1006 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
@@ -1161,6 +1170,12 @@ MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
1161module_param(ignore_sniffer, bool, 0644); 1170module_param(ignore_sniffer, bool, 0644);
1162MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002"); 1171MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
1163 1172
1173module_param(disable_scofix, bool, 0644);
1174MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
1175
1176module_param(force_scofix, bool, 0644);
 1177MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffer size");
1178
1164module_param(reset, bool, 0644); 1179module_param(reset, bool, 0644);
1165MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); 1180MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
1166 1181
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
index 5b91e4e25641..7719bd75810b 100644
--- a/drivers/char/nsc_gpio.c
+++ b/drivers/char/nsc_gpio.c
@@ -68,13 +68,11 @@ ssize_t nsc_gpio_write(struct file *file, const char __user *data,
68 amp->gpio_config(m, ~1, 0); 68 amp->gpio_config(m, ~1, 0);
69 break; 69 break;
70 case 'T': 70 case 'T':
71 dev_dbg(dev, "GPIO%d output is push pull\n", 71 dev_dbg(dev, "GPIO%d output is push pull\n", m);
72 m);
73 amp->gpio_config(m, ~2, 2); 72 amp->gpio_config(m, ~2, 2);
74 break; 73 break;
75 case 't': 74 case 't':
76 dev_dbg(dev, "GPIO%d output is open drain\n", 75 dev_dbg(dev, "GPIO%d output is open drain\n", m);
77 m);
78 amp->gpio_config(m, ~2, 0); 76 amp->gpio_config(m, ~2, 0);
79 break; 77 break;
80 case 'P': 78 case 'P':
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index 11bd78c80628..645eb81cb5a9 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -212,22 +212,21 @@ static void pc8736x_gpio_change(unsigned index)
212 pc8736x_gpio_set(index, !pc8736x_gpio_current(index)); 212 pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
213} 213}
214 214
215static struct nsc_gpio_ops pc8736x_access = { 215static struct nsc_gpio_ops pc8736x_gpio_ops = {
216 .owner = THIS_MODULE, 216 .owner = THIS_MODULE,
217 .gpio_config = pc8736x_gpio_configure, 217 .gpio_config = pc8736x_gpio_configure,
218 .gpio_dump = nsc_gpio_dump, 218 .gpio_dump = nsc_gpio_dump,
219 .gpio_get = pc8736x_gpio_get, 219 .gpio_get = pc8736x_gpio_get,
220 .gpio_set = pc8736x_gpio_set, 220 .gpio_set = pc8736x_gpio_set,
221 .gpio_set_high = pc8736x_gpio_set_high,
222 .gpio_set_low = pc8736x_gpio_set_low,
223 .gpio_change = pc8736x_gpio_change, 221 .gpio_change = pc8736x_gpio_change,
224 .gpio_current = pc8736x_gpio_current 222 .gpio_current = pc8736x_gpio_current
225}; 223};
224EXPORT_SYMBOL(pc8736x_gpio_ops);
226 225
227static int pc8736x_gpio_open(struct inode *inode, struct file *file) 226static int pc8736x_gpio_open(struct inode *inode, struct file *file)
228{ 227{
229 unsigned m = iminor(inode); 228 unsigned m = iminor(inode);
230 file->private_data = &pc8736x_access; 229 file->private_data = &pc8736x_gpio_ops;
231 230
232 dev_dbg(&pdev->dev, "open %d\n", m); 231 dev_dbg(&pdev->dev, "open %d\n", m);
233 232
@@ -236,7 +235,7 @@ static int pc8736x_gpio_open(struct inode *inode, struct file *file)
236 return nonseekable_open(inode, file); 235 return nonseekable_open(inode, file);
237} 236}
238 237
239static const struct file_operations pc8736x_gpio_fops = { 238static const struct file_operations pc8736x_gpio_fileops = {
240 .owner = THIS_MODULE, 239 .owner = THIS_MODULE,
241 .open = pc8736x_gpio_open, 240 .open = pc8736x_gpio_open,
242 .write = nsc_gpio_write, 241 .write = nsc_gpio_write,
@@ -278,7 +277,7 @@ static int __init pc8736x_gpio_init(void)
278 dev_err(&pdev->dev, "no device found\n"); 277 dev_err(&pdev->dev, "no device found\n");
279 goto undo_platform_dev_add; 278 goto undo_platform_dev_add;
280 } 279 }
281 pc8736x_access.dev = &pdev->dev; 280 pc8736x_gpio_ops.dev = &pdev->dev;
282 281
 283 /* Verify that the chip and its GPIO unit are both enabled. 282
284 My BIOS does this, so I take minimum action here 283 My BIOS does this, so I take minimum action here
@@ -328,7 +327,7 @@ static int __init pc8736x_gpio_init(void)
328 pc8736x_init_shadow(); 327 pc8736x_init_shadow();
329 328
330 /* ignore minor errs, and succeed */ 329 /* ignore minor errs, and succeed */
331 cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fops); 330 cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops);
332 cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT); 331 cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT);
333 332
334 return 0; 333 return 0;
@@ -355,7 +354,5 @@ static void __exit pc8736x_gpio_cleanup(void)
355 platform_device_put(pdev); 354 platform_device_put(pdev);
356} 355}
357 356
358EXPORT_SYMBOL(pc8736x_access);
359
360module_init(pc8736x_gpio_init); 357module_init(pc8736x_gpio_init);
361module_exit(pc8736x_gpio_cleanup); 358module_exit(pc8736x_gpio_cleanup);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 17bc8abd5df5..00f574cbb0d4 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1174,8 +1174,12 @@ static void dcd_change(MGSLPC_INFO *info)
1174 else 1174 else
1175 info->input_signal_events.dcd_down++; 1175 info->input_signal_events.dcd_down++;
1176#ifdef CONFIG_HDLC 1176#ifdef CONFIG_HDLC
1177 if (info->netcount) 1177 if (info->netcount) {
1178 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, info->netdev); 1178 if (info->serial_signals & SerialSignal_DCD)
1179 netif_carrier_on(info->netdev);
1180 else
1181 netif_carrier_off(info->netdev);
1182 }
1179#endif 1183#endif
1180 wake_up_interruptible(&info->status_event_wait_q); 1184 wake_up_interruptible(&info->status_event_wait_q);
1181 wake_up_interruptible(&info->event_wait_q); 1185 wake_up_interruptible(&info->event_wait_q);
@@ -4251,8 +4255,10 @@ static int hdlcdev_open(struct net_device *dev)
4251 spin_lock_irqsave(&info->lock, flags); 4255 spin_lock_irqsave(&info->lock, flags);
4252 get_signals(info); 4256 get_signals(info);
4253 spin_unlock_irqrestore(&info->lock, flags); 4257 spin_unlock_irqrestore(&info->lock, flags);
4254 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); 4258 if (info->serial_signals & SerialSignal_DCD)
4255 4259 netif_carrier_on(dev);
4260 else
4261 netif_carrier_off(dev);
4256 return 0; 4262 return 0;
4257} 4263}
4258 4264
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 425c58719db6..b956c7babd18 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -5,7 +5,6 @@
5 5
6 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */ 6 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */
7 7
8#include <linux/config.h>
9#include <linux/device.h> 8#include <linux/device.h>
10#include <linux/fs.h> 9#include <linux/fs.h>
11#include <linux/module.h> 10#include <linux/module.h>
@@ -22,37 +21,37 @@
22#include <linux/scx200_gpio.h> 21#include <linux/scx200_gpio.h>
23#include <linux/nsc_gpio.h> 22#include <linux/nsc_gpio.h>
24 23
25#define NAME "scx200_gpio" 24#define DRVNAME "scx200_gpio"
26#define DEVNAME NAME
27 25
28static struct platform_device *pdev; 26static struct platform_device *pdev;
29 27
30MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); 28MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
31MODULE_DESCRIPTION("NatSemi SCx200 GPIO Pin Driver"); 29MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver");
32MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
33 31
34static int major = 0; /* default to dynamic major */ 32static int major = 0; /* default to dynamic major */
35module_param(major, int, 0); 33module_param(major, int, 0);
36MODULE_PARM_DESC(major, "Major device number"); 34MODULE_PARM_DESC(major, "Major device number");
37 35
38struct nsc_gpio_ops scx200_access = { 36#define MAX_PINS 32 /* 64 later, when known ok */
37
38struct nsc_gpio_ops scx200_gpio_ops = {
39 .owner = THIS_MODULE, 39 .owner = THIS_MODULE,
40 .gpio_config = scx200_gpio_configure, 40 .gpio_config = scx200_gpio_configure,
41 .gpio_dump = nsc_gpio_dump, 41 .gpio_dump = nsc_gpio_dump,
42 .gpio_get = scx200_gpio_get, 42 .gpio_get = scx200_gpio_get,
43 .gpio_set = scx200_gpio_set, 43 .gpio_set = scx200_gpio_set,
44 .gpio_set_high = scx200_gpio_set_high,
45 .gpio_set_low = scx200_gpio_set_low,
46 .gpio_change = scx200_gpio_change, 44 .gpio_change = scx200_gpio_change,
47 .gpio_current = scx200_gpio_current 45 .gpio_current = scx200_gpio_current
48}; 46};
47EXPORT_SYMBOL(scx200_gpio_ops);
49 48
50static int scx200_gpio_open(struct inode *inode, struct file *file) 49static int scx200_gpio_open(struct inode *inode, struct file *file)
51{ 50{
52 unsigned m = iminor(inode); 51 unsigned m = iminor(inode);
53 file->private_data = &scx200_access; 52 file->private_data = &scx200_gpio_ops;
54 53
55 if (m > 63) 54 if (m >= MAX_PINS)
56 return -EINVAL; 55 return -EINVAL;
57 return nonseekable_open(inode, file); 56 return nonseekable_open(inode, file);
58} 57}
@@ -62,8 +61,7 @@ static int scx200_gpio_release(struct inode *inode, struct file *file)
62 return 0; 61 return 0;
63} 62}
64 63
65 64static const struct file_operations scx200_gpio_fileops = {
66static const struct file_operations scx200_gpio_fops = {
67 .owner = THIS_MODULE, 65 .owner = THIS_MODULE,
68 .write = nsc_gpio_write, 66 .write = nsc_gpio_write,
69 .read = nsc_gpio_read, 67 .read = nsc_gpio_read,
@@ -71,21 +69,20 @@ static const struct file_operations scx200_gpio_fops = {
71 .release = scx200_gpio_release, 69 .release = scx200_gpio_release,
72}; 70};
73 71
74struct cdev *scx200_devices; 72struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */
75static int num_pins = 32;
76 73
77static int __init scx200_gpio_init(void) 74static int __init scx200_gpio_init(void)
78{ 75{
79 int rc, i; 76 int rc;
80 dev_t dev = MKDEV(major, 0); 77 dev_t devid;
81 78
82 if (!scx200_gpio_present()) { 79 if (!scx200_gpio_present()) {
83 printk(KERN_ERR NAME ": no SCx200 gpio present\n"); 80 printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n");
84 return -ENODEV; 81 return -ENODEV;
85 } 82 }
86 83
87 /* support dev_dbg() with pdev->dev */ 84 /* support dev_dbg() with pdev->dev */
88 pdev = platform_device_alloc(DEVNAME, 0); 85 pdev = platform_device_alloc(DRVNAME, 0);
89 if (!pdev) 86 if (!pdev)
90 return -ENOMEM; 87 return -ENOMEM;
91 88
@@ -94,37 +91,25 @@ static int __init scx200_gpio_init(void)
94 goto undo_malloc; 91 goto undo_malloc;
95 92
96 /* nsc_gpio uses dev_dbg(), so needs this */ 93 /* nsc_gpio uses dev_dbg(), so needs this */
97 scx200_access.dev = &pdev->dev; 94 scx200_gpio_ops.dev = &pdev->dev;
98 95
99 if (major) 96 if (major) {
100 rc = register_chrdev_region(dev, num_pins, "scx200_gpio"); 97 devid = MKDEV(major, 0);
101 else { 98 rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio");
102 rc = alloc_chrdev_region(&dev, 0, num_pins, "scx200_gpio"); 99 } else {
103 major = MAJOR(dev); 100 rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio");
101 major = MAJOR(devid);
104 } 102 }
105 if (rc < 0) { 103 if (rc < 0) {
106 dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc); 104 dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
107 goto undo_platform_device_add; 105 goto undo_platform_device_add;
108 } 106 }
109 scx200_devices = kzalloc(num_pins * sizeof(struct cdev), GFP_KERNEL); 107
110 if (!scx200_devices) { 108 cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops);
111 rc = -ENOMEM; 109 cdev_add(&scx200_gpio_cdev, devid, MAX_PINS);
112 goto undo_chrdev_region;
113 }
114 for (i = 0; i < num_pins; i++) {
115 struct cdev *cdev = &scx200_devices[i];
116 cdev_init(cdev, &scx200_gpio_fops);
117 cdev->owner = THIS_MODULE;
118 rc = cdev_add(cdev, MKDEV(major, i), 1);
119 /* tolerate 'minor' errors */
120 if (rc)
121 dev_err(&pdev->dev, "Error %d on minor %d", rc, i);
122 }
123 110
124 return 0; /* succeed */ 111 return 0; /* succeed */
125 112
126undo_chrdev_region:
127 unregister_chrdev_region(dev, num_pins);
128undo_platform_device_add: 113undo_platform_device_add:
129 platform_device_del(pdev); 114 platform_device_del(pdev);
130undo_malloc: 115undo_malloc:
@@ -135,10 +120,11 @@ undo_malloc:
135 120
136static void __exit scx200_gpio_cleanup(void) 121static void __exit scx200_gpio_cleanup(void)
137{ 122{
138 kfree(scx200_devices); 123 cdev_del(&scx200_gpio_cdev);
139 unregister_chrdev_region(MKDEV(major, 0), num_pins); 124 /* cdev_put(&scx200_gpio_cdev); */
125
126 unregister_chrdev_region(MKDEV(major, 0), MAX_PINS);
140 platform_device_unregister(pdev); 127 platform_device_unregister(pdev);
141 /* kfree(pdev); */
142} 128}
143 129
144module_init(scx200_gpio_init); 130module_init(scx200_gpio_init);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index df782dd1098c..78b1b1a2732b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -1344,8 +1344,12 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
1344 } else 1344 } else
1345 info->input_signal_events.dcd_down++; 1345 info->input_signal_events.dcd_down++;
1346#ifdef CONFIG_HDLC 1346#ifdef CONFIG_HDLC
1347 if (info->netcount) 1347 if (info->netcount) {
1348 hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev); 1348 if (status & MISCSTATUS_DCD)
1349 netif_carrier_on(info->netdev);
1350 else
1351 netif_carrier_off(info->netdev);
1352 }
1349#endif 1353#endif
1350 } 1354 }
1351 if (status & MISCSTATUS_CTS_LATCHED) 1355 if (status & MISCSTATUS_CTS_LATCHED)
@@ -7844,8 +7848,10 @@ static int hdlcdev_open(struct net_device *dev)
7844 spin_lock_irqsave(&info->irq_spinlock, flags); 7848 spin_lock_irqsave(&info->irq_spinlock, flags);
7845 usc_get_serial_signals(info); 7849 usc_get_serial_signals(info);
7846 spin_unlock_irqrestore(&info->irq_spinlock, flags); 7850 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7847 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); 7851 if (info->serial_signals & SerialSignal_DCD)
7848 7852 netif_carrier_on(dev);
7853 else
7854 netif_carrier_off(dev);
7849 return 0; 7855 return 0;
7850} 7856}
7851 7857
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index e829594195c1..b2dbbdb1bf81 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -1497,8 +1497,10 @@ static int hdlcdev_open(struct net_device *dev)
1497 spin_lock_irqsave(&info->lock, flags); 1497 spin_lock_irqsave(&info->lock, flags);
1498 get_signals(info); 1498 get_signals(info);
1499 spin_unlock_irqrestore(&info->lock, flags); 1499 spin_unlock_irqrestore(&info->lock, flags);
1500 hdlc_set_carrier(info->signals & SerialSignal_DCD, dev); 1500 if (info->signals & SerialSignal_DCD)
1501 1501 netif_carrier_on(dev);
1502 else
1503 netif_carrier_off(dev);
1502 return 0; 1504 return 0;
1503} 1505}
1504 1506
@@ -1997,8 +1999,12 @@ static void dcd_change(struct slgt_info *info)
1997 info->input_signal_events.dcd_down++; 1999 info->input_signal_events.dcd_down++;
1998 } 2000 }
1999#ifdef CONFIG_HDLC 2001#ifdef CONFIG_HDLC
2000 if (info->netcount) 2002 if (info->netcount) {
2001 hdlc_set_carrier(info->signals & SerialSignal_DCD, info->netdev); 2003 if (info->signals & SerialSignal_DCD)
2004 netif_carrier_on(info->netdev);
2005 else
2006 netif_carrier_off(info->netdev);
2007 }
2002#endif 2008#endif
2003 wake_up_interruptible(&info->status_event_wait_q); 2009 wake_up_interruptible(&info->status_event_wait_q);
2004 wake_up_interruptible(&info->event_wait_q); 2010 wake_up_interruptible(&info->event_wait_q);
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 1e443a233f51..66f3754fbbdf 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -1752,8 +1752,10 @@ static int hdlcdev_open(struct net_device *dev)
1752 spin_lock_irqsave(&info->lock, flags); 1752 spin_lock_irqsave(&info->lock, flags);
1753 get_signals(info); 1753 get_signals(info);
1754 spin_unlock_irqrestore(&info->lock, flags); 1754 spin_unlock_irqrestore(&info->lock, flags);
1755 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); 1755 if (info->serial_signals & SerialSignal_DCD)
1756 1756 netif_carrier_on(dev);
1757 else
1758 netif_carrier_off(dev);
1757 return 0; 1759 return 0;
1758} 1760}
1759 1761
@@ -2522,8 +2524,12 @@ void isr_io_pin( SLMP_INFO *info, u16 status )
2522 } else 2524 } else
2523 info->input_signal_events.dcd_down++; 2525 info->input_signal_events.dcd_down++;
2524#ifdef CONFIG_HDLC 2526#ifdef CONFIG_HDLC
2525 if (info->netcount) 2527 if (info->netcount) {
2526 hdlc_set_carrier(status & SerialSignal_DCD, info->netdev); 2528 if (status & SerialSignal_DCD)
2529 netif_carrier_on(info->netdev);
2530 else
2531 netif_carrier_off(info->netdev);
2532 }
2527#endif 2533#endif
2528 } 2534 }
2529 if (status & MISCSTATUS_CTS_LATCHED) 2535 if (status & MISCSTATUS_CTS_LATCHED)
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6889e7db3aff..a082a2e34252 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1141,6 +1141,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
1141 put_device(dev); 1141 put_device(dev);
1142 clear_bit(chip->dev_num, dev_mask); 1142 clear_bit(chip->dev_num, dev_mask);
1143 kfree(chip); 1143 kfree(chip);
1144 kfree(devname);
1144 return NULL; 1145 return NULL;
1145 } 1146 }
1146 1147
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 3232b1932597..ee7ac6f43c65 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -424,6 +424,7 @@ static irqreturn_t tis_int_handler(int irq, void *dev_id, struct pt_regs *regs)
424 iowrite32(interrupt, 424 iowrite32(interrupt,
425 chip->vendor.iobase + 425 chip->vendor.iobase +
426 TPM_INT_STATUS(chip->vendor.locality)); 426 TPM_INT_STATUS(chip->vendor.locality));
427 ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
427 return IRQ_HANDLED; 428 return IRQ_HANDLED;
428} 429}
429 430
@@ -431,23 +432,19 @@ static int interrupts = 1;
431module_param(interrupts, bool, 0444); 432module_param(interrupts, bool, 0444);
432MODULE_PARM_DESC(interrupts, "Enable interrupts"); 433MODULE_PARM_DESC(interrupts, "Enable interrupts");
433 434
434static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, 435static int tpm_tis_init(struct device *dev, resource_size_t start,
435 const struct pnp_device_id *pnp_id) 436 resource_size_t len)
436{ 437{
437 u32 vendor, intfcaps, intmask; 438 u32 vendor, intfcaps, intmask;
438 int rc, i; 439 int rc, i;
439 unsigned long start, len;
440 struct tpm_chip *chip; 440 struct tpm_chip *chip;
441 441
442 start = pnp_mem_start(pnp_dev, 0);
443 len = pnp_mem_len(pnp_dev, 0);
444
445 if (!start) 442 if (!start)
446 start = TIS_MEM_BASE; 443 start = TIS_MEM_BASE;
447 if (!len) 444 if (!len)
448 len = TIS_MEM_LEN; 445 len = TIS_MEM_LEN;
449 446
450 if (!(chip = tpm_register_hardware(&pnp_dev->dev, &tpm_tis))) 447 if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
451 return -ENODEV; 448 return -ENODEV;
452 449
453 chip->vendor.iobase = ioremap(start, len); 450 chip->vendor.iobase = ioremap(start, len);
@@ -464,7 +461,7 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
464 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); 461 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
465 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); 462 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
466 463
467 dev_info(&pnp_dev->dev, 464 dev_info(dev,
468 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 465 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
469 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 466 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
470 467
@@ -472,26 +469,26 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
472 intfcaps = 469 intfcaps =
473 ioread32(chip->vendor.iobase + 470 ioread32(chip->vendor.iobase +
474 TPM_INTF_CAPS(chip->vendor.locality)); 471 TPM_INTF_CAPS(chip->vendor.locality));
475 dev_dbg(&pnp_dev->dev, "TPM interface capabilities (0x%x):\n", 472 dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
476 intfcaps); 473 intfcaps);
477 if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) 474 if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
478 dev_dbg(&pnp_dev->dev, "\tBurst Count Static\n"); 475 dev_dbg(dev, "\tBurst Count Static\n");
479 if (intfcaps & TPM_INTF_CMD_READY_INT) 476 if (intfcaps & TPM_INTF_CMD_READY_INT)
480 dev_dbg(&pnp_dev->dev, "\tCommand Ready Int Support\n"); 477 dev_dbg(dev, "\tCommand Ready Int Support\n");
481 if (intfcaps & TPM_INTF_INT_EDGE_FALLING) 478 if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
482 dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Falling\n"); 479 dev_dbg(dev, "\tInterrupt Edge Falling\n");
483 if (intfcaps & TPM_INTF_INT_EDGE_RISING) 480 if (intfcaps & TPM_INTF_INT_EDGE_RISING)
484 dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Rising\n"); 481 dev_dbg(dev, "\tInterrupt Edge Rising\n");
485 if (intfcaps & TPM_INTF_INT_LEVEL_LOW) 482 if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
486 dev_dbg(&pnp_dev->dev, "\tInterrupt Level Low\n"); 483 dev_dbg(dev, "\tInterrupt Level Low\n");
487 if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) 484 if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
488 dev_dbg(&pnp_dev->dev, "\tInterrupt Level High\n"); 485 dev_dbg(dev, "\tInterrupt Level High\n");
489 if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) 486 if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
490 dev_dbg(&pnp_dev->dev, "\tLocality Change Int Support\n"); 487 dev_dbg(dev, "\tLocality Change Int Support\n");
491 if (intfcaps & TPM_INTF_STS_VALID_INT) 488 if (intfcaps & TPM_INTF_STS_VALID_INT)
492 dev_dbg(&pnp_dev->dev, "\tSts Valid Int Support\n"); 489 dev_dbg(dev, "\tSts Valid Int Support\n");
493 if (intfcaps & TPM_INTF_DATA_AVAIL_INT) 490 if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
494 dev_dbg(&pnp_dev->dev, "\tData Avail Int Support\n"); 491 dev_dbg(dev, "\tData Avail Int Support\n");
495 492
496 if (request_locality(chip, 0) != 0) { 493 if (request_locality(chip, 0) != 0) {
497 rc = -ENODEV; 494 rc = -ENODEV;
@@ -594,6 +591,16 @@ out_err:
594 return rc; 591 return rc;
595} 592}
596 593
594static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
595 const struct pnp_device_id *pnp_id)
596{
597 resource_size_t start, len;
598 start = pnp_mem_start(pnp_dev, 0);
599 len = pnp_mem_len(pnp_dev, 0);
600
601 return tpm_tis_init(&pnp_dev->dev, start, len);
602}
603
597static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg) 604static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
598{ 605{
599 return tpm_pm_suspend(&dev->dev, msg); 606 return tpm_pm_suspend(&dev->dev, msg);
@@ -628,8 +635,36 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
628 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); 635 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
629MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); 636MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
630 637
638static struct device_driver tis_drv = {
639 .name = "tpm_tis",
640 .bus = &platform_bus_type,
641 .owner = THIS_MODULE,
642 .suspend = tpm_pm_suspend,
643 .resume = tpm_pm_resume,
644};
645
646static struct platform_device *pdev;
647
648static int force;
649module_param(force, bool, 0444);
650MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
631static int __init init_tis(void) 651static int __init init_tis(void)
632{ 652{
653 int rc;
654
655 if (force) {
656 rc = driver_register(&tis_drv);
657 if (rc < 0)
658 return rc;
659 if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
660 return PTR_ERR(pdev);
661 if((rc=tpm_tis_init(&pdev->dev, 0, 0)) != 0) {
662 platform_device_unregister(pdev);
663 driver_unregister(&tis_drv);
664 }
665 return rc;
666 }
667
633 return pnp_register_driver(&tis_pnp_driver); 668 return pnp_register_driver(&tis_pnp_driver);
634} 669}
635 670
@@ -654,7 +689,11 @@ static void __exit cleanup_tis(void)
654 tpm_remove_hardware(chip->dev); 689 tpm_remove_hardware(chip->dev);
655 } 690 }
656 spin_unlock(&tis_lock); 691 spin_unlock(&tis_lock);
657 pnp_unregister_driver(&tis_pnp_driver); 692 if (force) {
693 platform_device_unregister(pdev);
694 driver_unregister(&tis_drv);
695 } else
696 pnp_unregister_driver(&tis_pnp_driver);
658} 697}
659 698
660module_init(init_tis); 699module_init(init_tis);
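The tpm_tis rework above factors the PnP probe into a bus-agnostic tpm_tis_init(dev, start, len) and adds a force=1 module parameter that registers a bare platform device when ACPI/PnP does not describe the TPM. A condensed sketch of that probe split, with hypothetical example_* names rather than the driver's:

#include <linux/pnp.h>
#include <linux/platform_device.h>
#include <linux/err.h>

static int example_init(struct device *dev, resource_size_t start,
                        resource_size_t len)
{
        /* ioremap(start, len), register the chip against dev, ... */
        return 0;
}

static int __devinit example_pnp_probe(struct pnp_dev *pnp_dev,
                                       const struct pnp_device_id *id)
{
        /* enumerated path: take the MMIO window from the PnP resources */
        return example_init(&pnp_dev->dev, pnp_mem_start(pnp_dev, 0),
                            pnp_mem_len(pnp_dev, 0));
}

static int __init example_force_probe(void)
{
        /* forced path: make a platform device and let init use defaults */
        struct platform_device *pdev =
                platform_device_register_simple("example", -1, NULL, 0);

        if (IS_ERR(pdev))
                return PTR_ERR(pdev);
        return example_init(&pdev->dev, 0, 0);
}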
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d328186f774..bc1088d9b379 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -364,10 +364,12 @@ static ssize_t store_##file_name \
364 if (ret != 1) \ 364 if (ret != 1) \
365 return -EINVAL; \ 365 return -EINVAL; \
366 \ 366 \
367 lock_cpu_hotplug(); \
367 mutex_lock(&policy->lock); \ 368 mutex_lock(&policy->lock); \
368 ret = __cpufreq_set_policy(policy, &new_policy); \ 369 ret = __cpufreq_set_policy(policy, &new_policy); \
369 policy->user_policy.object = policy->object; \ 370 policy->user_policy.object = policy->object; \
370 mutex_unlock(&policy->lock); \ 371 mutex_unlock(&policy->lock); \
372 unlock_cpu_hotplug(); \
371 \ 373 \
372 return ret ? ret : count; \ 374 return ret ? ret : count; \
373} 375}
@@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
1197 *********************************************************************/ 1199 *********************************************************************/
1198 1200
1199 1201
1202/* Must be called with lock_cpu_hotplug held */
1200int __cpufreq_driver_target(struct cpufreq_policy *policy, 1203int __cpufreq_driver_target(struct cpufreq_policy *policy,
1201 unsigned int target_freq, 1204 unsigned int target_freq,
1202 unsigned int relation) 1205 unsigned int relation)
1203{ 1206{
1204 int retval = -EINVAL; 1207 int retval = -EINVAL;
1205 1208
1206 lock_cpu_hotplug();
1207 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, 1209 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1208 target_freq, relation); 1210 target_freq, relation);
1209 if (cpu_online(policy->cpu) && cpufreq_driver->target) 1211 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1210 retval = cpufreq_driver->target(policy, target_freq, relation); 1212 retval = cpufreq_driver->target(policy, target_freq, relation);
1211 1213
1212 unlock_cpu_hotplug();
1213
1214 return retval; 1214 return retval;
1215} 1215}
1216EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 1216EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
1225 if (!policy) 1225 if (!policy)
1226 return -EINVAL; 1226 return -EINVAL;
1227 1227
1228 lock_cpu_hotplug();
1228 mutex_lock(&policy->lock); 1229 mutex_lock(&policy->lock);
1229 1230
1230 ret = __cpufreq_driver_target(policy, target_freq, relation); 1231 ret = __cpufreq_driver_target(policy, target_freq, relation);
1231 1232
1232 mutex_unlock(&policy->lock); 1233 mutex_unlock(&policy->lock);
1234 unlock_cpu_hotplug();
1233 1235
1234 cpufreq_cpu_put(policy); 1236 cpufreq_cpu_put(policy);
1235 return ret; 1237 return ret;
1236} 1238}
1237EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1239EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1238 1240
1241/*
1242 * Locking: Must be called with the lock_cpu_hotplug() lock held
1243 * when "event" is CPUFREQ_GOV_LIMITS
1244 */
1239 1245
1240static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) 1246static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1241{ 1247{
@@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1257} 1263}
1258 1264
1259 1265
1260int cpufreq_governor(unsigned int cpu, unsigned int event)
1261{
1262 int ret = 0;
1263 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1264
1265 if (!policy)
1266 return -EINVAL;
1267
1268 mutex_lock(&policy->lock);
1269 ret = __cpufreq_governor(policy, event);
1270 mutex_unlock(&policy->lock);
1271
1272 cpufreq_cpu_put(policy);
1273 return ret;
1274}
1275EXPORT_SYMBOL_GPL(cpufreq_governor);
1276
1277
1278int cpufreq_register_governor(struct cpufreq_governor *governor) 1266int cpufreq_register_governor(struct cpufreq_governor *governor)
1279{ 1267{
1280 struct cpufreq_governor *t; 1268 struct cpufreq_governor *t;
@@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1342EXPORT_SYMBOL(cpufreq_get_policy); 1330EXPORT_SYMBOL(cpufreq_get_policy);
1343 1331
1344 1332
1333/*
1334 * Locking: Must be called with the lock_cpu_hotplug() lock held
1335 */
1345static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) 1336static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
1346{ 1337{
1347 int ret = 0; 1338 int ret = 0;
@@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
1436 if (!data) 1427 if (!data)
1437 return -EINVAL; 1428 return -EINVAL;
1438 1429
1430 lock_cpu_hotplug();
1431
1439 /* lock this CPU */ 1432 /* lock this CPU */
1440 mutex_lock(&data->lock); 1433 mutex_lock(&data->lock);
1441 1434
@@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
1446 data->user_policy.governor = data->governor; 1439 data->user_policy.governor = data->governor;
1447 1440
1448 mutex_unlock(&data->lock); 1441 mutex_unlock(&data->lock);
1442
1443 unlock_cpu_hotplug();
1449 cpufreq_cpu_put(data); 1444 cpufreq_cpu_put(data);
1450 1445
1451 return ret; 1446 return ret;
@@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu)
1469 if (!data) 1464 if (!data)
1470 return -ENODEV; 1465 return -ENODEV;
1471 1466
1467 lock_cpu_hotplug();
1472 mutex_lock(&data->lock); 1468 mutex_lock(&data->lock);
1473 1469
1474 dprintk("updating policy for CPU %u\n", cpu); 1470 dprintk("updating policy for CPU %u\n", cpu);
@@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu)
1494 ret = __cpufreq_set_policy(data, &policy); 1490 ret = __cpufreq_set_policy(data, &policy);
1495 1491
1496 mutex_unlock(&data->lock); 1492 mutex_unlock(&data->lock);
1497 1493 unlock_cpu_hotplug();
1498 cpufreq_cpu_put(data); 1494 cpufreq_cpu_put(data);
1499 return ret; 1495 return ret;
1500} 1496}
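The cpufreq core change above, together with the governor hunks that follow, moves cpu-hotplug locking out of __cpufreq_driver_target() and into its callers: the documented rule is now that the function is entered with lock_cpu_hotplug() already held, taken outside the per-policy mutex. A sketch of the resulting caller-side ordering (example_set_target is illustrative, not from the patch):

#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

static int example_set_target(struct cpufreq_policy *policy,
                              unsigned int freq)
{
        int ret;

        lock_cpu_hotplug();                  /* outer: cpu hotplug */
        mutex_lock(&policy->lock);           /* inner: per-policy lock */
        ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&policy->lock);
        unlock_cpu_hotplug();
        return ret;
}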
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b3ebc8f01975..c4c578defabf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
525 break; 525 break;
526 526
527 case CPUFREQ_GOV_LIMITS: 527 case CPUFREQ_GOV_LIMITS:
528 lock_cpu_hotplug();
529 mutex_lock(&dbs_mutex); 528 mutex_lock(&dbs_mutex);
530 if (policy->max < this_dbs_info->cur_policy->cur) 529 if (policy->max < this_dbs_info->cur_policy->cur)
531 __cpufreq_driver_target( 530 __cpufreq_driver_target(
@@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
536 this_dbs_info->cur_policy, 535 this_dbs_info->cur_policy,
537 policy->min, CPUFREQ_RELATION_L); 536 policy->min, CPUFREQ_RELATION_L);
538 mutex_unlock(&dbs_mutex); 537 mutex_unlock(&dbs_mutex);
539 unlock_cpu_hotplug();
540 break; 538 break;
541 } 539 }
542 return 0; 540 return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 87299924e735..52cf1f021825 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -239,6 +239,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
239 total_ticks = (unsigned int) cputime64_sub(cur_jiffies, 239 total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
240 this_dbs_info->prev_cpu_wall); 240 this_dbs_info->prev_cpu_wall);
241 this_dbs_info->prev_cpu_wall = cur_jiffies; 241 this_dbs_info->prev_cpu_wall = cur_jiffies;
242 if (!total_ticks)
243 return;
242 /* 244 /*
243 * Every sampling_rate, we check, if current idle time is less 245 * Every sampling_rate, we check, if current idle time is less
244 * than 20% (default), then we try to increase frequency 246 * than 20% (default), then we try to increase frequency
@@ -304,7 +306,12 @@ static void do_dbs_timer(void *data)
304 unsigned int cpu = smp_processor_id(); 306 unsigned int cpu = smp_processor_id();
305 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 307 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
306 308
309 if (!dbs_info->enable)
310 return;
311
312 lock_cpu_hotplug();
307 dbs_check_cpu(dbs_info); 313 dbs_check_cpu(dbs_info);
314 unlock_cpu_hotplug();
308 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 315 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
309 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 316 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
310} 317}
@@ -319,11 +326,11 @@ static inline void dbs_timer_init(unsigned int cpu)
319 return; 326 return;
320} 327}
321 328
322static inline void dbs_timer_exit(unsigned int cpu) 329static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
323{ 330{
324 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 331 dbs_info->enable = 0;
325 332 cancel_delayed_work(&dbs_info->work);
326 cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work); 333 flush_workqueue(kondemand_wq);
327} 334}
328 335
329static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 336static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -396,8 +403,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
396 403
397 case CPUFREQ_GOV_STOP: 404 case CPUFREQ_GOV_STOP:
398 mutex_lock(&dbs_mutex); 405 mutex_lock(&dbs_mutex);
399 dbs_timer_exit(policy->cpu); 406 dbs_timer_exit(this_dbs_info);
400 this_dbs_info->enable = 0;
401 sysfs_remove_group(&policy->kobj, &dbs_attr_group); 407 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
402 dbs_enable--; 408 dbs_enable--;
403 if (dbs_enable == 0) 409 if (dbs_enable == 0)
@@ -408,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
408 break; 414 break;
409 415
410 case CPUFREQ_GOV_LIMITS: 416 case CPUFREQ_GOV_LIMITS:
411 lock_cpu_hotplug();
412 mutex_lock(&dbs_mutex); 417 mutex_lock(&dbs_mutex);
413 if (policy->max < this_dbs_info->cur_policy->cur) 418 if (policy->max < this_dbs_info->cur_policy->cur)
414 __cpufreq_driver_target(this_dbs_info->cur_policy, 419 __cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -419,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
419 policy->min, 424 policy->min,
420 CPUFREQ_RELATION_L); 425 CPUFREQ_RELATION_L);
421 mutex_unlock(&dbs_mutex); 426 mutex_unlock(&dbs_mutex);
422 unlock_cpu_hotplug();
423 break; 427 break;
424 } 428 }
425 return 0; 429 return 0;
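The new dbs_timer_exit() above shows the teardown idiom for a self-rearming delayed work item: clear the enable flag first so a handler that is already running will not requeue itself, then cancel any queued instance and flush the workqueue to wait out an in-flight run. A generic sketch of the same idiom; the example_* names and the enable/work fields are stand-ins:

#include <linux/workqueue.h>

struct example_info {
        int enable;                      /* cleared to stop the rearm loop */
        struct work_struct work;         /* the delayed work item */
};

static struct workqueue_struct *example_wq;  /* assumed created elsewhere */

static void example_timer_exit(struct example_info *info)
{
        info->enable = 0;                      /* running handler sees this and bails out */
        cancel_delayed_work(&info->work);      /* drop a queued-but-idle instance */
        flush_workqueue(example_wq);           /* wait for an instance already running */
}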
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 44ae5e5b94cf..a06c204589cd 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -18,6 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/cpufreq.h> 20#include <linux/cpufreq.h>
21#include <linux/cpu.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/fs.h> 23#include <linux/fs.h>
23#include <linux/sysfs.h> 24#include <linux/sysfs.h>
@@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
70 71
71 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 72 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
72 73
74 lock_cpu_hotplug();
73 mutex_lock(&userspace_mutex); 75 mutex_lock(&userspace_mutex);
74 if (!cpu_is_managed[policy->cpu]) 76 if (!cpu_is_managed[policy->cpu])
75 goto err; 77 goto err;
@@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
92 94
93 err: 95 err:
94 mutex_unlock(&userspace_mutex); 96 mutex_unlock(&userspace_mutex);
97 unlock_cpu_hotplug();
95 return ret; 98 return ret;
96} 99}
97 100
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 17ee684144f9..b643d71298a9 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -59,6 +59,9 @@
59#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */ 59#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */
60#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) 60#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
61 61
62/* Whenever making any changes to the following
63 * structure *make sure* you keep E, d_data
64 * and cword aligned on 16 Bytes boundaries!!! */
62struct aes_ctx { 65struct aes_ctx {
63 struct { 66 struct {
64 struct cword encrypt; 67 struct cword encrypt;
@@ -66,8 +69,10 @@ struct aes_ctx {
66 } cword; 69 } cword;
67 u32 *D; 70 u32 *D;
68 int key_length; 71 int key_length;
69 u32 E[AES_EXTENDED_KEY_SIZE]; 72 u32 E[AES_EXTENDED_KEY_SIZE]
70 u32 d_data[AES_EXTENDED_KEY_SIZE]; 73 __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
74 u32 d_data[AES_EXTENDED_KEY_SIZE]
75 __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
71}; 76};
72 77
73/* ====== Key management routines ====== */ 78/* ====== Key management routines ====== */
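The padlock-aes hunk pins E and d_data to 16-byte boundaries with per-member alignment attributes (per the comment added in the patch), since the xcrypt instructions expect aligned key schedule data and structure layout otherwise depends on whatever precedes those members. A standalone illustration in plain C, not the driver; the 16 mirrors the 16-byte requirement stated in the patch comment:

#include <stdio.h>
#include <stddef.h>

#define ALIGN16 __attribute__((__aligned__(16)))

struct ctx {
        int key_length;
        unsigned int E[64] ALIGN16;       /* forced onto a 16-byte boundary */
        unsigned int d_data[64] ALIGN16;  /* likewise */
};

int main(void)
{
        /* both offsets print as multiples of 16 regardless of key_length */
        printf("offsetof(E)      = %zu\n", offsetof(struct ctx, E));
        printf("offsetof(d_data) = %zu\n", offsetof(struct ctx, d_data));
        return 0;
}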
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 78bf46d917b7..dbd4d6c3698e 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -828,7 +828,7 @@ static int __init ioat_init_module(void)
828 /* if forced, worst case is that rmmod hangs */ 828 /* if forced, worst case is that rmmod hangs */
829 __unsafe(THIS_MODULE); 829 __unsafe(THIS_MODULE);
830 830
831 return pci_module_init(&ioat_pci_drv); 831 return pci_register_driver(&ioat_pci_drv);
832} 832}
833 833
834module_init(ioat_init_module); 834module_init(ioat_init_module);
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index 66d03f242d3c..1a159e8843ca 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -429,7 +429,7 @@ static inline void fcp_scsi_receive(fc_channel *fc, int token, int status, fc_hd
429 429
430 if (fcmd->data) { 430 if (fcmd->data) {
431 if (SCpnt->use_sg) 431 if (SCpnt->use_sg)
432 dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->buffer, 432 dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->request_buffer,
433 SCpnt->use_sg, 433 SCpnt->use_sg,
434 SCpnt->sc_data_direction); 434 SCpnt->sc_data_direction);
435 else 435 else
@@ -810,7 +810,7 @@ static int fcp_scsi_queue_it(fc_channel *fc, Scsi_Cmnd *SCpnt, fcp_cmnd *fcmd, i
810 SCpnt->request_bufflen, 810 SCpnt->request_bufflen,
811 SCpnt->sc_data_direction); 811 SCpnt->sc_data_direction);
812 } else { 812 } else {
813 struct scatterlist *sg = (struct scatterlist *)SCpnt->buffer; 813 struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
814 int nents; 814 int nents;
815 815
816 FCD(("XXX: Use_sg %d %d\n", SCpnt->use_sg, sg->length)) 816 FCD(("XXX: Use_sg %d %d\n", SCpnt->use_sg, sg->length))
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f712e4cfd9dc..7cf3eb023521 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive)
776 * not available so we don't need to recheck that. 776 * not available so we don't need to recheck that.
777 */ 777 */
778 capacity = idedisk_capacity(drive); 778 capacity = idedisk_capacity(drive);
779 barrier = ide_id_has_flush_cache(id) && 779 barrier = ide_id_has_flush_cache(id) && !drive->noflush &&
780 (drive->addressing == 0 || capacity <= (1ULL << 28) || 780 (drive->addressing == 0 || capacity <= (1ULL << 28) ||
781 ide_id_has_flush_cache_ext(id)); 781 ide_id_has_flush_cache_ext(id));
782 782
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 98918fb6b2ce..7c3a13e1cf64 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive)
750 goto bug_dma_off; 750 goto bug_dma_off;
751 printk(", DMA"); 751 printk(", DMA");
752 } else if (id->field_valid & 1) { 752 } else if (id->field_valid & 1) {
753 printk(", BUG"); 753 goto bug_dma_off;
754 } 754 }
755 return; 755 return;
756bug_dma_off: 756bug_dma_off:
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 05fbd9298db7..defd4b4bd374 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s)
1539 const char *hd_words[] = { 1539 const char *hd_words[] = {
1540 "none", "noprobe", "nowerr", "cdrom", "serialize", 1540 "none", "noprobe", "nowerr", "cdrom", "serialize",
1541 "autotune", "noautotune", "minus8", "swapdata", "bswap", 1541 "autotune", "noautotune", "minus8", "swapdata", "bswap",
1542 "minus11", "remap", "remap63", "scsi", NULL }; 1542 "noflush", "remap", "remap63", "scsi", NULL };
1543 unit = s[2] - 'a'; 1543 unit = s[2] - 'a';
1544 hw = unit / MAX_DRIVES; 1544 hw = unit / MAX_DRIVES;
1545 unit = unit % MAX_DRIVES; 1545 unit = unit % MAX_DRIVES;
@@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s)
1578 case -10: /* "bswap" */ 1578 case -10: /* "bswap" */
1579 drive->bswap = 1; 1579 drive->bswap = 1;
1580 goto done; 1580 goto done;
1581 case -11: /* noflush */
1582 drive->noflush = 1;
1583 goto done;
1581 case -12: /* "remap" */ 1584 case -12: /* "remap" */
1582 drive->remap_0_to_1 = 1; 1585 drive->remap_0_to_1 = 1;
1583 goto done; 1586 goto done;
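Together with the ide-disk.c hunk earlier (barrier = ... && !drive->noflush ...), this adds a per-drive escape hatch for drives that misbehave on cache flush. As a usage note, booting with for example

        hda=noflush

sets drive->noflush and keeps update_ordered() from selecting flush-based write barriers; the drive name here is only illustrative, use whichever device needs it.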
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3cb04424d351..e9bad185968a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive)
498{ 498{
499 u8 speed = ide_dma_speed(drive, it821x_ratemask(drive)); 499 u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
500 500
501 config_it821x_chipset_for_pio(drive, !speed); 501 if (speed) {
502 it821x_tune_chipset(drive, speed); 502 config_it821x_chipset_for_pio(drive, 0);
503 return ide_dma_enable(drive); 503 it821x_tune_chipset(drive, speed);
504
505 return ide_dma_enable(drive);
506 }
507
508 return 0;
504} 509}
505 510
506/** 511/**
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 3f6705f3083a..f85c97f7500a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -701,7 +701,7 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
701 } 701 }
702} 702}
703 703
704void ib_destroy_cm_id(struct ib_cm_id *cm_id) 704static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
705{ 705{
706 struct cm_id_private *cm_id_priv; 706 struct cm_id_private *cm_id_priv;
707 struct cm_work *work; 707 struct cm_work *work;
@@ -735,12 +735,22 @@ retest:
735 sizeof cm_id_priv->av.port->cm_dev->ca_guid, 735 sizeof cm_id_priv->av.port->cm_dev->ca_guid,
736 NULL, 0); 736 NULL, 0);
737 break; 737 break;
738 case IB_CM_REQ_RCVD:
739 if (err == -ENOMEM) {
740 /* Do not reject to allow future retries. */
741 cm_reset_to_idle(cm_id_priv);
742 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
743 } else {
744 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
745 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
746 NULL, 0, NULL, 0);
747 }
748 break;
738 case IB_CM_MRA_REQ_RCVD: 749 case IB_CM_MRA_REQ_RCVD:
739 case IB_CM_REP_SENT: 750 case IB_CM_REP_SENT:
740 case IB_CM_MRA_REP_RCVD: 751 case IB_CM_MRA_REP_RCVD:
741 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 752 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
742 /* Fall through */ 753 /* Fall through */
743 case IB_CM_REQ_RCVD:
744 case IB_CM_MRA_REQ_SENT: 754 case IB_CM_MRA_REQ_SENT:
745 case IB_CM_REP_RCVD: 755 case IB_CM_REP_RCVD:
746 case IB_CM_MRA_REP_SENT: 756 case IB_CM_MRA_REP_SENT:
@@ -775,6 +785,11 @@ retest:
775 kfree(cm_id_priv->private_data); 785 kfree(cm_id_priv->private_data);
776 kfree(cm_id_priv); 786 kfree(cm_id_priv);
777} 787}
788
789void ib_destroy_cm_id(struct ib_cm_id *cm_id)
790{
791 cm_destroy_id(cm_id, 0);
792}
778EXPORT_SYMBOL(ib_destroy_cm_id); 793EXPORT_SYMBOL(ib_destroy_cm_id);
779 794
780int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, 795int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
@@ -1163,7 +1178,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
1163 } 1178 }
1164 cm_deref_id(cm_id_priv); 1179 cm_deref_id(cm_id_priv);
1165 if (ret) 1180 if (ret)
1166 ib_destroy_cm_id(&cm_id_priv->id); 1181 cm_destroy_id(&cm_id_priv->id, ret);
1167} 1182}
1168 1183
1169static void cm_format_mra(struct cm_mra_msg *mra_msg, 1184static void cm_format_mra(struct cm_mra_msg *mra_msg,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 863f64befc7c..d6f99d5720fc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -262,14 +262,14 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv)
262static int cma_acquire_ib_dev(struct rdma_id_private *id_priv) 262static int cma_acquire_ib_dev(struct rdma_id_private *id_priv)
263{ 263{
264 struct cma_device *cma_dev; 264 struct cma_device *cma_dev;
265 union ib_gid *gid; 265 union ib_gid gid;
266 int ret = -ENODEV; 266 int ret = -ENODEV;
267 267
268 gid = ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr); 268 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid),
269 269
270 mutex_lock(&lock); 270 mutex_lock(&lock);
271 list_for_each_entry(cma_dev, &dev_list, list) { 271 list_for_each_entry(cma_dev, &dev_list, list) {
272 ret = ib_find_cached_gid(cma_dev->device, gid, 272 ret = ib_find_cached_gid(cma_dev->device, &gid,
273 &id_priv->id.port_num, NULL); 273 &id_priv->id.port_num, NULL);
274 if (!ret) { 274 if (!ret) {
275 cma_attach_to_dev(id_priv, cma_dev); 275 cma_attach_to_dev(id_priv, cma_dev);
@@ -812,6 +812,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
812 cma_modify_qp_err(&id_priv->id); 812 cma_modify_qp_err(&id_priv->id);
813 status = ib_event->param.rej_rcvd.reason; 813 status = ib_event->param.rej_rcvd.reason;
814 event = RDMA_CM_EVENT_REJECTED; 814 event = RDMA_CM_EVENT_REJECTED;
815 private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
815 break; 816 break;
816 default: 817 default:
817 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d", 818 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
@@ -1134,8 +1135,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1134 struct ib_sa_path_rec path_rec; 1135 struct ib_sa_path_rec path_rec;
1135 1136
1136 memset(&path_rec, 0, sizeof path_rec); 1137 memset(&path_rec, 0, sizeof path_rec);
1137 path_rec.sgid = *ib_addr_get_sgid(addr); 1138 ib_addr_get_sgid(addr, &path_rec.sgid);
1138 path_rec.dgid = *ib_addr_get_dgid(addr); 1139 ib_addr_get_dgid(addr, &path_rec.dgid);
1139 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr)); 1140 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
1140 path_rec.numb_path = 1; 1141 path_rec.numb_path = 1;
1141 1142
@@ -1263,7 +1264,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
1263{ 1264{
1264 struct cma_device *cma_dev; 1265 struct cma_device *cma_dev;
1265 struct ib_port_attr port_attr; 1266 struct ib_port_attr port_attr;
1266 union ib_gid *gid; 1267 union ib_gid gid;
1267 u16 pkey; 1268 u16 pkey;
1268 int ret; 1269 int ret;
1269 u8 p; 1270 u8 p;
@@ -1284,8 +1285,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
1284 } 1285 }
1285 1286
1286port_found: 1287port_found:
1287 gid = ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr); 1288 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
1288 ret = ib_get_cached_gid(cma_dev->device, p, 0, gid);
1289 if (ret) 1289 if (ret)
1290 goto out; 1290 goto out;
1291 1291
@@ -1293,6 +1293,7 @@ port_found:
1293 if (ret) 1293 if (ret)
1294 goto out; 1294 goto out;
1295 1295
1296 ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1296 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 1297 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1297 id_priv->id.port_num = p; 1298 id_priv->id.port_num = p;
1298 cma_attach_to_dev(id_priv, cma_dev); 1299 cma_attach_to_dev(id_priv, cma_dev);
@@ -1339,6 +1340,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1339{ 1340{
1340 struct cma_work *work; 1341 struct cma_work *work;
1341 struct sockaddr_in *src_in, *dst_in; 1342 struct sockaddr_in *src_in, *dst_in;
1343 union ib_gid gid;
1342 int ret; 1344 int ret;
1343 1345
1344 work = kzalloc(sizeof *work, GFP_KERNEL); 1346 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -1351,8 +1353,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1351 goto err; 1353 goto err;
1352 } 1354 }
1353 1355
1354 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, 1356 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1355 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr)); 1357 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1356 1358
1357 if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) { 1359 if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
1358 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr; 1360 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
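The cma.c changes above follow an ib_addr API change: ib_addr_get_sgid()/ib_addr_get_dgid() now copy the GID into a caller-owned union ib_gid instead of handing back a pointer into the rdma_dev_addr storage. A sketch of the new calling convention, wrapped in a hypothetical helper:

#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

static int example_find_device(struct rdma_dev_addr *dev_addr,
                               struct ib_device *device, u8 *port_num)
{
        union ib_gid gid;                 /* caller-owned copy on the stack */

        ib_addr_get_sgid(dev_addr, &gid);
        return ib_find_cached_gid(device, &gid, port_num, NULL);
}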
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 615fe9cc6c56..86a3b2d401db 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -426,7 +426,7 @@ EXPORT_SYMBOL(ib_flush_fmr_pool);
426struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, 426struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
427 u64 *page_list, 427 u64 *page_list,
428 int list_len, 428 int list_len,
429 u64 *io_virtual_address) 429 u64 io_virtual_address)
430{ 430{
431 struct ib_fmr_pool *pool = pool_handle; 431 struct ib_fmr_pool *pool = pool_handle;
432 struct ib_pool_fmr *fmr; 432 struct ib_pool_fmr *fmr;
@@ -440,7 +440,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
440 fmr = ib_fmr_cache_lookup(pool, 440 fmr = ib_fmr_cache_lookup(pool,
441 page_list, 441 page_list,
442 list_len, 442 list_len,
443 *io_virtual_address); 443 io_virtual_address);
444 if (fmr) { 444 if (fmr) {
445 /* found in cache */ 445 /* found in cache */
446 ++fmr->ref_count; 446 ++fmr->ref_count;
@@ -464,7 +464,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
464 spin_unlock_irqrestore(&pool->pool_lock, flags); 464 spin_unlock_irqrestore(&pool->pool_lock, flags);
465 465
466 result = ib_map_phys_fmr(fmr->fmr, page_list, list_len, 466 result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
467 *io_virtual_address); 467 io_virtual_address);
468 468
469 if (result) { 469 if (result) {
470 spin_lock_irqsave(&pool->pool_lock, flags); 470 spin_lock_irqsave(&pool->pool_lock, flags);
@@ -481,7 +481,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
481 fmr->ref_count = 1; 481 fmr->ref_count = 1;
482 482
483 if (pool->cache_bucket) { 483 if (pool->cache_bucket) {
484 fmr->io_virtual_address = *io_virtual_address; 484 fmr->io_virtual_address = io_virtual_address;
485 fmr->page_list_len = list_len; 485 fmr->page_list_len = list_len;
486 memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list)); 486 memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
487 487
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed4dab52a6f..1c3cfbbe6a97 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
167 return 0; 167 return 0;
168} 168}
169 169
170int ib_response_mad(struct ib_mad *mad)
171{
172 return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
173 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
174 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
175 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
176}
177EXPORT_SYMBOL(ib_response_mad);
178
170/* 179/*
171 * ib_register_mad_agent - Register to send/receive MADs 180 * ib_register_mad_agent - Register to send/receive MADs
172 */ 181 */
@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
570} 579}
571EXPORT_SYMBOL(ib_unregister_mad_agent); 580EXPORT_SYMBOL(ib_unregister_mad_agent);
572 581
573static inline int response_mad(struct ib_mad *mad)
574{
575 /* Trap represses are responses although response bit is reset */
576 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
577 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
578}
579
580static void dequeue_mad(struct ib_mad_list_head *mad_list) 582static void dequeue_mad(struct ib_mad_list_head *mad_list)
581{ 583{
582 struct ib_mad_queue *mad_queue; 584 struct ib_mad_queue *mad_queue;
@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
723 switch (ret) 725 switch (ret)
724 { 726 {
725 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: 727 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
726 if (response_mad(&mad_priv->mad.mad) && 728 if (ib_response_mad(&mad_priv->mad.mad) &&
727 mad_agent_priv->agent.recv_handler) { 729 mad_agent_priv->agent.recv_handler) {
728 local->mad_priv = mad_priv; 730 local->mad_priv = mad_priv;
729 local->recv_mad_agent = mad_agent_priv; 731 local->recv_mad_agent = mad_agent_priv;
@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
1551 unsigned long flags; 1553 unsigned long flags;
1552 1554
1553 spin_lock_irqsave(&port_priv->reg_lock, flags); 1555 spin_lock_irqsave(&port_priv->reg_lock, flags);
1554 if (response_mad(mad)) { 1556 if (ib_response_mad(mad)) {
1555 u32 hi_tid; 1557 u32 hi_tid;
1556 struct ib_mad_agent_private *entry; 1558 struct ib_mad_agent_private *entry;
1557 1559
@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1799 } 1801 }
1800 1802
1801 /* Complete corresponding request */ 1803 /* Complete corresponding request */
1802 if (response_mad(mad_recv_wc->recv_buf.mad)) { 1804 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1803 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1805 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1804 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 1806 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1805 if (!mad_send_wr) { 1807 if (!mad_send_wr) {
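The helper formerly private to mad.c is now exported as ib_response_mad(), and it additionally classifies BM MADs whose attribute modifier carries the response bit. A small usage sketch (example_recv is hypothetical); the user_mad changes further below rely on the same helper for duplicate detection:

#include <rdma/ib_mad.h>

static void example_recv(struct ib_mad *mad)
{
        if (ib_response_mad(mad)) {
                /* response (including trap represses and BM responses):
                 * match it to an outstanding request, typically by TID */
        } else {
                /* unsolicited request: dispatch to the class handler */
        }
}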
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e911c99ff843..aeda484ffd82 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -488,13 +488,13 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
488 spin_unlock_irqrestore(&tid_lock, flags); 488 spin_unlock_irqrestore(&tid_lock, flags);
489} 489}
490 490
491static int send_mad(struct ib_sa_query *query, int timeout_ms) 491static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
492{ 492{
493 unsigned long flags; 493 unsigned long flags;
494 int ret, id; 494 int ret, id;
495 495
496retry: 496retry:
497 if (!idr_pre_get(&query_idr, GFP_ATOMIC)) 497 if (!idr_pre_get(&query_idr, gfp_mask))
498 return -ENOMEM; 498 return -ENOMEM;
499 spin_lock_irqsave(&idr_lock, flags); 499 spin_lock_irqsave(&idr_lock, flags);
500 ret = idr_get_new(&query_idr, query, &id); 500 ret = idr_get_new(&query_idr, query, &id);
@@ -630,7 +630,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
630 630
631 *sa_query = &query->sa_query; 631 *sa_query = &query->sa_query;
632 632
633 ret = send_mad(&query->sa_query, timeout_ms); 633 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
634 if (ret < 0) 634 if (ret < 0)
635 goto err2; 635 goto err2;
636 636
@@ -752,7 +752,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
752 752
753 *sa_query = &query->sa_query; 753 *sa_query = &query->sa_query;
754 754
755 ret = send_mad(&query->sa_query, timeout_ms); 755 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
756 if (ret < 0) 756 if (ret < 0)
757 goto err2; 757 goto err2;
758 758
@@ -844,7 +844,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
844 844
845 *sa_query = &query->sa_query; 845 *sa_query = &query->sa_query;
846 846
847 ret = send_mad(&query->sa_query, timeout_ms); 847 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
848 if (ret < 0) 848 if (ret < 0)
849 goto err2; 849 goto err2;
850 850
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index afe70a549c2f..1273f8807e84 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -112,8 +112,10 @@ struct ib_umad_device {
112struct ib_umad_file { 112struct ib_umad_file {
113 struct ib_umad_port *port; 113 struct ib_umad_port *port;
114 struct list_head recv_list; 114 struct list_head recv_list;
115 struct list_head send_list;
115 struct list_head port_list; 116 struct list_head port_list;
116 spinlock_t recv_lock; 117 spinlock_t recv_lock;
118 spinlock_t send_lock;
117 wait_queue_head_t recv_wait; 119 wait_queue_head_t recv_wait;
118 struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; 120 struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
119 int agents_dead; 121 int agents_dead;
@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
177 return ret; 179 return ret;
178} 180}
179 181
182static void dequeue_send(struct ib_umad_file *file,
183 struct ib_umad_packet *packet)
184 {
185 spin_lock_irq(&file->send_lock);
186 list_del(&packet->list);
187 spin_unlock_irq(&file->send_lock);
188 }
189
180static void send_handler(struct ib_mad_agent *agent, 190static void send_handler(struct ib_mad_agent *agent,
181 struct ib_mad_send_wc *send_wc) 191 struct ib_mad_send_wc *send_wc)
182{ 192{
183 struct ib_umad_file *file = agent->context; 193 struct ib_umad_file *file = agent->context;
184 struct ib_umad_packet *packet = send_wc->send_buf->context[0]; 194 struct ib_umad_packet *packet = send_wc->send_buf->context[0];
185 195
196 dequeue_send(file, packet);
186 ib_destroy_ah(packet->msg->ah); 197 ib_destroy_ah(packet->msg->ah);
187 ib_free_send_mad(packet->msg); 198 ib_free_send_mad(packet->msg);
188 199
@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
370 return 0; 381 return 0;
371} 382}
372 383
384static int same_destination(struct ib_user_mad_hdr *hdr1,
385 struct ib_user_mad_hdr *hdr2)
386{
387 if (!hdr1->grh_present && !hdr2->grh_present)
388 return (hdr1->lid == hdr2->lid);
389
390 if (hdr1->grh_present && hdr2->grh_present)
391 return !memcmp(hdr1->gid, hdr2->gid, 16);
392
393 return 0;
394}
395
396static int is_duplicate(struct ib_umad_file *file,
397 struct ib_umad_packet *packet)
398{
399 struct ib_umad_packet *sent_packet;
400 struct ib_mad_hdr *sent_hdr, *hdr;
401
402 hdr = (struct ib_mad_hdr *) packet->mad.data;
403 list_for_each_entry(sent_packet, &file->send_list, list) {
404 sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
405
406 if ((hdr->tid != sent_hdr->tid) ||
407 (hdr->mgmt_class != sent_hdr->mgmt_class))
408 continue;
409
410 /*
411 * No need to be overly clever here. If two new operations have
412 * the same TID, reject the second as a duplicate. This is more
413 * restrictive than required by the spec.
414 */
415 if (!ib_response_mad((struct ib_mad *) hdr)) {
416 if (!ib_response_mad((struct ib_mad *) sent_hdr))
417 return 1;
418 continue;
419 } else if (!ib_response_mad((struct ib_mad *) sent_hdr))
420 continue;
421
422 if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
423 return 1;
424 }
425
426 return 0;
427}
428
373static ssize_t ib_umad_write(struct file *filp, const char __user *buf, 429static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
374 size_t count, loff_t *pos) 430 size_t count, loff_t *pos)
375{ 431{
@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
379 struct ib_ah_attr ah_attr; 435 struct ib_ah_attr ah_attr;
380 struct ib_ah *ah; 436 struct ib_ah *ah;
381 struct ib_rmpp_mad *rmpp_mad; 437 struct ib_rmpp_mad *rmpp_mad;
382 u8 method;
383 __be64 *tid; 438 __be64 *tid;
384 int ret, data_len, hdr_len, copy_offset, rmpp_active; 439 int ret, data_len, hdr_len, copy_offset, rmpp_active;
385 440
@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
473 } 528 }
474 529
475 /* 530 /*
476 * If userspace is generating a request that will generate a 531 * Set the high-order part of the transaction ID to make MADs from
477 * response, we need to make sure the high-order part of the 532 * different agents unique, and allow routing responses back to the
478 * transaction ID matches the agent being used to send the 533 * original requestor.
479 * MAD.
480 */ 534 */
481 method = ((struct ib_mad_hdr *) packet->msg->mad)->method; 535 if (!ib_response_mad(packet->msg->mad)) {
482
483 if (!(method & IB_MGMT_METHOD_RESP) &&
484 method != IB_MGMT_METHOD_TRAP_REPRESS &&
485 method != IB_MGMT_METHOD_SEND) {
486 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; 536 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
487 *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | 537 *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
488 (be64_to_cpup(tid) & 0xffffffff)); 538 (be64_to_cpup(tid) & 0xffffffff));
539 rmpp_mad->mad_hdr.tid = *tid;
540 }
541
542 spin_lock_irq(&file->send_lock);
543 ret = is_duplicate(file, packet);
544 if (!ret)
545 list_add_tail(&packet->list, &file->send_list);
546 spin_unlock_irq(&file->send_lock);
547 if (ret) {
548 ret = -EINVAL;
549 goto err_msg;
489 } 550 }
490 551
491 ret = ib_post_send_mad(packet->msg, NULL); 552 ret = ib_post_send_mad(packet->msg, NULL);
492 if (ret) 553 if (ret)
493 goto err_msg; 554 goto err_send;
494 555
495 up_read(&file->port->mutex); 556 up_read(&file->port->mutex);
496 return count; 557 return count;
497 558
559err_send:
560 dequeue_send(file, packet);
498err_msg: 561err_msg:
499 ib_free_send_mad(packet->msg); 562 ib_free_send_mad(packet->msg);
500err_ah: 563err_ah:
@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
657 } 720 }
658 721
659 spin_lock_init(&file->recv_lock); 722 spin_lock_init(&file->recv_lock);
723 spin_lock_init(&file->send_lock);
660 INIT_LIST_HEAD(&file->recv_list); 724 INIT_LIST_HEAD(&file->recv_list);
725 INIT_LIST_HEAD(&file->send_list);
661 init_waitqueue_head(&file->recv_wait); 726 init_waitqueue_head(&file->recv_wait);
662 727
663 file->port = port; 728 file->port = port;
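The user_mad change above tracks outstanding sends per file and rejects a new send that collides with one already queued: two requests with the same TID and management class are duplicates, and a response only duplicates a response headed for the same destination (same LID, or same GID when a GRH is present). A standalone plain-C model of that check, with simplified stand-in fields rather than the real MAD headers:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct pkt {
        unsigned long long tid;
        unsigned char mgmt_class;
        int is_response;
        int grh_present;
        unsigned short lid;
        unsigned char gid[16];
};

static int same_destination(const struct pkt *a, const struct pkt *b)
{
        if (!a->grh_present && !b->grh_present)
                return a->lid == b->lid;
        if (a->grh_present && b->grh_present)
                return !memcmp(a->gid, b->gid, 16);
        return 0;
}

static int is_duplicate(const struct pkt *cand, const struct pkt *sent,
                        size_t nsent)
{
        size_t i;

        for (i = 0; i < nsent; i++) {
                const struct pkt *s = &sent[i];

                if (cand->tid != s->tid || cand->mgmt_class != s->mgmt_class)
                        continue;
                if (!cand->is_response) {
                        if (!s->is_response)
                                return 1;      /* two requests, same TID */
                        continue;
                }
                if (!s->is_response)
                        continue;
                if (same_destination(cand, s))
                        return 1;              /* two responses, same target */
        }
        return 0;
}

int main(void)
{
        struct pkt sent[] = { { .tid = 1, .mgmt_class = 3, .lid = 7 } };
        struct pkt again = { .tid = 1, .mgmt_class = 3, .lid = 9 };

        printf("duplicate: %d\n", is_duplicate(&again, sent, 1)); /* prints 1 */
        return 0;
}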
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bdf5d5098190..30923eb68ec7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -42,6 +42,13 @@
42 42
43#include "uverbs.h" 43#include "uverbs.h"
44 44
45static struct lock_class_key pd_lock_key;
46static struct lock_class_key mr_lock_key;
47static struct lock_class_key cq_lock_key;
48static struct lock_class_key qp_lock_key;
49static struct lock_class_key ah_lock_key;
50static struct lock_class_key srq_lock_key;
51
45#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 52#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
46 do { \ 53 do { \
47 (udata)->inbuf = (void __user *) (ibuf); \ 54 (udata)->inbuf = (void __user *) (ibuf); \
@@ -76,12 +83,13 @@
76 */ 83 */
77 84
78static void init_uobj(struct ib_uobject *uobj, u64 user_handle, 85static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
79 struct ib_ucontext *context) 86 struct ib_ucontext *context, struct lock_class_key *key)
80{ 87{
81 uobj->user_handle = user_handle; 88 uobj->user_handle = user_handle;
82 uobj->context = context; 89 uobj->context = context;
83 kref_init(&uobj->ref); 90 kref_init(&uobj->ref);
84 init_rwsem(&uobj->mutex); 91 init_rwsem(&uobj->mutex);
92 lockdep_set_class(&uobj->mutex, key);
85 uobj->live = 0; 93 uobj->live = 0;
86} 94}
87 95
@@ -470,7 +478,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
470 if (!uobj) 478 if (!uobj)
471 return -ENOMEM; 479 return -ENOMEM;
472 480
473 init_uobj(uobj, 0, file->ucontext); 481 init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
474 down_write(&uobj->mutex); 482 down_write(&uobj->mutex);
475 483
476 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, 484 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
@@ -591,7 +599,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
591 if (!obj) 599 if (!obj)
592 return -ENOMEM; 600 return -ENOMEM;
593 601
594 init_uobj(&obj->uobject, 0, file->ucontext); 602 init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
595 down_write(&obj->uobject.mutex); 603 down_write(&obj->uobject.mutex);
596 604
597 /* 605 /*
@@ -770,7 +778,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
770 if (!obj) 778 if (!obj)
771 return -ENOMEM; 779 return -ENOMEM;
772 780
773 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext); 781 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
774 down_write(&obj->uobject.mutex); 782 down_write(&obj->uobject.mutex);
775 783
776 if (cmd.comp_channel >= 0) { 784 if (cmd.comp_channel >= 0) {
@@ -1051,13 +1059,14 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1051 if (!obj) 1059 if (!obj)
1052 return -ENOMEM; 1060 return -ENOMEM;
1053 1061
1054 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext); 1062 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
1055 down_write(&obj->uevent.uobject.mutex); 1063 down_write(&obj->uevent.uobject.mutex);
1056 1064
1065 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
1057 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1066 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1058 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext); 1067 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
1059 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext); 1068 rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
1060 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; 1069 scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
1061 1070
1062 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { 1071 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
1063 ret = -EINVAL; 1072 ret = -EINVAL;
@@ -1125,7 +1134,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1125 1134
1126 put_pd_read(pd); 1135 put_pd_read(pd);
1127 put_cq_read(scq); 1136 put_cq_read(scq);
1128 put_cq_read(rcq); 1137 if (rcq != scq)
1138 put_cq_read(rcq);
1129 if (srq) 1139 if (srq)
1130 put_srq_read(srq); 1140 put_srq_read(srq);
1131 1141
@@ -1150,7 +1160,7 @@ err_put:
1150 put_pd_read(pd); 1160 put_pd_read(pd);
1151 if (scq) 1161 if (scq)
1152 put_cq_read(scq); 1162 put_cq_read(scq);
1153 if (rcq) 1163 if (rcq && rcq != scq)
1154 put_cq_read(rcq); 1164 put_cq_read(rcq);
1155 if (srq) 1165 if (srq)
1156 put_srq_read(srq); 1166 put_srq_read(srq);
@@ -1751,7 +1761,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
1751 if (!uobj) 1761 if (!uobj)
1752 return -ENOMEM; 1762 return -ENOMEM;
1753 1763
1754 init_uobj(uobj, cmd.user_handle, file->ucontext); 1764 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
1755 down_write(&uobj->mutex); 1765 down_write(&uobj->mutex);
1756 1766
1757 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1767 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1775,7 +1785,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
1775 ah = ib_create_ah(pd, &attr); 1785 ah = ib_create_ah(pd, &attr);
1776 if (IS_ERR(ah)) { 1786 if (IS_ERR(ah)) {
1777 ret = PTR_ERR(ah); 1787 ret = PTR_ERR(ah);
1778 goto err; 1788 goto err_put;
1779 } 1789 }
1780 1790
1781 ah->uobject = uobj; 1791 ah->uobject = uobj;
@@ -1811,6 +1821,9 @@ err_copy:
1811err_destroy: 1821err_destroy:
1812 ib_destroy_ah(ah); 1822 ib_destroy_ah(ah);
1813 1823
1824err_put:
1825 put_pd_read(pd);
1826
1814err: 1827err:
1815 put_uobj_write(uobj); 1828 put_uobj_write(uobj);
1816 return ret; 1829 return ret;
@@ -1963,7 +1976,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1963 if (!obj) 1976 if (!obj)
1964 return -ENOMEM; 1977 return -ENOMEM;
1965 1978
1966 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext); 1979 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
1967 down_write(&obj->uobject.mutex); 1980 down_write(&obj->uobject.mutex);
1968 1981
1969 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1982 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1984,7 +1997,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1984 srq = pd->device->create_srq(pd, &attr, &udata); 1997 srq = pd->device->create_srq(pd, &attr, &udata);
1985 if (IS_ERR(srq)) { 1998 if (IS_ERR(srq)) {
1986 ret = PTR_ERR(srq); 1999 ret = PTR_ERR(srq);
1987 goto err; 2000 goto err_put;
1988 } 2001 }
1989 2002
1990 srq->device = pd->device; 2003 srq->device = pd->device;
@@ -2029,6 +2042,9 @@ err_copy:
2029err_destroy: 2042err_destroy:
2030 ib_destroy_srq(srq); 2043 ib_destroy_srq(srq);
2031 2044
2045err_put:
2046 put_pd_read(pd);
2047
2032err: 2048err:
2033 put_uobj_write(&obj->uobject); 2049 put_uobj_write(&obj->uobject);
2034 return ret; 2050 return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 823131d58b34..f98518d912b5 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -859,6 +859,38 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
859 __ipath_layer_rcv_lid(dd, hdr); 859 __ipath_layer_rcv_lid(dd, hdr);
860} 860}
861 861
862static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
863 u32 eflags,
864 u32 l,
865 u32 etail,
866 u64 *rc)
867{
868 char emsg[128];
869 struct ipath_message_header *hdr;
870
871 get_rhf_errstring(eflags, emsg, sizeof emsg);
872 hdr = (struct ipath_message_header *)&rc[1];
873 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
874 "tlen=%x opcode=%x egridx=%x: %s\n",
875 eflags, l,
876 ipath_hdrget_rcv_type((__le32 *) rc),
877 ipath_hdrget_length_in_bytes((__le32 *) rc),
878 be32_to_cpu(hdr->bth[0]) >> 24,
879 etail, emsg);
880
881 /* Count local link integrity errors. */
882 if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
883 u8 n = (dd->ipath_ibcctrl >>
884 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
885 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
886
887 if (++dd->ipath_lli_counter > n) {
888 dd->ipath_lli_counter = 0;
889 dd->ipath_lli_errors++;
890 }
891 }
892}
893
862/* 894/*
863 * ipath_kreceive - receive a packet 895 * ipath_kreceive - receive a packet
864 * @dd: the infinipath device 896 * @dd: the infinipath device
@@ -875,7 +907,6 @@ void ipath_kreceive(struct ipath_devdata *dd)
875 struct ipath_message_header *hdr; 907 struct ipath_message_header *hdr;
876 u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0; 908 u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
877 static u64 totcalls; /* stats, may eventually remove */ 909 static u64 totcalls; /* stats, may eventually remove */
878 char emsg[128];
879 910
880 if (!dd->ipath_hdrqtailptr) { 911 if (!dd->ipath_hdrqtailptr) {
881 ipath_dev_err(dd, 912 ipath_dev_err(dd,
@@ -938,26 +969,9 @@ reloop:
938 "%x\n", etype); 969 "%x\n", etype);
939 } 970 }
940 971
941 if (eflags & ~(INFINIPATH_RHF_H_TIDERR | 972 if (unlikely(eflags))
942 INFINIPATH_RHF_H_IHDRERR)) { 973 ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
943 get_rhf_errstring(eflags, emsg, sizeof emsg); 974 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
944 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
945 "tlen=%x opcode=%x egridx=%x: %s\n",
946 eflags, l, etype, tlen, bthbytes[0],
947 ipath_hdrget_index((__le32 *) rc), emsg);
948 /* Count local link integrity errors. */
949 if (eflags & (INFINIPATH_RHF_H_ICRCERR |
950 INFINIPATH_RHF_H_VCRCERR)) {
951 u8 n = (dd->ipath_ibcctrl >>
952 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
953 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
954
955 if (++dd->ipath_lli_counter > n) {
956 dd->ipath_lli_counter = 0;
957 dd->ipath_lli_errors++;
958 }
959 }
960 } else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
961 int ret = __ipath_verbs_rcv(dd, rc + 1, 975 int ret = __ipath_verbs_rcv(dd, rc + 1,
962 ebuf, tlen); 976 ebuf, tlen);
963 if (ret == -ENODEV) 977 if (ret == -ENODEV)
@@ -981,25 +995,7 @@ reloop:
981 else if (etype == RCVHQ_RCV_TYPE_EXPECTED) 995 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
982 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", 996 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
983 be32_to_cpu(hdr->bth[0]) & 0xff); 997 be32_to_cpu(hdr->bth[0]) & 0xff);
984 else if (eflags & (INFINIPATH_RHF_H_TIDERR | 998 else {
985 INFINIPATH_RHF_H_IHDRERR)) {
986 /*
987 * This is a type 3 packet, only the LRH is in the
988 * rcvhdrq, the rest of the header is in the eager
989 * buffer.
990 */
991 u8 opcode;
992 if (ebuf) {
993 bthbytes = (u8 *) ebuf;
994 opcode = *bthbytes;
995 }
996 else
997 opcode = 0;
998 get_rhf_errstring(eflags, emsg, sizeof emsg);
999 ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
1000 "len %x\n", eflags, emsg, opcode, etail,
1001 tlen);
1002 } else {
1003 /* 999 /*
1004 * error packet, type of error unknown. 1000 * error packet, type of error unknown.
1005 * Probably type 3, but we don't know, so don't 1001 * Probably type 3, but we don't know, so don't
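
Editor's note: the ipath_driver.c change above factors header-error handling into ipath_rcv_hdrerr() and counts local link integrity errors only once a programmable threshold of consecutive CRC-type errors is crossed. A small standalone sketch of that threshold-counting pattern follows; the struct and names are simplified stand-ins, not the driver's types.

/*
 * Illustrative sketch: raw error events bump a counter, and only every
 * (threshold + 1)-th event rolls over into the reported statistic.
 */
#include <stdio.h>

struct dev_stats {
	unsigned int lli_counter;   /* consecutive qualifying errors      */
	unsigned int lli_errors;    /* errors actually reported upward    */
	unsigned int threshold;     /* would come from hardware config    */
};

static void count_link_integrity_error(struct dev_stats *s)
{
	if (++s->lli_counter > s->threshold) {
		s->lli_counter = 0;
		s->lli_errors++;
	}
}

int main(void)
{
	struct dev_stats s = { .threshold = 3 };
	int i;

	for (i = 0; i < 10; i++)
		count_link_integrity_error(&s);

	printf("reported errors: %u (raw events: 10)\n", s.lli_errors);
	return 0;
}
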
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 46773c673a1a..a5ca279370aa 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -197,6 +197,21 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
197 size_t off; 197 size_t off;
198 int ret; 198 int ret;
199 199
200 /*
201 * We use RKEY == zero for physical addresses
202 * (see ipath_get_dma_mr).
203 */
204 if (rkey == 0) {
205 sge->mr = NULL;
206 sge->vaddr = phys_to_virt(vaddr);
207 sge->length = len;
208 sge->sge_length = len;
209 ss->sg_list = NULL;
210 ss->num_sge = 1;
211 ret = 1;
212 goto bail;
213 }
214
200 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; 215 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
201 if (unlikely(mr == NULL || mr->lkey != rkey)) { 216 if (unlikely(mr == NULL || mr->lkey != rkey)) {
202 ret = 0; 217 ret = 0;
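
Editor's note: the ipath_keys.c hunk adds a fast path where an RKEY of zero means physical addressing, so the SGE is filled in directly and the memory-region table is never consulted. Below is a hedged userspace sketch of that shortcut; translate() and the types are hypothetical stand-ins for the driver's address handling.

/*
 * Illustrative sketch: rkey == 0 bypasses the MR table lookup entirely.
 */
#include <stdint.h>
#include <stdio.h>

struct sge { void *vaddr; uint32_t length; };

static void *translate(uint64_t addr)
{
	return (void *)(uintptr_t)addr;     /* stand-in for phys-to-virt */
}

/* returns 1 if the SGE was filled in, 0 otherwise */
static int rkey_ok(struct sge *sge, uint32_t rkey, uint64_t vaddr, uint32_t len)
{
	if (rkey == 0) {                    /* physical address: no lookup */
		sge->vaddr  = translate(vaddr);
		sge->length = len;
		return 1;
	}
	/* normal path would look the region up in the rkey table */
	return 0;
}

int main(void)
{
	struct sge sge;

	printf("rkey 0 accepted: %d\n", rkey_ok(&sge, 0, 0x1000, 64));
	return 0;
}
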
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 56ac336dd1ec..d70a9b6b5239 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -191,10 +191,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
191{ 191{
192 struct ipath_sge *sge = &ss->sge; 192 struct ipath_sge *sge = &ss->sge;
193 193
194 while (length > sge->sge_length) {
195 length -= sge->sge_length;
196 ss->sge = *ss->sg_list++;
197 }
198 while (length) { 194 while (length) {
199 u32 len = sge->length; 195 u32 len = sge->length;
200 196
@@ -627,6 +623,7 @@ static int ipath_query_device(struct ib_device *ibdev,
627 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | 623 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
628 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 624 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
629 IB_DEVICE_SYS_IMAGE_GUID; 625 IB_DEVICE_SYS_IMAGE_GUID;
626 props->page_size_cap = PAGE_SIZE;
630 props->vendor_id = ipath_layer_get_vendorid(dev->dd); 627 props->vendor_id = ipath_layer_get_vendorid(dev->dd);
631 props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); 628 props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
632 props->hw_ver = ipath_layer_get_pcirev(dev->dd); 629 props->hw_ver = ipath_layer_get_pcirev(dev->dd);
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index b12aa03be251..e215041b2db9 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -303,9 +303,10 @@ int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr)
303 memset(attr, 0, sizeof *attr); 303 memset(attr, 0, sizeof *attr);
304 attr->dlid = be16_to_cpu(ah->av->dlid); 304 attr->dlid = be16_to_cpu(ah->av->dlid);
305 attr->sl = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28; 305 attr->sl = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
306 attr->static_rate = ah->av->msg_sr & 0x7;
307 attr->src_path_bits = ah->av->g_slid & 0x7F;
308 attr->port_num = be32_to_cpu(ah->av->port_pd) >> 24; 306 attr->port_num = be32_to_cpu(ah->av->port_pd) >> 24;
307 attr->static_rate = mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
308 attr->port_num);
309 attr->src_path_bits = ah->av->g_slid & 0x7F;
309 attr->ah_flags = mthca_ah_grh_present(ah) ? IB_AH_GRH : 0; 310 attr->ah_flags = mthca_ah_grh_present(ah) ? IB_AH_GRH : 0;
310 311
311 if (attr->ah_flags) { 312 if (attr->ah_flags) {
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index d0f7731802c9..deabc14b4ea4 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -778,11 +778,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
778 ((dev->fw_ver & 0xffff0000ull) >> 16) | 778 ((dev->fw_ver & 0xffff0000ull) >> 16) |
779 ((dev->fw_ver & 0x0000ffffull) << 16); 779 ((dev->fw_ver & 0x0000ffffull) << 16);
780 780
781 MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
782 dev->cmd.max_cmds = 1 << lg;
783
781 mthca_dbg(dev, "FW version %012llx, max commands %d\n", 784 mthca_dbg(dev, "FW version %012llx, max commands %d\n",
782 (unsigned long long) dev->fw_ver, dev->cmd.max_cmds); 785 (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
783 786
784 MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
785 dev->cmd.max_cmds = 1 << lg;
786 MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET); 787 MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
787 MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 788 MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
788 789
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 490fc783bb0c..cd8b6721ac9c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -222,9 +222,8 @@ static void *get_send_wqe(struct mthca_qp *qp, int n)
222 (PAGE_SIZE - 1)); 222 (PAGE_SIZE - 1));
223} 223}
224 224
225static void mthca_wq_init(struct mthca_wq *wq) 225static void mthca_wq_reset(struct mthca_wq *wq)
226{ 226{
227 /* mthca_alloc_qp_common() initializes the locks */
228 wq->next_ind = 0; 227 wq->next_ind = 0;
229 wq->last_comp = wq->max - 1; 228 wq->last_comp = wq->max - 1;
230 wq->head = 0; 229 wq->head = 0;
@@ -845,10 +844,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
845 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, 844 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
846 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 845 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
847 846
848 mthca_wq_init(&qp->sq); 847 mthca_wq_reset(&qp->sq);
849 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 848 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
850 849
851 mthca_wq_init(&qp->rq); 850 mthca_wq_reset(&qp->rq);
852 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); 851 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
853 852
854 if (mthca_is_memfree(dev)) { 853 if (mthca_is_memfree(dev)) {
@@ -1112,9 +1111,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1112 qp->atomic_rd_en = 0; 1111 qp->atomic_rd_en = 0;
1113 qp->resp_depth = 0; 1112 qp->resp_depth = 0;
1114 qp->sq_policy = send_policy; 1113 qp->sq_policy = send_policy;
1115 mthca_wq_init(&qp->sq); 1114 mthca_wq_reset(&qp->sq);
1116 mthca_wq_init(&qp->rq); 1115 mthca_wq_reset(&qp->rq);
1117 /* these are initialized separately so lockdep can tell them apart */ 1116
1118 spin_lock_init(&qp->sq.lock); 1117 spin_lock_init(&qp->sq.lock);
1119 spin_lock_init(&qp->rq.lock); 1118 spin_lock_init(&qp->rq.lock);
1120 1119
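
Editor's note: the mthca_qp.c rename from mthca_wq_init() to mthca_wq_reset() makes explicit that lock initialisation happens exactly once at allocation time, while the index state may be reset repeatedly. A simplified standalone sketch of that split follows; the types are stand-ins, not the mthca structures.

/*
 * Illustrative sketch: one-time init (locks) vs. repeatable reset (indices).
 * Build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

struct wq {
	pthread_mutex_t lock;     /* initialised once, never re-initialised */
	unsigned int max;
	unsigned int next_ind, last_comp, head, tail;
};

static void wq_reset(struct wq *wq)              /* safe to call repeatedly */
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

static void wq_init(struct wq *wq, unsigned int max)   /* one-time setup */
{
	wq->max = max;
	pthread_mutex_init(&wq->lock, NULL);
	wq_reset(wq);
}

int main(void)
{
	struct wq wq;

	wq_init(&wq, 16);

	pthread_mutex_lock(&wq.lock);
	wq_reset(&wq);            /* e.g. queue moved back to the RESET state */
	pthread_mutex_unlock(&wq.lock);

	printf("last_comp = %u\n", wq.last_comp);
	return 0;
}
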
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index fab417c5cf43..b60a9d79ae54 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -370,7 +370,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
370 return -EINVAL; 370 return -EINVAL;
371 371
372 if (attr_mask & IB_SRQ_LIMIT) { 372 if (attr_mask & IB_SRQ_LIMIT) {
373 if (attr->srq_limit > srq->max) 373 u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
374 if (attr->srq_limit > max_wr)
374 return -EINVAL; 375 return -EINVAL;
375 376
376 mutex_lock(&srq->mutex); 377 mutex_lock(&srq->mutex);
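
Editor's note: the mthca_srq.c hunk tightens the SRQ limit check because mem-free HCAs reserve one work-queue entry, so the largest limit a consumer may arm is max - 1 rather than max. A tiny standalone sketch of that bound, with simplified stand-in types:

/*
 * Illustrative sketch of the max_wr computation used in the check above.
 */
#include <stdio.h>

struct srq { unsigned int max; int is_memfree; };

static int srq_limit_valid(const struct srq *srq, unsigned int limit)
{
	unsigned int max_wr = srq->is_memfree ? srq->max - 1 : srq->max;

	return limit <= max_wr;
}

int main(void)
{
	struct srq srq = { .max = 64, .is_memfree = 1 };

	printf("limit 64 valid: %d\n", srq_limit_valid(&srq, 64)); /* 0 */
	printf("limit 63 valid: %d\n", srq_limit_valid(&srq, 63)); /* 1 */
	return 0;
}
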
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 3f89f5e19036..474aa214ab57 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -212,6 +212,7 @@ struct ipoib_path {
212 212
213struct ipoib_neigh { 213struct ipoib_neigh {
214 struct ipoib_ah *ah; 214 struct ipoib_ah *ah;
215 union ib_gid dgid;
215 struct sk_buff_head queue; 216 struct sk_buff_head queue;
216 217
217 struct neighbour *neighbour; 218 struct neighbour *neighbour;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1c6ea1c682a5..cf71d2a5515c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -404,6 +404,8 @@ static void path_rec_completion(int status,
404 list_for_each_entry(neigh, &path->neigh_list, list) { 404 list_for_each_entry(neigh, &path->neigh_list, list) {
405 kref_get(&path->ah->ref); 405 kref_get(&path->ah->ref);
406 neigh->ah = path->ah; 406 neigh->ah = path->ah;
407 memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
408 sizeof(union ib_gid));
407 409
408 while ((skb = __skb_dequeue(&neigh->queue))) 410 while ((skb = __skb_dequeue(&neigh->queue)))
409 __skb_queue_tail(&skqueue, skb); 411 __skb_queue_tail(&skqueue, skb);
@@ -510,6 +512,8 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
510 if (path->ah) { 512 if (path->ah) {
511 kref_get(&path->ah->ref); 513 kref_get(&path->ah->ref);
512 neigh->ah = path->ah; 514 neigh->ah = path->ah;
515 memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
516 sizeof(union ib_gid));
513 517
514 ipoib_send(dev, skb, path->ah, 518 ipoib_send(dev, skb, path->ah,
515 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 519 be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
@@ -633,6 +637,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
633 neigh = *to_ipoib_neigh(skb->dst->neighbour); 637 neigh = *to_ipoib_neigh(skb->dst->neighbour);
634 638
635 if (likely(neigh->ah)) { 639 if (likely(neigh->ah)) {
640 if (unlikely(memcmp(&neigh->dgid.raw,
641 skb->dst->neighbour->ha + 4,
642 sizeof(union ib_gid)))) {
643 spin_lock(&priv->lock);
644 /*
645 * It's safe to call ipoib_put_ah() inside
646 * priv->lock here, because we know that
647 * path->ah will always hold one more reference,
648 * so ipoib_put_ah() will never do more than
649 * decrement the ref count.
650 */
651 ipoib_put_ah(neigh->ah);
652 list_del(&neigh->list);
653 ipoib_neigh_free(neigh);
654 spin_unlock(&priv->lock);
655 ipoib_path_lookup(skb, dev);
656 goto out;
657 }
658
636 ipoib_send(dev, skb, neigh->ah, 659 ipoib_send(dev, skb, neigh->ah,
637 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 660 be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
638 goto out; 661 goto out;
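
Editor's note: the ipoib_main.c hunks cache the destination GID in the neighbour entry and, on every transmit, compare it against the GID embedded in bytes 4..19 of the 20-byte IPoIB hardware address; a mismatch means the cached address handle is stale and must be torn down and re-resolved. The sketch below shows just that staleness test with simplified stand-in structures, not the kernel types.

/*
 * Illustrative sketch: hw_addr is the 20-byte IPoIB address
 * (4 bytes QPN + 16 bytes GID).
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define GID_LEN 16

struct neigh_cache {
	uint8_t dgid[GID_LEN];    /* GID recorded when the path completed */
};

static int neigh_is_stale(const struct neigh_cache *n, const uint8_t *hw_addr)
{
	return memcmp(n->dgid, hw_addr + 4, GID_LEN) != 0;
}

int main(void)
{
	struct neigh_cache n = { .dgid = { 0xfe, 0x80 } };
	uint8_t hw[20] = { 0, 0, 0, 0, 0xfe, 0x80 };

	printf("stale: %d\n", neigh_is_stale(&n, hw));  /* 0: GIDs match   */
	hw[5] = 0x81;
	printf("stale: %d\n", neigh_is_stale(&n, hw));  /* 1: rebuild path */
	return 0;
}
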
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ab40488182b3..b5e6a7be603d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -264,6 +264,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
264 if (!ah) { 264 if (!ah) {
265 ipoib_warn(priv, "ib_address_create failed\n"); 265 ipoib_warn(priv, "ib_address_create failed\n");
266 } else { 266 } else {
267 spin_lock_irq(&priv->lock);
268 mcast->ah = ah;
269 spin_unlock_irq(&priv->lock);
270
267 ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT 271 ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
268 " AV %p, LID 0x%04x, SL %d\n", 272 " AV %p, LID 0x%04x, SL %d\n",
269 IPOIB_GID_ARG(mcast->mcmember.mgid), 273 IPOIB_GID_ARG(mcast->mcmember.mgid),
@@ -271,10 +275,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
271 be16_to_cpu(mcast->mcmember.mlid), 275 be16_to_cpu(mcast->mcmember.mlid),
272 mcast->mcmember.sl); 276 mcast->mcmember.sl);
273 } 277 }
274
275 spin_lock_irq(&priv->lock);
276 mcast->ah = ah;
277 spin_unlock_irq(&priv->lock);
278 } 278 }
279 279
280 /* actually send any queued packets */ 280 /* actually send any queued packets */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ff117bbf81b4..72febf1f8ff8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -594,7 +594,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
594 mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool, 594 mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
595 page_list, 595 page_list,
596 page_vec->length, 596 page_vec->length,
597 &io_addr); 597 io_addr);
598 598
599 if (IS_ERR(mem)) { 599 if (IS_ERR(mem)) {
600 status = (int)PTR_ERR(mem); 600 status = (int)PTR_ERR(mem);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4e22afef7206..8f472e7113b4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -615,9 +615,10 @@ static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
615 (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; 615 (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;
616 616
617 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, 617 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
618 dma_pages, page_cnt, &io_addr); 618 dma_pages, page_cnt, io_addr);
619 if (IS_ERR(req->fmr)) { 619 if (IS_ERR(req->fmr)) {
620 ret = PTR_ERR(req->fmr); 620 ret = PTR_ERR(req->fmr);
621 req->fmr = NULL;
621 goto out; 622 goto out;
622 } 623 }
623 624
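
Editor's note: the ib_srp.c hunk resets req->fmr to NULL when the mapping call returns an error-encoded pointer, so later teardown code that only checks for a non-NULL handle never tries to unmap an ERR_PTR value. The standalone sketch below mimics the kernel's ERR_PTR/IS_ERR convention in plain C to illustrate the fix; map_phys() and the request type are hypothetical.

/*
 * Illustrative sketch: clear the stored handle on error so cleanup code
 * can safely test it.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

static inline void *ERR_PTR(long err)      { return (void *)(intptr_t)err; }
static inline long  PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int   IS_ERR(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }

struct request { void *fmr; };

static void *map_phys(void)
{
	return ERR_PTR(-ENOMEM);               /* simulate a mapping failure */
}

static int map_request(struct request *req)
{
	req->fmr = map_phys();
	if (IS_ERR(req->fmr)) {
		long ret = PTR_ERR(req->fmr);

		req->fmr = NULL;  /* keep teardown from unmapping an ERR_PTR */
		return (int)ret;
	}
	return 0;
}

int main(void)
{
	struct request req = { 0 };

	printf("map_request() = %d, fmr = %p\n", map_request(&req), req.fmr);
	return 0;
}
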
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index 35ee52f9b79e..713c4a8aa77d 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -18,6 +18,7 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <linux/scx200_gpio.h> 19#include <linux/scx200_gpio.h>
20 20
21#define DRVNAME "net48xx-led"
21#define NET48XX_ERROR_LED_GPIO 20 22#define NET48XX_ERROR_LED_GPIO 20
22 23
23static struct platform_device *pdev; 24static struct platform_device *pdev;
@@ -66,13 +67,13 @@ static int net48xx_led_remove(struct platform_device *pdev)
66} 67}
67 68
68static struct platform_driver net48xx_led_driver = { 69static struct platform_driver net48xx_led_driver = {
69 .driver.owner = THIS_MODULE,
70 .probe = net48xx_led_probe, 70 .probe = net48xx_led_probe,
71 .remove = net48xx_led_remove, 71 .remove = net48xx_led_remove,
72 .suspend = net48xx_led_suspend, 72 .suspend = net48xx_led_suspend,
73 .resume = net48xx_led_resume, 73 .resume = net48xx_led_resume,
74 .driver = { 74 .driver = {
75 .name = "net48xx-led", 75 .name = DRVNAME,
76 .owner = THIS_MODULE,
76 }, 77 },
77}; 78};
78 79
@@ -89,7 +90,7 @@ static int __init net48xx_led_init(void)
89 if (ret < 0) 90 if (ret < 0)
90 goto out; 91 goto out;
91 92
92 pdev = platform_device_register_simple("net48xx-led", -1, NULL, 0); 93 pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
93 if (IS_ERR(pdev)) { 94 if (IS_ERR(pdev)) {
94 ret = PTR_ERR(pdev); 95 ret = PTR_ERR(pdev);
95 platform_driver_unregister(&net48xx_led_driver); 96 platform_driver_unregister(&net48xx_led_driver);
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index bbc229852881..ea31d8470510 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -48,10 +48,8 @@ config FUSION_SAS
48 List of supported controllers: 48 List of supported controllers:
49 49
50 LSISAS1064 50 LSISAS1064
51 LSISAS1066
52 LSISAS1068 51 LSISAS1068
53 LSISAS1064E 52 LSISAS1064E
54 LSISAS1066E
55 LSISAS1068E 53 LSISAS1068E
56 54
57config FUSION_MAX_SGE 55config FUSION_MAX_SGE
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index b114236f4395..341691390e86 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -9,7 +9,6 @@
9#EXTRA_CFLAGS += -DMPT_DEBUG_EXIT 9#EXTRA_CFLAGS += -DMPT_DEBUG_EXIT
10#EXTRA_CFLAGS += -DMPT_DEBUG_FAIL 10#EXTRA_CFLAGS += -DMPT_DEBUG_FAIL
11 11
12
13# 12#
14# driver/module specifics... 13# driver/module specifics...
15# 14#
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 43308df64623..29d0635cce1d 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -436,8 +436,6 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
436 */ 436 */
437 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) { 437 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
438 freereq = 0; 438 freereq = 0;
439 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n",
440 ioc->name, pEvReply));
441 } else { 439 } else {
442 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", 440 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
443 ioc->name, pEvReply)); 441 ioc->name, pEvReply));
@@ -678,19 +676,19 @@ int
678mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx) 676mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
679{ 677{
680 MPT_ADAPTER *ioc; 678 MPT_ADAPTER *ioc;
679 const struct pci_device_id *id;
681 680
682 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) { 681 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
683 return -EINVAL; 682 return -EINVAL;
684 }
685 683
686 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc; 684 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
687 685
688 /* call per pci device probe entry point */ 686 /* call per pci device probe entry point */
689 list_for_each_entry(ioc, &ioc_list, list) { 687 list_for_each_entry(ioc, &ioc_list, list) {
690 if(dd_cbfunc->probe) { 688 id = ioc->pcidev->driver ?
691 dd_cbfunc->probe(ioc->pcidev, 689 ioc->pcidev->driver->id_table : NULL;
692 ioc->pcidev->driver->id_table); 690 if (dd_cbfunc->probe)
693 } 691 dd_cbfunc->probe(ioc->pcidev, id);
694 } 692 }
695 693
696 return 0; 694 return 0;
@@ -1056,9 +1054,8 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1056 1054
1057 dinitprintk((MYIOC_s_INFO_FMT 1055 dinitprintk((MYIOC_s_INFO_FMT
1058 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n", 1056 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
1059 ioc->name, 1057 ioc->name, ioc->HostPageBuffer,
1060 ioc->HostPageBuffer, 1058 (u32)ioc->HostPageBuffer_dma,
1061 ioc->HostPageBuffer_dma,
1062 host_page_buffer_sz)); 1059 host_page_buffer_sz));
1063 ioc->alloc_total += host_page_buffer_sz; 1060 ioc->alloc_total += host_page_buffer_sz;
1064 ioc->HostPageBuffer_sz = host_page_buffer_sz; 1061 ioc->HostPageBuffer_sz = host_page_buffer_sz;
@@ -1380,6 +1377,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1380 printk(KERN_WARNING MYNAM 1377 printk(KERN_WARNING MYNAM
1381 ": WARNING - %s did not initialize properly! (%d)\n", 1378 ": WARNING - %s did not initialize properly! (%d)\n",
1382 ioc->name, r); 1379 ioc->name, r);
1380
1383 list_del(&ioc->list); 1381 list_del(&ioc->list);
1384 if (ioc->alt_ioc) 1382 if (ioc->alt_ioc)
1385 ioc->alt_ioc->alt_ioc = NULL; 1383 ioc->alt_ioc->alt_ioc = NULL;
@@ -1762,9 +1760,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1762 * chips (mpt_adapter_disable, 1760 * chips (mpt_adapter_disable,
1763 * mpt_diag_reset) 1761 * mpt_diag_reset)
1764 */ 1762 */
1765 ioc->cached_fw = NULL;
1766 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n", 1763 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
1767 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw)); 1764 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
1765 ioc->alt_ioc->cached_fw = NULL;
1768 } 1766 }
1769 } else { 1767 } else {
1770 printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); 1768 printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
@@ -1885,7 +1883,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1885 /* FIXME? Examine results here? */ 1883 /* FIXME? Examine results here? */
1886 } 1884 }
1887 1885
1888out: 1886 out:
1889 if ((ret != 0) && irq_allocated) { 1887 if ((ret != 0) && irq_allocated) {
1890 free_irq(ioc->pci_irq, ioc); 1888 free_irq(ioc->pci_irq, ioc);
1891 if (mpt_msi_enable) 1889 if (mpt_msi_enable)
@@ -2670,6 +2668,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2670 dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", 2668 dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
2671 ioc->name, count)); 2669 ioc->name, count));
2672 2670
2671 ioc->aen_event_read_flag=0;
2673 return r; 2672 return r;
2674} 2673}
2675 2674
@@ -2737,6 +2736,8 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
2737 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) { 2736 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
2738 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */ 2737 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
2739 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma; 2738 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
2739 ioc->alloc_total += size;
2740 ioc->alt_ioc->alloc_total -= size;
2740 } else { 2741 } else {
2741 if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) ) 2742 if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) )
2742 ioc->alloc_total += size; 2743 ioc->alloc_total += size;
@@ -3166,6 +3167,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
3166static int 3167static int
3167mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) 3168mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3168{ 3169{
3170 MPT_ADAPTER *iocp=NULL;
3169 u32 diag0val; 3171 u32 diag0val;
3170 u32 doorbell; 3172 u32 doorbell;
3171 int hard_reset_done = 0; 3173 int hard_reset_done = 0;
@@ -3301,17 +3303,23 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3301 /* FIXME? Examine results here? */ 3303 /* FIXME? Examine results here? */
3302 } 3304 }
3303 3305
3304 if (ioc->cached_fw) { 3306 if (ioc->cached_fw)
3307 iocp = ioc;
3308 else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
3309 iocp = ioc->alt_ioc;
3310 if (iocp) {
3305 /* If the DownloadBoot operation fails, the 3311 /* If the DownloadBoot operation fails, the
3306 * IOC will be left unusable. This is a fatal error 3312 * IOC will be left unusable. This is a fatal error
3307 * case. _diag_reset will return < 0 3313 * case. _diag_reset will return < 0
3308 */ 3314 */
3309 for (count = 0; count < 30; count ++) { 3315 for (count = 0; count < 30; count ++) {
3310 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3316 diag0val = CHIPREG_READ32(&iocp->chip->Diagnostic);
3311 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { 3317 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
3312 break; 3318 break;
3313 } 3319 }
3314 3320
3321 dprintk((MYIOC_s_INFO_FMT "cached_fw: diag0val=%x count=%d\n",
3322 iocp->name, diag0val, count));
3315 /* wait 1 sec */ 3323 /* wait 1 sec */
3316 if (sleepFlag == CAN_SLEEP) { 3324 if (sleepFlag == CAN_SLEEP) {
3317 msleep (1000); 3325 msleep (1000);
@@ -3320,7 +3328,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3320 } 3328 }
3321 } 3329 }
3322 if ((count = mpt_downloadboot(ioc, 3330 if ((count = mpt_downloadboot(ioc,
3323 (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) { 3331 (MpiFwHeader_t *)iocp->cached_fw, sleepFlag)) < 0) {
3324 printk(KERN_WARNING MYNAM 3332 printk(KERN_WARNING MYNAM
3325 ": firmware downloadboot failure (%d)!\n", count); 3333 ": firmware downloadboot failure (%d)!\n", count);
3326 } 3334 }
@@ -3907,18 +3915,18 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3907 3915
3908 if (sleepFlag == CAN_SLEEP) { 3916 if (sleepFlag == CAN_SLEEP) {
3909 while (--cntdn) { 3917 while (--cntdn) {
3918 msleep (1);
3910 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3919 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3911 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 3920 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
3912 break; 3921 break;
3913 msleep (1);
3914 count++; 3922 count++;
3915 } 3923 }
3916 } else { 3924 } else {
3917 while (--cntdn) { 3925 while (--cntdn) {
3926 mdelay (1);
3918 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3927 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3919 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 3928 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
3920 break; 3929 break;
3921 mdelay (1);
3922 count++; 3930 count++;
3923 } 3931 }
3924 } 3932 }
@@ -4883,6 +4891,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
4883 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma); 4891 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
4884 if (!pIoc4) 4892 if (!pIoc4)
4885 return; 4893 return;
4894 ioc->alloc_total += iocpage4sz;
4886 } else { 4895 } else {
4887 ioc4_dma = ioc->spi_data.IocPg4_dma; 4896 ioc4_dma = ioc->spi_data.IocPg4_dma;
4888 iocpage4sz = ioc->spi_data.IocPg4Sz; 4897 iocpage4sz = ioc->spi_data.IocPg4Sz;
@@ -4899,6 +4908,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
4899 } else { 4908 } else {
4900 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma); 4909 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
4901 ioc->spi_data.pIocPg4 = NULL; 4910 ioc->spi_data.pIocPg4 = NULL;
4911 ioc->alloc_total -= iocpage4sz;
4902 } 4912 }
4903} 4913}
4904 4914
@@ -5030,19 +5040,18 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
5030 EventAck_t *pAck; 5040 EventAck_t *pAck;
5031 5041
5032 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5042 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5033 printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK " 5043 dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
5034 "request frame for Event=%x EventContext=%x EventData=%x!\n", 5044 ioc->name,__FUNCTION__));
5035 ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext),
5036 le32_to_cpu(evnp->Data[0]));
5037 return -1; 5045 return -1;
5038 } 5046 }
5039 memset(pAck, 0, sizeof(*pAck));
5040 5047
5041 dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); 5048 devtverboseprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name));
5042 5049
5043 pAck->Function = MPI_FUNCTION_EVENT_ACK; 5050 pAck->Function = MPI_FUNCTION_EVENT_ACK;
5044 pAck->ChainOffset = 0; 5051 pAck->ChainOffset = 0;
5052 pAck->Reserved[0] = pAck->Reserved[1] = 0;
5045 pAck->MsgFlags = 0; 5053 pAck->MsgFlags = 0;
5054 pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0;
5046 pAck->Event = evnp->Event; 5055 pAck->Event = evnp->Event;
5047 pAck->EventContext = evnp->EventContext; 5056 pAck->EventContext = evnp->EventContext;
5048 5057
@@ -5704,9 +5713,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5704 break; 5713 break;
5705 case MPI_EVENT_EVENT_CHANGE: 5714 case MPI_EVENT_EVENT_CHANGE:
5706 if (evData0) 5715 if (evData0)
5707 ds = "Events(ON) Change"; 5716 ds = "Events ON";
5708 else 5717 else
5709 ds = "Events(OFF) Change"; 5718 ds = "Events OFF";
5710 break; 5719 break;
5711 case MPI_EVENT_INTEGRATED_RAID: 5720 case MPI_EVENT_INTEGRATED_RAID:
5712 { 5721 {
@@ -5777,8 +5786,27 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5777 break; 5786 break;
5778 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 5787 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
5779 snprintf(evStr, EVENT_DESCR_STR_SZ, 5788 snprintf(evStr, EVENT_DESCR_STR_SZ,
5780 "SAS Device Status Change: No Persistancy " 5789 "SAS Device Status Change: No Persistancy: id=%d", id);
5781 "Added: id=%d", id); 5790 break;
5791 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
5792 snprintf(evStr, EVENT_DESCR_STR_SZ,
5793 "SAS Device Status Change: Internal Device Reset : id=%d", id);
5794 break;
5795 case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
5796 snprintf(evStr, EVENT_DESCR_STR_SZ,
5797 "SAS Device Status Change: Internal Task Abort : id=%d", id);
5798 break;
5799 case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
5800 snprintf(evStr, EVENT_DESCR_STR_SZ,
5801 "SAS Device Status Change: Internal Abort Task Set : id=%d", id);
5802 break;
5803 case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
5804 snprintf(evStr, EVENT_DESCR_STR_SZ,
5805 "SAS Device Status Change: Internal Clear Task Set : id=%d", id);
5806 break;
5807 case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
5808 snprintf(evStr, EVENT_DESCR_STR_SZ,
5809 "SAS Device Status Change: Internal Query Task : id=%d", id);
5782 break; 5810 break;
5783 default: 5811 default:
5784 snprintf(evStr, EVENT_DESCR_STR_SZ, 5812 snprintf(evStr, EVENT_DESCR_STR_SZ,
@@ -6034,7 +6062,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6034 * @ioc: Pointer to MPT_ADAPTER structure 6062 * @ioc: Pointer to MPT_ADAPTER structure
6035 * @log_info: U32 LogInfo reply word from the IOC 6063 * @log_info: U32 LogInfo reply word from the IOC
6036 * 6064 *
6037 * Refer to lsi/fc_log.h. 6065 * Refer to lsi/mpi_log_fc.h.
6038 */ 6066 */
6039static void 6067static void
6040mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info) 6068mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
@@ -6131,8 +6159,10 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
6131 "Invalid SAS Address", /* 01h */ 6159 "Invalid SAS Address", /* 01h */
6132 NULL, /* 02h */ 6160 NULL, /* 02h */
6133 "Invalid Page", /* 03h */ 6161 "Invalid Page", /* 03h */
6134 NULL, /* 04h */ 6162 "Diag Message Error", /* 04h */
6135 "Task Terminated" /* 05h */ 6163 "Task Terminated", /* 05h */
6164 "Enclosure Management", /* 06h */
6165 "Target Mode" /* 07h */
6136 }; 6166 };
6137 static char *pl_code_str[] = { 6167 static char *pl_code_str[] = {
6138 NULL, /* 00h */ 6168 NULL, /* 00h */
@@ -6158,7 +6188,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
6158 "IO Executed", /* 14h */ 6188 "IO Executed", /* 14h */
6159 "Persistant Reservation Out Not Affiliation Owner", /* 15h */ 6189 "Persistant Reservation Out Not Affiliation Owner", /* 15h */
6160 "Open Transmit DMA Abort", /* 16h */ 6190 "Open Transmit DMA Abort", /* 16h */
6161 NULL, /* 17h */ 6191 "IO Device Missing Delay Retry", /* 17h */
6162 NULL, /* 18h */ 6192 NULL, /* 18h */
6163 NULL, /* 19h */ 6193 NULL, /* 19h */
6164 NULL, /* 1Ah */ 6194 NULL, /* 1Ah */
@@ -6238,7 +6268,7 @@ static void
6238mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf) 6268mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
6239{ 6269{
6240 u32 status = ioc_status & MPI_IOCSTATUS_MASK; 6270 u32 status = ioc_status & MPI_IOCSTATUS_MASK;
6241 char *desc = ""; 6271 char *desc = NULL;
6242 6272
6243 switch (status) { 6273 switch (status) {
6244 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */ 6274 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
@@ -6348,7 +6378,7 @@ mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
6348 desc = "Others"; 6378 desc = "Others";
6349 break; 6379 break;
6350 } 6380 }
6351 if (desc != "") 6381 if (desc != NULL)
6352 printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc); 6382 printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc);
6353} 6383}
6354 6384
@@ -6386,7 +6416,6 @@ EXPORT_SYMBOL(mpt_alloc_fw_memory);
6386EXPORT_SYMBOL(mpt_free_fw_memory); 6416EXPORT_SYMBOL(mpt_free_fw_memory);
6387EXPORT_SYMBOL(mptbase_sas_persist_operation); 6417EXPORT_SYMBOL(mptbase_sas_persist_operation);
6388 6418
6389
6390/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6419/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6391/* 6420/*
6392 * fusion_init - Fusion MPT base driver initialization routine. 6421 * fusion_init - Fusion MPT base driver initialization routine.
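
Editor's note: among the mptbase.c changes above, mpt_sp_ioc_info() stops initialising its description pointer to "" and testing `desc != ""`, which compares string-literal addresses rather than contents; NULL is used as the "nothing to print" sentinel instead. The small demo below shows the well-defined form of that check; lookup_desc() is a hypothetical stand-in for the switch statement in the driver.

/*
 * Illustrative sketch: use NULL, not an empty-string literal, as the
 * "no description" sentinel.
 */
#include <stdio.h>

static const char *lookup_desc(unsigned int status)
{
	switch (status) {
	case 0x0001: return "Invalid Function";
	default:     return NULL;              /* nothing to print */
	}
}

int main(void)
{
	unsigned int s;

	for (s = 0; s <= 1; s++) {
		const char *desc = lookup_desc(s);

		if (desc != NULL)    /* well defined, unlike desc != "" */
			printf("status 0x%04x: %s\n", s, desc);
	}
	return 0;
}
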
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index a5ce10b67d02..d4cb144ab402 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -75,8 +75,8 @@
75#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 75#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
76#endif 76#endif
77 77
78#define MPT_LINUX_VERSION_COMMON "3.04.00" 78#define MPT_LINUX_VERSION_COMMON "3.04.01"
79#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.00" 79#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.01"
80#define WHAT_MAGIC_STRING "@" "(" "#" ")" 80#define WHAT_MAGIC_STRING "@" "(" "#" ")"
81 81
82#define show_mptmod_ver(s,ver) \ 82#define show_mptmod_ver(s,ver) \
@@ -307,8 +307,8 @@ typedef struct _SYSIF_REGS
307 u32 HostIndex; /* 50 Host Index register */ 307 u32 HostIndex; /* 50 Host Index register */
308 u32 Reserved4[15]; /* 54-8F */ 308 u32 Reserved4[15]; /* 54-8F */
309 u32 Fubar; /* 90 For Fubar usage */ 309 u32 Fubar; /* 90 For Fubar usage */
310 u32 Reserved5[1050];/* 94-10F8 */ 310 u32 Reserved5[1050];/* 94-10F8 */
311 u32 Reset_1078; /* 10FC Reset 1078 */ 311 u32 Reset_1078; /* 10FC Reset 1078 */
312} SYSIF_REGS; 312} SYSIF_REGS;
313 313
314/* 314/*
@@ -363,6 +363,7 @@ typedef struct _VirtDevice {
363#define MPT_TARGET_FLAGS_VALID_56 0x10 363#define MPT_TARGET_FLAGS_VALID_56 0x10
364#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20 364#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20
365#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40 365#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40
366#define MPT_TARGET_FLAGS_LED_ON 0x80
366 367
367/* 368/*
368 * /proc/mpt interface 369 * /proc/mpt interface
@@ -634,7 +635,6 @@ typedef struct _MPT_ADAPTER
634 u16 handle; 635 u16 handle;
635 int sas_index; /* index refrencing */ 636 int sas_index; /* index refrencing */
636 MPT_SAS_MGMT sas_mgmt; 637 MPT_SAS_MGMT sas_mgmt;
637 int num_ports;
638 struct work_struct sas_persist_task; 638 struct work_struct sas_persist_task;
639 639
640 struct work_struct fc_setup_reset_work; 640 struct work_struct fc_setup_reset_work;
@@ -644,7 +644,6 @@ typedef struct _MPT_ADAPTER
644 struct work_struct fc_rescan_work; 644 struct work_struct fc_rescan_work;
645 char fc_rescan_work_q_name[KOBJ_NAME_LEN]; 645 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
646 struct workqueue_struct *fc_rescan_work_q; 646 struct workqueue_struct *fc_rescan_work_q;
647 u8 port_serial_number;
648} MPT_ADAPTER; 647} MPT_ADAPTER;
649 648
650/* 649/*
@@ -982,7 +981,7 @@ typedef struct _MPT_SCSI_HOST {
982 wait_queue_head_t scandv_waitq; 981 wait_queue_head_t scandv_waitq;
983 int scandv_wait_done; 982 int scandv_wait_done;
984 long last_queue_full; 983 long last_queue_full;
985 u8 mpt_pq_filter; 984 u16 tm_iocstatus;
986} MPT_SCSI_HOST; 985} MPT_SCSI_HOST;
987 986
988/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 987/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b4967bb8a7d6..30975ccd9947 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2332,7 +2332,7 @@ done_free_mem:
2332} 2332}
2333 2333
2334/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2334/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2335/* Prototype Routine for the HP HOST INFO command. 2335/* Prototype Routine for the HOST INFO command.
2336 * 2336 *
2337 * Outputs: None. 2337 * Outputs: None.
2338 * Return: 0 if successful 2338 * Return: 0 if successful
@@ -2568,7 +2568,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2568} 2568}
2569 2569
2570/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2570/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2571/* Prototype Routine for the HP TARGET INFO command. 2571/* Prototype Routine for the TARGET INFO command.
2572 * 2572 *
2573 * Outputs: None. 2573 * Outputs: None.
2574 * Return: 0 if successful 2574 * Return: 0 if successful
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index a2f8a97992e6..043941882c6e 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -354,9 +354,6 @@ struct mpt_ioctl_command32 {
354 354
355 355
356/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 356/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
357/*
358 * HP Specific IOCTL Defines and Structures
359 */
360 357
361#define CPQFCTS_IOC_MAGIC 'Z' 358#define CPQFCTS_IOC_MAGIC 'Z'
362#define HP_IOC_MAGIC 'Z' 359#define HP_IOC_MAGIC 'Z'
@@ -364,8 +361,6 @@ struct mpt_ioctl_command32 {
364#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t) 361#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t)
365#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t) 362#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t)
366 363
367/* All HP IOCTLs must include this header
368 */
369typedef struct _hp_header { 364typedef struct _hp_header {
370 unsigned int iocnum; 365 unsigned int iocnum;
371 unsigned int host; 366 unsigned int host;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index a8f2fa985455..90da7d63b08e 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -77,10 +77,6 @@ MODULE_DESCRIPTION(my_NAME);
77MODULE_LICENSE("GPL"); 77MODULE_LICENSE("GPL");
78 78
79/* Command line args */ 79/* Command line args */
80static int mpt_pq_filter = 0;
81module_param(mpt_pq_filter, int, 0);
82MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
83
84#define MPTFC_DEV_LOSS_TMO (60) 80#define MPTFC_DEV_LOSS_TMO (60)
85static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */ 81static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */
86module_param(mptfc_dev_loss_tmo, int, 0); 82module_param(mptfc_dev_loss_tmo, int, 0);
@@ -513,8 +509,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
513 509
514 if (vtarget->num_luns == 0) { 510 if (vtarget->num_luns == 0) {
515 vtarget->ioc_id = hd->ioc->id; 511 vtarget->ioc_id = hd->ioc->id;
516 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES | 512 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
517 MPT_TARGET_FLAGS_VALID_INQUIRY;
518 hd->Targets[sdev->id] = vtarget; 513 hd->Targets[sdev->id] = vtarget;
519 } 514 }
520 515
@@ -1129,13 +1124,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1129 hd->timer.data = (unsigned long) hd; 1124 hd->timer.data = (unsigned long) hd;
1130 hd->timer.function = mptscsih_timer_expired; 1125 hd->timer.function = mptscsih_timer_expired;
1131 1126
1132 hd->mpt_pq_filter = mpt_pq_filter;
1133
1134 ddvprintk((MYIOC_s_INFO_FMT
1135 "mpt_pq_filter %x\n",
1136 ioc->name,
1137 mpt_pq_filter));
1138
1139 init_waitqueue_head(&hd->scandv_waitq); 1127 init_waitqueue_head(&hd->scandv_waitq);
1140 hd->scandv_wait_done = 0; 1128 hd->scandv_wait_done = 0;
1141 hd->last_queue_full = 0; 1129 hd->last_queue_full = 0;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index f7bd8b11ed3b..f66f2203143a 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -67,20 +67,19 @@
67#define my_VERSION MPT_LINUX_VERSION_COMMON 67#define my_VERSION MPT_LINUX_VERSION_COMMON
68#define MYNAM "mptsas" 68#define MYNAM "mptsas"
69 69
70/*
71 * Reserved channel for integrated raid
72 */
73#define MPTSAS_RAID_CHANNEL 1
74
70MODULE_AUTHOR(MODULEAUTHOR); 75MODULE_AUTHOR(MODULEAUTHOR);
71MODULE_DESCRIPTION(my_NAME); 76MODULE_DESCRIPTION(my_NAME);
72MODULE_LICENSE("GPL"); 77MODULE_LICENSE("GPL");
73 78
74static int mpt_pq_filter;
75module_param(mpt_pq_filter, int, 0);
76MODULE_PARM_DESC(mpt_pq_filter,
77 "Enable peripheral qualifier filter: enable=1 "
78 "(default=0)");
79
80static int mpt_pt_clear; 79static int mpt_pt_clear;
81module_param(mpt_pt_clear, int, 0); 80module_param(mpt_pt_clear, int, 0);
82MODULE_PARM_DESC(mpt_pt_clear, 81MODULE_PARM_DESC(mpt_pt_clear,
83 "Clear persistency table: enable=1 " 82 " Clear persistency table: enable=1 "
84 "(default=MPTSCSIH_PT_CLEAR=0)"); 83 "(default=MPTSCSIH_PT_CLEAR=0)");
85 84
86static int mptsasDoneCtx = -1; 85static int mptsasDoneCtx = -1;
@@ -144,7 +143,6 @@ struct mptsas_devinfo {
144 * Specific details on ports, wide/narrow 143 * Specific details on ports, wide/narrow
145 */ 144 */
146struct mptsas_portinfo_details{ 145struct mptsas_portinfo_details{
147 u8 port_id; /* port number provided to transport */
148 u16 num_phys; /* number of phys belong to this port */ 146 u16 num_phys; /* number of phys belong to this port */
149 u64 phy_bitmask; /* TODO, extend support for 255 phys */ 147 u64 phy_bitmask; /* TODO, extend support for 255 phys */
150 struct sas_rphy *rphy; /* transport layer rphy object */ 148 struct sas_rphy *rphy; /* transport layer rphy object */
@@ -350,10 +348,10 @@ mptsas_port_delete(struct mptsas_portinfo_details * port_details)
350 port_info = port_details->port_info; 348 port_info = port_details->port_info;
351 phy_info = port_info->phy_info; 349 phy_info = port_info->phy_info;
352 350
353 dsaswideprintk((KERN_DEBUG "%s: [%p]: port=%02d num_phys=%02d " 351 dsaswideprintk((KERN_DEBUG "%s: [%p]: num_phys=%02d "
354 "bitmask=0x%016llX\n", 352 "bitmask=0x%016llX\n",
355 __FUNCTION__, port_details, port_details->port_id, 353 __FUNCTION__, port_details, port_details->num_phys,
356 port_details->num_phys, port_details->phy_bitmask)); 354 port_details->phy_bitmask));
357 355
358 for (i = 0; i < port_info->num_phys; i++, phy_info++) { 356 for (i = 0; i < port_info->num_phys; i++, phy_info++) {
359 if(phy_info->port_details != port_details) 357 if(phy_info->port_details != port_details)
@@ -462,9 +460,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
462 * phy be removed by firmware events. 460 * phy be removed by firmware events.
463 */ 461 */
464 dsaswideprintk((KERN_DEBUG 462 dsaswideprintk((KERN_DEBUG
465 "%s: [%p]: port=%d deleting phy = %d\n", 463 "%s: [%p]: deleting phy = %d\n",
466 __FUNCTION__, port_details, 464 __FUNCTION__, port_details, i));
467 port_details->port_id, i));
468 port_details->num_phys--; 465 port_details->num_phys--;
469 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id); 466 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
470 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 467 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
@@ -493,7 +490,6 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
493 goto out; 490 goto out;
494 port_details->num_phys = 1; 491 port_details->num_phys = 1;
495 port_details->port_info = port_info; 492 port_details->port_info = port_info;
496 port_details->port_id = ioc->port_serial_number++;
497 if (phy_info->phy_id < 64 ) 493 if (phy_info->phy_id < 64 )
498 port_details->phy_bitmask |= 494 port_details->phy_bitmask |=
499 (1 << phy_info->phy_id); 495 (1 << phy_info->phy_id);
@@ -525,12 +521,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
525 mptsas_get_port(phy_info_cmp); 521 mptsas_get_port(phy_info_cmp);
526 port_details->starget = 522 port_details->starget =
527 mptsas_get_starget(phy_info_cmp); 523 mptsas_get_starget(phy_info_cmp);
528 port_details->port_id =
529 phy_info_cmp->port_details->port_id;
530 port_details->num_phys = 524 port_details->num_phys =
531 phy_info_cmp->port_details->num_phys; 525 phy_info_cmp->port_details->num_phys;
532// port_info->port_serial_number--;
533 ioc->port_serial_number--;
534 if (!phy_info_cmp->port_details->num_phys) 526 if (!phy_info_cmp->port_details->num_phys)
535 kfree(phy_info_cmp->port_details); 527 kfree(phy_info_cmp->port_details);
536 } else 528 } else
@@ -554,11 +546,11 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
554 if (!port_details) 546 if (!port_details)
555 continue; 547 continue;
556 dsaswideprintk((KERN_DEBUG 548 dsaswideprintk((KERN_DEBUG
557 "%s: [%p]: phy_id=%02d port_id=%02d num_phys=%02d " 549 "%s: [%p]: phy_id=%02d num_phys=%02d "
558 "bitmask=0x%016llX\n", 550 "bitmask=0x%016llX\n",
559 __FUNCTION__, 551 __FUNCTION__,
560 port_details, i, port_details->port_id, 552 port_details, i, port_details->num_phys,
561 port_details->num_phys, port_details->phy_bitmask)); 553 port_details->phy_bitmask));
562 dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n", 554 dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n",
563 port_details->port, port_details->rphy)); 555 port_details->port, port_details->rphy));
564 } 556 }
@@ -651,16 +643,13 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
651static int 643static int
652mptsas_slave_configure(struct scsi_device *sdev) 644mptsas_slave_configure(struct scsi_device *sdev)
653{ 645{
654 struct Scsi_Host *host = sdev->host;
655 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
656 646
657 /* 647 if (sdev->channel == MPTSAS_RAID_CHANNEL)
658 * RAID volumes placed beyond the last expected port. 648 goto out;
659 * Ignore sending sas mode pages in that case.. 649
660 */ 650 sas_read_port_mode_page(sdev);
661 if (sdev->channel < hd->ioc->num_ports)
662 sas_read_port_mode_page(sdev);
663 651
652 out:
664 return mptscsih_slave_configure(sdev); 653 return mptscsih_slave_configure(sdev);
665} 654}
666 655
@@ -689,10 +678,7 @@ mptsas_target_alloc(struct scsi_target *starget)
689 678
690 hd->Targets[target_id] = vtarget; 679 hd->Targets[target_id] = vtarget;
691 680
692 /* 681 if (starget->channel == MPTSAS_RAID_CHANNEL)
693 * RAID volumes placed beyond the last expected port.
694 */
695 if (starget->channel == hd->ioc->num_ports)
696 goto out; 682 goto out;
697 683
698 rphy = dev_to_rphy(starget->dev.parent); 684 rphy = dev_to_rphy(starget->dev.parent);
@@ -743,7 +729,7 @@ mptsas_target_destroy(struct scsi_target *starget)
743 if (!starget->hostdata) 729 if (!starget->hostdata)
744 return; 730 return;
745 731
746 if (starget->channel == hd->ioc->num_ports) 732 if (starget->channel == MPTSAS_RAID_CHANNEL)
747 goto out; 733 goto out;
748 734
749 rphy = dev_to_rphy(starget->dev.parent); 735 rphy = dev_to_rphy(starget->dev.parent);
@@ -783,10 +769,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
783 starget = scsi_target(sdev); 769 starget = scsi_target(sdev);
784 vdev->vtarget = starget->hostdata; 770 vdev->vtarget = starget->hostdata;
785 771
786 /* 772 if (sdev->channel == MPTSAS_RAID_CHANNEL)
787 * RAID volumes placed beyond the last expected port.
788 */
789 if (sdev->channel == hd->ioc->num_ports)
790 goto out; 773 goto out;
791 774
792 rphy = dev_to_rphy(sdev->sdev_target->dev.parent); 775 rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
@@ -1608,11 +1591,7 @@ static int mptsas_probe_one_phy(struct device *dev,
1608 if (phy_info->sas_port_add_phy) { 1591 if (phy_info->sas_port_add_phy) {
1609 1592
1610 if (!port) { 1593 if (!port) {
1611 port = sas_port_alloc(dev, 1594 port = sas_port_alloc_num(dev);
1612 phy_info->port_details->port_id);
1613 dsaswideprintk((KERN_DEBUG
1614 "sas_port_alloc: port=%p dev=%p port_id=%d\n",
1615 port, dev, phy_info->port_details->port_id));
1616 if (!port) { 1595 if (!port) {
1617 error = -ENOMEM; 1596 error = -ENOMEM;
1618 goto out; 1597 goto out;
@@ -1625,6 +1604,9 @@ static int mptsas_probe_one_phy(struct device *dev,
1625 goto out; 1604 goto out;
1626 } 1605 }
1627 mptsas_set_port(phy_info, port); 1606 mptsas_set_port(phy_info, port);
1607 dsaswideprintk((KERN_DEBUG
1608 "sas_port_alloc: port=%p dev=%p port_id=%d\n",
1609 port, dev, port->port_identifier));
1628 } 1610 }
1629 dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n", 1611 dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n",
1630 phy_info->phy_id)); 1612 phy_info->phy_id));
@@ -1736,7 +1718,6 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
1736 hba = NULL; 1718 hba = NULL;
1737 } 1719 }
1738 mutex_unlock(&ioc->sas_topology_mutex); 1720 mutex_unlock(&ioc->sas_topology_mutex);
1739 ioc->num_ports = port_info->num_phys;
1740 1721
1741 for (i = 0; i < port_info->num_phys; i++) { 1722 for (i = 0; i < port_info->num_phys; i++) {
1742 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], 1723 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
@@ -1939,7 +1920,8 @@ mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
1939 expander_sas_address) 1920 expander_sas_address)
1940 continue; 1921 continue;
1941#ifdef MPT_DEBUG_SAS_WIDE 1922#ifdef MPT_DEBUG_SAS_WIDE
1942 dev_printk(KERN_DEBUG, &port->dev, "delete\n"); 1923 dev_printk(KERN_DEBUG, &port->dev,
1924 "delete port (%d)\n", port->port_identifier);
1943#endif 1925#endif
1944 sas_port_delete(port); 1926 sas_port_delete(port);
1945 mptsas_port_delete(phy_info->port_details); 1927 mptsas_port_delete(phy_info->port_details);
@@ -1984,7 +1966,7 @@ mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
1984 if (!ioc->raid_data.pIocPg2->NumActiveVolumes) 1966 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
1985 goto out; 1967 goto out;
1986 for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 1968 for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
1987 scsi_add_device(ioc->sh, ioc->num_ports, 1969 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
1988 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); 1970 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
1989 } 1971 }
1990 out: 1972 out:
@@ -2185,7 +2167,8 @@ mptsas_hotplug_work(void *arg)
2185 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id); 2167 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
2186 2168
2187#ifdef MPT_DEBUG_SAS_WIDE 2169#ifdef MPT_DEBUG_SAS_WIDE
2188 dev_printk(KERN_DEBUG, &port->dev, "delete\n"); 2170 dev_printk(KERN_DEBUG, &port->dev,
2171 "delete port (%d)\n", port->port_identifier);
2189#endif 2172#endif
2190 sas_port_delete(port); 2173 sas_port_delete(port);
2191 mptsas_port_delete(phy_info->port_details); 2174 mptsas_port_delete(phy_info->port_details);
@@ -2289,35 +2272,26 @@ mptsas_hotplug_work(void *arg)
2289 mptsas_set_rphy(phy_info, rphy); 2272 mptsas_set_rphy(phy_info, rphy);
2290 break; 2273 break;
2291 case MPTSAS_ADD_RAID: 2274 case MPTSAS_ADD_RAID:
2292 sdev = scsi_device_lookup( 2275 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
2293 ioc->sh, 2276 ev->id, 0);
2294 ioc->num_ports,
2295 ev->id,
2296 0);
2297 if (sdev) { 2277 if (sdev) {
2298 scsi_device_put(sdev); 2278 scsi_device_put(sdev);
2299 break; 2279 break;
2300 } 2280 }
2301 printk(MYIOC_s_INFO_FMT 2281 printk(MYIOC_s_INFO_FMT
2302 "attaching raid volume, channel %d, id %d\n", 2282 "attaching raid volume, channel %d, id %d\n",
2303 ioc->name, ioc->num_ports, ev->id); 2283 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2304 scsi_add_device(ioc->sh, 2284 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
2305 ioc->num_ports,
2306 ev->id,
2307 0);
2308 mpt_findImVolumes(ioc); 2285 mpt_findImVolumes(ioc);
2309 break; 2286 break;
2310 case MPTSAS_DEL_RAID: 2287 case MPTSAS_DEL_RAID:
2311 sdev = scsi_device_lookup( 2288 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
2312 ioc->sh, 2289 ev->id, 0);
2313 ioc->num_ports,
2314 ev->id,
2315 0);
2316 if (!sdev) 2290 if (!sdev)
2317 break; 2291 break;
2318 printk(MYIOC_s_INFO_FMT 2292 printk(MYIOC_s_INFO_FMT
2319 "removing raid volume, channel %d, id %d\n", 2293 "removing raid volume, channel %d, id %d\n",
2320 ioc->name, ioc->num_ports, ev->id); 2294 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2321 vdevice = sdev->hostdata; 2295 vdevice = sdev->hostdata;
2322 vdevice->vtarget->deleted = 1; 2296 vdevice->vtarget->deleted = 1;
2323 mptsas_target_reset(ioc, vdevice->vtarget); 2297 mptsas_target_reset(ioc, vdevice->vtarget);
@@ -2723,7 +2697,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2723 hd->timer.data = (unsigned long) hd; 2697 hd->timer.data = (unsigned long) hd;
2724 hd->timer.function = mptscsih_timer_expired; 2698 hd->timer.function = mptscsih_timer_expired;
2725 2699
2726 hd->mpt_pq_filter = mpt_pq_filter;
2727 ioc->sas_data.ptClear = mpt_pt_clear; 2700 ioc->sas_data.ptClear = mpt_pt_clear;
2728 2701
2729 if (ioc->sas_data.ptClear==1) { 2702 if (ioc->sas_data.ptClear==1) {
@@ -2731,12 +2704,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2731 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT); 2704 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
2732 } 2705 }
2733 2706
2734 ddvprintk((MYIOC_s_INFO_FMT
2735 "mpt_pq_filter %x mpt_pq_filter %x\n",
2736 ioc->name,
2737 mpt_pq_filter,
2738 mpt_pq_filter));
2739
2740 init_waitqueue_head(&hd->scandv_waitq); 2707 init_waitqueue_head(&hd->scandv_waitq);
2741 hd->scandv_wait_done = 0; 2708 hd->scandv_wait_done = 0;
2742 hd->last_queue_full = 0; 2709 hd->last_queue_full = 0;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 8242b16e3168..30524dc54b16 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -66,6 +66,7 @@
66 66
67#include "mptbase.h" 67#include "mptbase.h"
68#include "mptscsih.h" 68#include "mptscsih.h"
69#include "lsi/mpi_log_sas.h"
69 70
70/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 71/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
71#define my_NAME "Fusion MPT SCSI Host driver" 72#define my_NAME "Fusion MPT SCSI Host driver"
@@ -127,7 +128,7 @@ static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
127static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); 128static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
128static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd); 129static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
129static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ); 130static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
130static u32 SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc); 131static int SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc);
131 132
132static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout); 133static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
133 134
@@ -497,6 +498,34 @@ nextSGEset:
497 return SUCCESS; 498 return SUCCESS;
498} /* mptscsih_AddSGE() */ 499} /* mptscsih_AddSGE() */
499 500
501static void
502mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
503 U32 SlotStatus)
504{
505 MPT_FRAME_HDR *mf;
506 SEPRequest_t *SEPMsg;
507
508 if (ioc->bus_type == FC)
509 return;
510
511 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
512 dfailprintk((MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
513 ioc->name,__FUNCTION__));
514 return;
515 }
516
517 SEPMsg = (SEPRequest_t *)mf;
518 SEPMsg->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
519 SEPMsg->Bus = vtarget->bus_id;
520 SEPMsg->TargetID = vtarget->target_id;
521 SEPMsg->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS;
522 SEPMsg->SlotStatus = SlotStatus;
523 devtverboseprintk((MYIOC_s_WARN_FMT
524 "Sending SEP cmd=%x id=%d bus=%d\n",
525 ioc->name, SlotStatus, SEPMsg->TargetID, SEPMsg->Bus));
526 mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
527}
528
500/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 529/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
501/* 530/*
502 * mptscsih_io_done - Main SCSI IO callback routine registered to 531 * mptscsih_io_done - Main SCSI IO callback routine registered to
@@ -520,6 +549,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
520 SCSIIORequest_t *pScsiReq; 549 SCSIIORequest_t *pScsiReq;
521 SCSIIOReply_t *pScsiReply; 550 SCSIIOReply_t *pScsiReply;
522 u16 req_idx, req_idx_MR; 551 u16 req_idx, req_idx_MR;
552 VirtDevice *vdev;
553 VirtTarget *vtarget;
523 554
524 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; 555 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
525 556
@@ -538,6 +569,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
538 } 569 }
539 570
540 sc = hd->ScsiLookup[req_idx]; 571 sc = hd->ScsiLookup[req_idx];
572 hd->ScsiLookup[req_idx] = NULL;
541 if (sc == NULL) { 573 if (sc == NULL) {
542 MPIHeader_t *hdr = (MPIHeader_t *)mf; 574 MPIHeader_t *hdr = (MPIHeader_t *)mf;
543 575
@@ -553,6 +585,12 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
553 return 1; 585 return 1;
554 } 586 }
555 587
588 if ((unsigned char *)mf != sc->host_scribble) {
589 mptscsih_freeChainBuffers(ioc, req_idx);
590 return 1;
591 }
592
593 sc->host_scribble = NULL;
556 sc->result = DID_OK << 16; /* Set default reply as OK */ 594 sc->result = DID_OK << 16; /* Set default reply as OK */
557 pScsiReq = (SCSIIORequest_t *) mf; 595 pScsiReq = (SCSIIORequest_t *) mf;
558 pScsiReply = (SCSIIOReply_t *) mr; 596 pScsiReply = (SCSIIOReply_t *) mr;
@@ -640,10 +678,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
640 678
641 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) 679 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
642 hd->sel_timeout[pScsiReq->TargetID]++; 680 hd->sel_timeout[pScsiReq->TargetID]++;
681
682 vdev = sc->device->hostdata;
683 if (!vdev)
684 break;
685 vtarget = vdev->vtarget;
686 if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) {
687 mptscsih_issue_sep_command(ioc, vtarget,
688 MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED);
689 vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
690 }
643 break; 691 break;
644 692
645 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
646 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ 693 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
694 if ( ioc->bus_type == SAS ) {
695 u16 ioc_status = le16_to_cpu(pScsiReply->IOCStatus);
696 if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
697 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
698 log_info &=SAS_LOGINFO_MASK;
699 if (log_info == SAS_LOGINFO_NEXUS_LOSS) {
700 sc->result = (DID_BUS_BUSY << 16);
701 break;
702 }
703 }
704 }
705
706 /*
707 * Allow non-SAS & non-NEXUS_LOSS to drop into below code
708 */
709
710 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
647 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ 711 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
648 /* Linux handles an unsolicited DID_RESET better 712 /* Linux handles an unsolicited DID_RESET better
649 * than an unsolicited DID_ABORT. 713 * than an unsolicited DID_ABORT.
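[Annotation] The new SAS branch above keys off the reply's IOCLogInfo: when the log-info-available flag is set and the masked code identifies a nexus loss, the command completes with DID_BUS_BUSY so the midlayer retries it, while everything else falls through to the TASK_TERMINATED/EXT_TERMINATED handling. A minimal standalone C sketch of that decision; the flag and SAS_LOGINFO_* values here are placeholders, since the real definitions live in the driver headers, not in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the driver headers define the real ones. */
#define LOG_INFO_AVAILABLE      0x8000u      /* flag bit in IOCStatus        */
#define SAS_LOGINFO_MASK        0xFF000000u  /* keep only the code portion   */
#define SAS_LOGINFO_NEXUS_LOSS  0x31000000u

enum disposition { RETRY_BUS_BUSY, FALL_THROUGH };

static enum disposition classify(uint16_t ioc_status, uint32_t ioc_loginfo)
{
	if ((ioc_status & LOG_INFO_AVAILABLE) &&
	    (ioc_loginfo & SAS_LOGINFO_MASK) == SAS_LOGINFO_NEXUS_LOSS)
		return RETRY_BUS_BUSY;  /* transient nexus loss: let the midlayer retry */
	return FALL_THROUGH;            /* behave like TASK/EXT_TERMINATED below        */
}

int main(void)
{
	printf("%d\n", classify(0x8048, 0x31120101)); /* 0: retry */
	printf("%d\n", classify(0x0048, 0x31120101)); /* 1: fall through */
	return 0;
}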
@@ -658,7 +722,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
658 sc->result=DID_SOFT_ERROR << 16; 722 sc->result=DID_SOFT_ERROR << 16;
659 else /* Sufficient data transfer occurred */ 723 else /* Sufficient data transfer occurred */
660 sc->result = (DID_OK << 16) | scsi_status; 724 sc->result = (DID_OK << 16) | scsi_status;
661 dreplyprintk((KERN_NOTICE 725 dreplyprintk((KERN_NOTICE
662 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id)); 726 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
663 break; 727 break;
664 728
@@ -784,8 +848,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
784 sc->request_bufflen, sc->sc_data_direction); 848 sc->request_bufflen, sc->sc_data_direction);
785 } 849 }
786 850
787 hd->ScsiLookup[req_idx] = NULL;
788
789 sc->scsi_done(sc); /* Issue the command callback */ 851 sc->scsi_done(sc); /* Issue the command callback */
790 852
791 /* Free Chain buffers */ 853 /* Free Chain buffers */
@@ -827,9 +889,17 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
827 dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n", 889 dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n",
828 mf, SCpnt)); 890 mf, SCpnt));
829 891
892 /* Free Chain buffers */
893 mptscsih_freeChainBuffers(ioc, ii);
894
895 /* Free Message frames */
896 mpt_free_msg_frame(ioc, mf);
897
898 if ((unsigned char *)mf != SCpnt->host_scribble)
899 continue;
900
830 /* Set status, free OS resources (SG DMA buffers) 901 /* Set status, free OS resources (SG DMA buffers)
831 * Do OS callback 902 * Do OS callback
832 * Free driver resources (chain, msg buffers)
833 */ 903 */
834 if (SCpnt->use_sg) { 904 if (SCpnt->use_sg) {
835 pci_unmap_sg(ioc->pcidev, 905 pci_unmap_sg(ioc->pcidev,
@@ -845,12 +915,6 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
845 SCpnt->result = DID_RESET << 16; 915 SCpnt->result = DID_RESET << 16;
846 SCpnt->host_scribble = NULL; 916 SCpnt->host_scribble = NULL;
847 917
848 /* Free Chain buffers */
849 mptscsih_freeChainBuffers(ioc, ii);
850
851 /* Free Message frames */
852 mpt_free_msg_frame(ioc, mf);
853
854 SCpnt->scsi_done(SCpnt); /* Issue the command callback */ 918 SCpnt->scsi_done(SCpnt); /* Issue the command callback */
855 } 919 }
856 } 920 }
@@ -887,10 +951,10 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
887 if ((sc = hd->ScsiLookup[ii]) != NULL) { 951 if ((sc = hd->ScsiLookup[ii]) != NULL) {
888 952
889 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii); 953 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
890 954 if (mf == NULL)
955 continue;
891 dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n", 956 dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n",
892 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1])); 957 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1]));
893
894 if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun))) 958 if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun)))
895 continue; 959 continue;
896 960
@@ -899,6 +963,8 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
899 hd->ScsiLookup[ii] = NULL; 963 hd->ScsiLookup[ii] = NULL;
900 mptscsih_freeChainBuffers(hd->ioc, ii); 964 mptscsih_freeChainBuffers(hd->ioc, ii);
901 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); 965 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
966 if ((unsigned char *)mf != sc->host_scribble)
967 continue;
902 if (sc->use_sg) { 968 if (sc->use_sg) {
903 pci_unmap_sg(hd->ioc->pcidev, 969 pci_unmap_sg(hd->ioc->pcidev,
904 (struct scatterlist *) sc->request_buffer, 970 (struct scatterlist *) sc->request_buffer,
@@ -1341,8 +1407,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1341 goto fail; 1407 goto fail;
1342 } 1408 }
1343 1409
1410 SCpnt->host_scribble = (unsigned char *)mf;
1344 hd->ScsiLookup[my_idx] = SCpnt; 1411 hd->ScsiLookup[my_idx] = SCpnt;
1345 SCpnt->host_scribble = NULL;
1346 1412
1347 mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf); 1413 mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf);
1348 dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n", 1414 dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
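[Annotation] queuecommand now records the message-frame pointer in host_scribble before publishing the lookup entry, and mptscsih_io_done (earlier in this file) compares the completing frame against that cookie; a mismatch means the completion is stale and only the chain buffers are freed. A simplified, compilable model of the tag-and-verify pattern, with a small table standing in for ScsiLookup:

#include <stddef.h>
#include <stdio.h>

struct frame { int idx; };
struct cmd   { struct frame *cookie; };  /* plays the role of host_scribble */

static struct cmd *lookup[4];

static void submit(struct cmd *c, struct frame *mf, int idx)
{
	c->cookie = mf;          /* tag the command with the frame it rode on */
	lookup[idx] = c;
}

/* Returns 1 if the completion matches the outstanding command, 0 if stale. */
static int complete(struct frame *mf, int idx)
{
	struct cmd *c = lookup[idx];
	lookup[idx] = NULL;
	if (!c || c->cookie != mf)
		return 0;            /* stale reply: free driver resources only */
	c->cookie = NULL;
	return 1;                    /* genuine completion: run the callback    */
}

int main(void)
{
	struct frame f1 = { 1 }, f2 = { 2 };
	struct cmd c;
	submit(&c, &f1, 0);
	printf("%d\n", complete(&f2, 0)); /* 0: frame does not match the cookie */
	return 0;
}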
@@ -1529,6 +1595,12 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, in
1529 rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP); 1595 rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP);
1530 } 1596 }
1531 1597
1598 /*
1599 * Check IOCStatus from TM reply message
1600 */
1601 if (hd->tm_iocstatus != MPI_IOCSTATUS_SUCCESS)
1602 rc = FAILED;
1603
1532 dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc)); 1604 dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc));
1533 1605
1534 return rc; 1606 return rc;
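[Annotation] The TM handler now folds the IOC status captured by mptscsih_taskmgmt_complete (stored in hd->tm_iocstatus, see the hunk further down) into its return value: even if issuing the reset appeared to succeed, anything other than MPI_IOCSTATUS_SUCCESS downgrades the result to FAILED. A trivial sketch of that final check, with SUCCESS/FAILED modeled as plain integers:

#include <stdio.h>

#define MPI_IOCSTATUS_SUCCESS 0x0000   /* modeled as 0 here */
enum { SUCCESS = 0, FAILED = 1 };

static int tm_result(int rc, unsigned int tm_iocstatus)
{
	/* the firmware's verdict overrides an apparently successful issue */
	if (tm_iocstatus != MPI_IOCSTATUS_SUCCESS)
		rc = FAILED;
	return rc;
}

int main(void)
{
	printf("%d\n", tm_result(SUCCESS, 0x004A)); /* 1: IOC rejected the TM */
	return 0;
}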
@@ -1654,6 +1726,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1654 int scpnt_idx; 1726 int scpnt_idx;
1655 int retval; 1727 int retval;
1656 VirtDevice *vdev; 1728 VirtDevice *vdev;
1729 ulong sn = SCpnt->serial_number;
1657 1730
1658 /* If we can't locate our host adapter structure, return FAILED status. 1731 /* If we can't locate our host adapter structure, return FAILED status.
1659 */ 1732 */
@@ -1707,6 +1780,11 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1707 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, 1780 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun,
1708 ctx2abort, mptscsih_get_tm_timeout(hd->ioc)); 1781 ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
1709 1782
1783 if (SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx &&
1784 SCpnt->serial_number == sn) {
1785 retval = FAILED;
1786 }
1787
1710 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", 1788 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
1711 hd->ioc->name, 1789 hd->ioc->name,
1712 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1790 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
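[Annotation] After issuing the task abort, the handler re-checks whether the command still sits in the lookup table at the same index; the serial number saved at entry (sn) guards against the scsi_cmnd having completed and been reused for a different request in the meantime. If both still match, the abort did not take and FAILED is returned. A compilable sketch of the index-plus-generation check:

#include <stdio.h>

struct cmd { unsigned long serial; };

static struct cmd *lookup[4];

/* Returns 1 when the original command is still outstanding at 'idx'. */
static int still_pending(struct cmd *sc, int idx, unsigned long sn)
{
	return lookup[idx] == sc && sc->serial == sn;
}

int main(void)
{
	struct cmd c = { .serial = 42 };
	unsigned long sn;

	lookup[2] = &c;
	sn = c.serial;                       /* snapshot taken before the abort  */

	/* abort was issued here; the command never completed */
	printf("%d\n", still_pending(&c, 2, sn)); /* 1 -> report FAILED */

	lookup[2] = NULL; c.serial = 43;     /* command completed and was reused */
	printf("%d\n", still_pending(&c, 2, sn)); /* 0 -> abort succeeded */
	return 0;
}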
@@ -2023,6 +2101,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
2023 DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply); 2101 DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply);
2024 2102
2025 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; 2103 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2104 hd->tm_iocstatus = iocstatus;
2026 dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n", 2105 dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n",
2027 ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo))); 2106 ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo)));
2028 /* Error? (anything non-zero?) */ 2107 /* Error? (anything non-zero?) */
@@ -2401,6 +2480,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2401 ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12]; 2480 ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12];
2402 2481
2403 ioc->eventContext++; 2482 ioc->eventContext++;
2483 if (hd->ioc->pcidev->vendor ==
2484 PCI_VENDOR_ID_IBM) {
2485 mptscsih_issue_sep_command(hd->ioc,
2486 vdev->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
2487 vdev->vtarget->tflags |=
2488 MPT_TARGET_FLAGS_LED_ON;
2489 }
2404 } 2490 }
2405 } 2491 }
2406 } else { 2492 } else {
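[Annotation] Together with the DEVICE_NOT_THERE hunk earlier, this gives the enclosure LED a simple two-state life cycle on IBM hardware: a predicted-fault sense event turns the slot LED on and sets MPT_TARGET_FLAGS_LED_ON on the target, and the flag is cleared (LED set back to unconfigured) once the device drops out. A small sketch of that flag-driven state machine, with the SEP write reduced to a printf:

#include <stdio.h>

#define MPT_TARGET_FLAGS_LED_ON 0x1u

struct vtarget { unsigned int tflags; };

static void issue_sep(const char *slot_status)
{
	printf("SEP write: %s\n", slot_status);  /* stands in for the SEP frame */
}

static void on_predicted_fault(struct vtarget *t)
{
	issue_sep("PREDICTED_FAULT");
	t->tflags |= MPT_TARGET_FLAGS_LED_ON;
}

static void on_device_gone(struct vtarget *t)
{
	if (t->tflags & MPT_TARGET_FLAGS_LED_ON) {
		issue_sep("UNCONFIGURED");
		t->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
	}
}

int main(void)
{
	struct vtarget t = { 0 };
	on_predicted_fault(&t);
	on_device_gone(&t);     /* LED turned off exactly once          */
	on_device_gone(&t);     /* flag already clear: no second write  */
	return 0;
}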
@@ -2409,7 +2495,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2409 } 2495 }
2410} 2496}
2411 2497
2412static u32 2498static int
2413SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc) 2499SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc)
2414{ 2500{
2415 MPT_SCSI_HOST *hd; 2501 MPT_SCSI_HOST *hd;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 0a1ff762205f..e4cc3dd5fc9f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -83,10 +83,6 @@ static int mpt_saf_te = MPTSCSIH_SAF_TE;
83module_param(mpt_saf_te, int, 0); 83module_param(mpt_saf_te, int, 0);
84MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)"); 84MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)");
85 85
86static int mpt_pq_filter = 0;
87module_param(mpt_pq_filter, int, 0);
88MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
89
90static void mptspi_write_offset(struct scsi_target *, int); 86static void mptspi_write_offset(struct scsi_target *, int);
91static void mptspi_write_width(struct scsi_target *, int); 87static void mptspi_write_width(struct scsi_target *, int);
92static int mptspi_write_spi_device_pg1(struct scsi_target *, 88static int mptspi_write_spi_device_pg1(struct scsi_target *,
@@ -1047,14 +1043,12 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1047 hd->timer.function = mptscsih_timer_expired; 1043 hd->timer.function = mptscsih_timer_expired;
1048 1044
1049 ioc->spi_data.Saf_Te = mpt_saf_te; 1045 ioc->spi_data.Saf_Te = mpt_saf_te;
1050 hd->mpt_pq_filter = mpt_pq_filter;
1051 1046
1052 hd->negoNvram = MPT_SCSICFG_USE_NVRAM; 1047 hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
1053 ddvprintk((MYIOC_s_INFO_FMT 1048 ddvprintk((MYIOC_s_INFO_FMT
1054 "saf_te %x mpt_pq_filter %x\n", 1049 "saf_te %x\n",
1055 ioc->name, 1050 ioc->name,
1056 mpt_saf_te, 1051 mpt_saf_te));
1057 mpt_pq_filter));
1058 ioc->spi_data.noQas = 0; 1052 ioc->spi_data.noQas = 0;
1059 1053
1060 init_waitqueue_head(&hd->scandv_waitq); 1054 init_waitqueue_head(&hd->scandv_waitq);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 36d511729f71..2146cf74425e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -132,6 +132,7 @@ static int __init dummy_init_module(void)
132 for (i = 0; i < numdummies && !err; i++) 132 for (i = 0; i < numdummies && !err; i++)
133 err = dummy_init_one(i); 133 err = dummy_init_one(i);
134 if (err) { 134 if (err) {
135 i--;
135 while (--i >= 0) 136 while (--i >= 0)
136 dummy_free_one(i); 137 dummy_free_one(i);
137 } 138 }
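[Annotation] The added i-- matters because the for loop increments i once more after dummy_init_one() fails, so on exit i points one past the failing index; without the decrement, while (--i >= 0) would also free the unit whose init never succeeded. The same fix appears in drivers/net/ifb.c below. A standalone sketch of the corrected unwind:

#include <stdio.h>

#define N 5

static int init_one(int i)  { return i == 3 ? -1 : 0; }  /* unit 3 fails */
static void free_one(int i) { printf("free %d\n", i); }

int main(void)
{
	int i, err = 0;

	for (i = 0; i < N && !err; i++)
		err = init_one(i);

	if (err) {
		i--;                 /* step back over the post-increment...     */
		while (--i >= 0)     /* ...so only units 0..2 are torn down here */
			free_one(i);
	}
	return 0;
}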
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f411bbb44f86..d304297c496c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -110,6 +110,9 @@ struct e1000_adapter;
110#define E1000_MIN_RXD 80 110#define E1000_MIN_RXD 80
111#define E1000_MAX_82544_RXD 4096 111#define E1000_MAX_82544_RXD 4096
112 112
113/* this is the size past which hardware will drop packets when setting LPE=0 */
114#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
115
113/* Supported Rx Buffer Sizes */ 116/* Supported Rx Buffer Sizes */
114#define E1000_RXBUFFER_128 128 /* Used for packet split */ 117#define E1000_RXBUFFER_128 128 /* Used for packet split */
115#define E1000_RXBUFFER_256 256 /* Used for packet split */ 118#define E1000_RXBUFFER_256 256 /* Used for packet split */
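[Annotation] 1522 is the largest standard VLAN-tagged frame: 1500 bytes of payload, a 14-byte Ethernet header, a 4-byte 802.1Q tag and a 4-byte FCS. With LPE=0 the MAC drops anything larger, which is why the e1000_main.c change below can size the default receive buffer to this value instead of MAXIMUM_ETHERNET_FRAME_SIZE. The composition as a compile-time check:

#include <assert.h>

enum {
	ETH_PAYLOAD_MTU   = 1500,
	ETH_HDR_LEN       = 14,   /* dst + src + ethertype */
	VLAN_TAG_LEN      = 4,    /* 802.1Q tag            */
	ETH_FCS_LEN       = 4,    /* trailing CRC          */
	MAX_ETH_VLAN_SIZE = ETH_PAYLOAD_MTU + ETH_HDR_LEN + VLAN_TAG_LEN + ETH_FCS_LEN,
};

int main(void)
{
	assert(MAX_ETH_VLAN_SIZE == 1522);
	return 0;
}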
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6d3d41934503..da62db897426 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "7.1.9-k2"DRIVERNAPI 39#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
40char e1000_driver_version[] = DRV_VERSION; 40char e1000_driver_version[] = DRV_VERSION;
41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -1068,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
1068 1068
1069 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 1069 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1070 1070
1071 adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; 1071 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1072 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; 1072 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
1073 hw->max_frame_size = netdev->mtu + 1073 hw->max_frame_size = netdev->mtu +
1074 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 1074 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
@@ -3148,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3148 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3148 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3149 3149
3150 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3150 /* adjust allocation if LPE protects us, and we aren't using SBP */
3151#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
3152 if (!adapter->hw.tbi_compatibility_on && 3151 if (!adapter->hw.tbi_compatibility_on &&
3153 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || 3152 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3154 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3153 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
@@ -3387,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3387 E1000_WRITE_REG(hw, IMC, ~0); 3386 E1000_WRITE_REG(hw, IMC, ~0);
3388 E1000_WRITE_FLUSH(hw); 3387 E1000_WRITE_FLUSH(hw);
3389 } 3388 }
3390 if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) 3389 if (likely(netif_rx_schedule_prep(netdev)))
3391 __netif_rx_schedule(&adapter->polling_netdev[0]); 3390 __netif_rx_schedule(netdev);
3392 else 3391 else
3393 e1000_irq_enable(adapter); 3392 e1000_irq_enable(adapter);
3394#else 3393#else
@@ -3431,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3431{ 3430{
3432 struct e1000_adapter *adapter; 3431 struct e1000_adapter *adapter;
3433 int work_to_do = min(*budget, poll_dev->quota); 3432 int work_to_do = min(*budget, poll_dev->quota);
3434 int tx_cleaned = 0, i = 0, work_done = 0; 3433 int tx_cleaned = 0, work_done = 0;
3435 3434
3436 /* Must NOT use netdev_priv macro here. */ 3435 /* Must NOT use netdev_priv macro here. */
3437 adapter = poll_dev->priv; 3436 adapter = poll_dev->priv;
3438 3437
3439 /* Keep link state information with original netdev */ 3438 /* Keep link state information with original netdev */
3440 if (!netif_carrier_ok(adapter->netdev)) 3439 if (!netif_carrier_ok(poll_dev))
3441 goto quit_polling; 3440 goto quit_polling;
3442 3441
3443 while (poll_dev != &adapter->polling_netdev[i]) { 3442 /* e1000_clean is called per-cpu. This lock protects
3444 i++; 3443 * tx_ring[0] from being cleaned by multiple cpus
3445 BUG_ON(i == adapter->num_rx_queues); 3444 * simultaneously. A failure obtaining the lock means
3445 * tx_ring[0] is currently being cleaned anyway. */
3446 if (spin_trylock(&adapter->tx_queue_lock)) {
3447 tx_cleaned = e1000_clean_tx_irq(adapter,
3448 &adapter->tx_ring[0]);
3449 spin_unlock(&adapter->tx_queue_lock);
3446 } 3450 }
3447 3451
3448 if (likely(adapter->num_tx_queues == 1)) { 3452 adapter->clean_rx(adapter, &adapter->rx_ring[0],
3449 /* e1000_clean is called per-cpu. This lock protects
3450 * tx_ring[0] from being cleaned by multiple cpus
3451 * simultaneously. A failure obtaining the lock means
3452 * tx_ring[0] is currently being cleaned anyway. */
3453 if (spin_trylock(&adapter->tx_queue_lock)) {
3454 tx_cleaned = e1000_clean_tx_irq(adapter,
3455 &adapter->tx_ring[0]);
3456 spin_unlock(&adapter->tx_queue_lock);
3457 }
3458 } else
3459 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
3460
3461 adapter->clean_rx(adapter, &adapter->rx_ring[i],
3462 &work_done, work_to_do); 3453 &work_done, work_to_do);
3463 3454
3464 *budget -= work_done; 3455 *budget -= work_done;
@@ -3466,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3466 3457
3467 /* If no Tx and not enough Rx work done, exit the polling mode */ 3458 /* If no Tx and not enough Rx work done, exit the polling mode */
3468 if ((!tx_cleaned && (work_done == 0)) || 3459 if ((!tx_cleaned && (work_done == 0)) ||
3469 !netif_running(adapter->netdev)) { 3460 !netif_running(poll_dev)) {
3470quit_polling: 3461quit_polling:
3471 netif_rx_complete(poll_dev); 3462 netif_rx_complete(poll_dev);
3472 e1000_irq_enable(adapter); 3463 e1000_irq_enable(adapter);
@@ -3681,6 +3672,9 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3681 3672
3682 length = le16_to_cpu(rx_desc->length); 3673 length = le16_to_cpu(rx_desc->length);
3683 3674
3675 /* adjust length to remove Ethernet CRC */
3676 length -= 4;
3677
3684 if (unlikely(!(status & E1000_RXD_STAT_EOP))) { 3678 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
3685 /* All receives must fit into a single buffer */ 3679 /* All receives must fit into a single buffer */
3686 E1000_DBG("%s: Receive packet consumed multiple" 3680 E1000_DBG("%s: Receive packet consumed multiple"
@@ -3885,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3885 pci_dma_sync_single_for_device(pdev, 3879 pci_dma_sync_single_for_device(pdev,
3886 ps_page_dma->ps_page_dma[0], 3880 ps_page_dma->ps_page_dma[0],
3887 PAGE_SIZE, PCI_DMA_FROMDEVICE); 3881 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3882 /* remove the CRC */
3883 l1 -= 4;
3888 skb_put(skb, l1); 3884 skb_put(skb, l1);
3889 length += l1;
3890 goto copydone; 3885 goto copydone;
3891 } /* if */ 3886 } /* if */
3892 } 3887 }
@@ -3905,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3905 skb->truesize += length; 3900 skb->truesize += length;
3906 } 3901 }
3907 3902
3903 /* strip the ethernet crc, problem is we're using pages now so
3904 * this whole operation can get a little cpu intensive */
3905 pskb_trim(skb, skb->len - 4);
3906
3908copydone: 3907copydone:
3909 e1000_rx_checksum(adapter, staterr, 3908 e1000_rx_checksum(adapter, staterr,
3910 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); 3909 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
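[Annotation] Both receive paths now subtract the 4-byte Ethernet FCS before handing data up: the legacy path trims the descriptor length directly, the packet-split path shortens the copied first segment (l1 -= 4) and otherwise pskb_trim()s the assembled skb. A trivial sketch of the length adjustment:

#include <stdio.h>

#define ETHERNET_FCS_SIZE 4

/* Hardware reports the on-wire length including CRC; the stack wants it without. */
static unsigned int rx_payload_len(unsigned int desc_len)
{
	return desc_len - ETHERNET_FCS_SIZE;
}

int main(void)
{
	printf("%u\n", rx_payload_len(64));   /* 60: minimum frame minus FCS    */
	printf("%u\n", rx_payload_len(1522)); /* 1518: max VLAN frame minus FCS */
	return 0;
}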
@@ -4752,6 +4751,7 @@ static void
4752e1000_netpoll(struct net_device *netdev) 4751e1000_netpoll(struct net_device *netdev)
4753{ 4752{
4754 struct e1000_adapter *adapter = netdev_priv(netdev); 4753 struct e1000_adapter *adapter = netdev_priv(netdev);
4754
4755 disable_irq(adapter->pdev->irq); 4755 disable_irq(adapter->pdev->irq);
4756 e1000_intr(adapter->pdev->irq, netdev, NULL); 4756 e1000_intr(adapter->pdev->irq, netdev, NULL);
4757 e1000_clean_tx_irq(adapter, adapter->tx_ring); 4757 e1000_clean_tx_irq(adapter, adapter->tx_ring);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 3a42afab5036..43e3f33ed5e2 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -271,6 +271,7 @@ static int __init ifb_init_module(void)
271 for (i = 0; i < numifbs && !err; i++) 271 for (i = 0; i < numifbs && !err; i++)
272 err = ifb_init_one(i); 272 err = ifb_init_one(i);
273 if (err) { 273 if (err) {
274 i--;
274 while (--i >= 0) 275 while (--i >= 0)
275 ifb_free_one(i); 276 ifb_free_one(i);
276 } 277 }
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 07ca9480a6fe..c3e52c806b13 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -620,7 +620,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
620 return -ENXIO; 620 return -ENXIO;
621 } 621 }
622 dev_info(&mgp->pdev->dev, "handoff confirmed\n"); 622 dev_info(&mgp->pdev->dev, "handoff confirmed\n");
623 myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); 623 myri10ge_dummy_rdma(mgp, 1);
624 624
625 return 0; 625 return 0;
626} 626}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 82200bfaa8ed..7de9a07b2ac2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev,
516/* Chip internal frequency for clock calculations */ 516/* Chip internal frequency for clock calculations */
517static inline u32 hwkhz(const struct skge_hw *hw) 517static inline u32 hwkhz(const struct skge_hw *hw)
518{ 518{
519 if (hw->chip_id == CHIP_ID_GENESIS) 519 return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
520 return 53215; /* or: 53.125 MHz */
521 else
522 return 78215; /* or: 78.125 MHz */
523} 520}
524 521
525/* Chip HZ to microseconds */ 522/* Chip HZ to microseconds */
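[Annotation] The replaced constants were typos: the removed comments already said 53.125 MHz and 78.125 MHz, but the code returned 53215/78215, skewing every clock-to-microseconds conversion by roughly 0.1%. A sketch of the kind of conversion that depends on this value; the helper name and the CHIP_ID_GENESIS value are illustrative, not the driver's:

#include <stdio.h>
#include <stdint.h>

#define CHIP_ID_GENESIS 0x0a          /* placeholder id value */

struct skge_hw { int chip_id; };

static inline uint32_t hwkhz(const struct skge_hw *hw)
{
	return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
}

/* Convert a tick count of the chip's internal clock into microseconds. */
static uint32_t ticks_to_usecs(const struct skge_hw *hw, uint32_t ticks)
{
	return (uint32_t)(((uint64_t)ticks * 1000) / hwkhz(hw));
}

int main(void)
{
	struct skge_hw hw = { .chip_id = 0 };
	printf("%u\n", ticks_to_usecs(&hw, 78125)); /* 1000 us on non-Genesis */
	return 0;
}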
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d98f28c34e5c..de91609ca112 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.4" 53#define DRV_VERSION "1.5"
54#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
55 55
56/* 56/*
@@ -2204,9 +2204,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2204 int work_done = 0; 2204 int work_done = 0;
2205 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2205 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2206 2206
2207 if (!~status)
2208 goto out;
2209
2210 if (status & Y2_IS_HW_ERR) 2207 if (status & Y2_IS_HW_ERR)
2211 sky2_hw_intr(hw); 2208 sky2_hw_intr(hw);
2212 2209
@@ -2243,7 +2240,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2243 2240
2244 if (sky2_more_work(hw)) 2241 if (sky2_more_work(hw))
2245 return 1; 2242 return 1;
2246out: 2243
2247 netif_rx_complete(dev0); 2244 netif_rx_complete(dev0);
2248 2245
2249 sky2_read32(hw, B0_Y2_SP_LISR); 2246 sky2_read32(hw, B0_Y2_SP_LISR);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index fb1d5a8a45cf..647f62e9707d 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
84 * 84 *
85 * returns the content of the specified SMMIO register. 85 * returns the content of the specified SMMIO register.
86 */ 86 */
87static u32 87static inline u32
88spider_net_read_reg(struct spider_net_card *card, u32 reg) 88spider_net_read_reg(struct spider_net_card *card, u32 reg)
89{ 89{
90 u32 value; 90 u32 value;
@@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
101 * @reg: register to write to 101 * @reg: register to write to
102 * @value: value to write into the specified SMMIO register 102 * @value: value to write into the specified SMMIO register
103 */ 103 */
104static void 104static inline void
105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) 105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
106{ 106{
107 value = cpu_to_le32(value); 107 value = cpu_to_le32(value);
@@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev)
259 * 259 *
260 * returns the status as in the dmac_cmd_status field of the descriptor 260 * returns the status as in the dmac_cmd_status field of the descriptor
261 */ 261 */
262static enum spider_net_descr_status 262static inline int
263spider_net_get_descr_status(struct spider_net_descr *descr) 263spider_net_get_descr_status(struct spider_net_descr *descr)
264{ 264{
265 u32 cmd_status; 265 return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
266
267 cmd_status = descr->dmac_cmd_status;
268 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
269 /* no need to mask out any bits, as cmd_status is 32 bits wide only
270 * (and unsigned) */
271 return cmd_status;
272}
273
274/**
275 * spider_net_set_descr_status -- sets the status of a descriptor
276 * @descr: descriptor to change
277 * @status: status to set in the descriptor
278 *
279 * changes the status to the specified value. Doesn't change other bits
280 * in the status
281 */
282static void
283spider_net_set_descr_status(struct spider_net_descr *descr,
284 enum spider_net_descr_status status)
285{
286 u32 cmd_status;
287 /* read the status */
288 cmd_status = descr->dmac_cmd_status;
289 /* clean the upper 4 bits */
290 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
291 /* add the status to it */
292 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
293 /* and write it back */
294 descr->dmac_cmd_status = cmd_status;
295} 266}
296 267
297/** 268/**
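[Annotation] The helper collapses to a mask because, as the spider_net.h defines added at the end of this patch show, the descriptor state lives entirely in the top nibble of dmac_cmd_status (0xA... card-owned, 0x0... complete, 0xF... not in use, and so on); keeping the masked value rather than shifting lets it be compared directly against those constants. A sketch of the encoding, using values taken from the header hunk below:

#include <stdio.h>
#include <stdint.h>

#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000u
#define SPIDER_NET_DESCR_COMPLETE      0x00000000u
#define SPIDER_NET_DESCR_CARDOWNED     0xA0000000u
#define SPIDER_NET_DESCR_NOT_IN_USE    0xF0000000u

struct descr { uint32_t dmac_cmd_status; };

static inline uint32_t descr_status(const struct descr *d)
{
	/* state bits only; command/checksum bits in the low part are ignored */
	return d->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}

int main(void)
{
	struct descr d = { .dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 0x00800000u };
	printf("card owned: %d\n", descr_status(&d) == SPIDER_NET_DESCR_CARDOWNED); /* 1 */
	return 0;
}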
@@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card,
328static int 299static int
329spider_net_init_chain(struct spider_net_card *card, 300spider_net_init_chain(struct spider_net_card *card,
330 struct spider_net_descr_chain *chain, 301 struct spider_net_descr_chain *chain,
331 struct spider_net_descr *start_descr, int no) 302 struct spider_net_descr *start_descr,
303 int direction, int no)
332{ 304{
333 int i; 305 int i;
334 struct spider_net_descr *descr; 306 struct spider_net_descr *descr;
335 dma_addr_t buf; 307 dma_addr_t buf;
336 308
337 atomic_set(&card->rx_chain_refill,0);
338
339 descr = start_descr; 309 descr = start_descr;
340 memset(descr, 0, sizeof(*descr) * no); 310 memset(descr, 0, sizeof(*descr) * no);
341 311
342 /* set up the hardware pointers in each descriptor */ 312 /* set up the hardware pointers in each descriptor */
343 for (i=0; i<no; i++, descr++) { 313 for (i=0; i<no; i++, descr++) {
344 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 314 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
345 315
346 buf = pci_map_single(card->pdev, descr, 316 buf = pci_map_single(card->pdev, descr,
347 SPIDER_NET_DESCR_SIZE, 317 SPIDER_NET_DESCR_SIZE,
348 PCI_DMA_BIDIRECTIONAL); 318 direction);
349 319
350 if (buf == DMA_ERROR_CODE) 320 if (buf == DMA_ERROR_CODE)
351 goto iommu_error; 321 goto iommu_error;
@@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card,
360 start_descr->prev = descr-1; 330 start_descr->prev = descr-1;
361 331
362 descr = start_descr; 332 descr = start_descr;
363 for (i=0; i < no; i++, descr++) { 333 if (direction == PCI_DMA_FROMDEVICE)
364 descr->next_descr_addr = descr->next->bus_addr; 334 for (i=0; i < no; i++, descr++)
365 } 335 descr->next_descr_addr = descr->next->bus_addr;
366 336
337 spin_lock_init(&chain->lock);
367 chain->head = start_descr; 338 chain->head = start_descr;
368 chain->tail = start_descr; 339 chain->tail = start_descr;
369 340
@@ -375,7 +346,7 @@ iommu_error:
375 if (descr->bus_addr) 346 if (descr->bus_addr)
376 pci_unmap_single(card->pdev, descr->bus_addr, 347 pci_unmap_single(card->pdev, descr->bus_addr,
377 SPIDER_NET_DESCR_SIZE, 348 SPIDER_NET_DESCR_SIZE,
378 PCI_DMA_BIDIRECTIONAL); 349 direction);
379 return -ENOMEM; 350 return -ENOMEM;
380} 351}
381 352
@@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
396 dev_kfree_skb(descr->skb); 367 dev_kfree_skb(descr->skb);
397 pci_unmap_single(card->pdev, descr->buf_addr, 368 pci_unmap_single(card->pdev, descr->buf_addr,
398 SPIDER_NET_MAX_FRAME, 369 SPIDER_NET_MAX_FRAME,
399 PCI_DMA_BIDIRECTIONAL); 370 PCI_DMA_FROMDEVICE);
400 } 371 }
401 descr = descr->next; 372 descr = descr->next;
402 } 373 }
@@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
446 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 417 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
447 /* io-mmu-map the skb */ 418 /* io-mmu-map the skb */
448 buf = pci_map_single(card->pdev, descr->skb->data, 419 buf = pci_map_single(card->pdev, descr->skb->data,
449 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); 420 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
450 descr->buf_addr = buf; 421 descr->buf_addr = buf;
451 if (buf == DMA_ERROR_CODE) { 422 if (buf == DMA_ERROR_CODE) {
452 dev_kfree_skb_any(descr->skb); 423 dev_kfree_skb_any(descr->skb);
453 if (netif_msg_rx_err(card) && net_ratelimit()) 424 if (netif_msg_rx_err(card) && net_ratelimit())
454 pr_err("Could not iommu-map rx buffer\n"); 425 pr_err("Could not iommu-map rx buffer\n");
455 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 426 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
456 } else { 427 } else {
457 descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED; 428 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
429 SPIDER_NET_DMAC_NOINTR_COMPLETE;
458 } 430 }
459 431
460 return error; 432 return error;
@@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
468 * chip by writing to the appropriate register. DMA is enabled in 440 * chip by writing to the appropriate register. DMA is enabled in
469 * spider_net_enable_rxdmac. 441 * spider_net_enable_rxdmac.
470 */ 442 */
471static void 443static inline void
472spider_net_enable_rxchtails(struct spider_net_card *card) 444spider_net_enable_rxchtails(struct spider_net_card *card)
473{ 445{
474 /* assume chain is aligned correctly */ 446 /* assume chain is aligned correctly */
@@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
483 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 455 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
484 * in the GDADMACCNTR register 456 * in the GDADMACCNTR register
485 */ 457 */
486static void 458static inline void
487spider_net_enable_rxdmac(struct spider_net_card *card) 459spider_net_enable_rxdmac(struct spider_net_card *card)
488{ 460{
489 wmb(); 461 wmb();
@@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
500static void 472static void
501spider_net_refill_rx_chain(struct spider_net_card *card) 473spider_net_refill_rx_chain(struct spider_net_card *card)
502{ 474{
503 struct spider_net_descr_chain *chain; 475 struct spider_net_descr_chain *chain = &card->rx_chain;
504 476 unsigned long flags;
505 chain = &card->rx_chain;
506 477
507 /* one context doing the refill (and a second context seeing that 478 /* one context doing the refill (and a second context seeing that
508 * and omitting it) is ok. If called by NAPI, we'll be called again 479 * and omitting it) is ok. If called by NAPI, we'll be called again
509 * as spider_net_decode_one_descr is called several times. If some 480 * as spider_net_decode_one_descr is called several times. If some
510 * interrupt calls us, the NAPI is about to clean up anyway. */ 481 * interrupt calls us, the NAPI is about to clean up anyway. */
511 if (atomic_inc_return(&card->rx_chain_refill) == 1) 482 if (!spin_trylock_irqsave(&chain->lock, flags))
512 while (spider_net_get_descr_status(chain->head) == 483 return;
513 SPIDER_NET_DESCR_NOT_IN_USE) { 484
514 if (spider_net_prepare_rx_descr(card, chain->head)) 485 while (spider_net_get_descr_status(chain->head) ==
515 break; 486 SPIDER_NET_DESCR_NOT_IN_USE) {
516 chain->head = chain->head->next; 487 if (spider_net_prepare_rx_descr(card, chain->head))
517 } 488 break;
489 chain->head = chain->head->next;
490 }
518 491
519 atomic_dec(&card->rx_chain_refill); 492 spin_unlock_irqrestore(&chain->lock, flags);
520} 493}
521 494
522/** 495/**
@@ -554,111 +527,6 @@ error:
554} 527}
555 528
556/** 529/**
557 * spider_net_release_tx_descr - processes a used tx descriptor
558 * @card: card structure
559 * @descr: descriptor to release
560 *
561 * releases a used tx descriptor (unmapping, freeing of skb)
562 */
563static void
564spider_net_release_tx_descr(struct spider_net_card *card,
565 struct spider_net_descr *descr)
566{
567 struct sk_buff *skb;
568
569 /* unmap the skb */
570 skb = descr->skb;
571 pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
572 PCI_DMA_BIDIRECTIONAL);
573
574 dev_kfree_skb_any(skb);
575
576 /* set status to not used */
577 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
578}
579
580/**
581 * spider_net_release_tx_chain - processes sent tx descriptors
582 * @card: adapter structure
583 * @brutal: if set, don't care about whether descriptor seems to be in use
584 *
585 * returns 0 if the tx ring is empty, otherwise 1.
586 *
587 * spider_net_release_tx_chain releases the tx descriptors that spider has
588 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
589 * If some other context is calling this function, we return 1 so that we're
590 * scheduled again (if we were scheduled) and will not loose initiative.
591 */
592static int
593spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
594{
595 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
596 enum spider_net_descr_status status;
597
598 if (atomic_inc_return(&card->tx_chain_release) != 1) {
599 atomic_dec(&card->tx_chain_release);
600 return 1;
601 }
602
603 for (;;) {
604 status = spider_net_get_descr_status(tx_chain->tail);
605 switch (status) {
606 case SPIDER_NET_DESCR_CARDOWNED:
607 if (!brutal)
608 goto out;
609 /* fallthrough, if we release the descriptors
610 * brutally (then we don't care about
611 * SPIDER_NET_DESCR_CARDOWNED) */
612 case SPIDER_NET_DESCR_RESPONSE_ERROR:
613 case SPIDER_NET_DESCR_PROTECTION_ERROR:
614 case SPIDER_NET_DESCR_FORCE_END:
615 if (netif_msg_tx_err(card))
616 pr_err("%s: forcing end of tx descriptor "
617 "with status x%02x\n",
618 card->netdev->name, status);
619 card->netdev_stats.tx_dropped++;
620 break;
621
622 case SPIDER_NET_DESCR_COMPLETE:
623 card->netdev_stats.tx_packets++;
624 card->netdev_stats.tx_bytes +=
625 tx_chain->tail->skb->len;
626 break;
627
628 default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
629 goto out;
630 }
631 spider_net_release_tx_descr(card, tx_chain->tail);
632 tx_chain->tail = tx_chain->tail->next;
633 }
634out:
635 atomic_dec(&card->tx_chain_release);
636
637 netif_wake_queue(card->netdev);
638
639 if (status == SPIDER_NET_DESCR_CARDOWNED)
640 return 1;
641 return 0;
642}
643
644/**
645 * spider_net_cleanup_tx_ring - cleans up the TX ring
646 * @card: card structure
647 *
648 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
649 * interrupts to cleanup our TX ring) and returns sent packets to the stack
650 * by freeing them
651 */
652static void
653spider_net_cleanup_tx_ring(struct spider_net_card *card)
654{
655 if ( (spider_net_release_tx_chain(card, 0)) &&
656 (card->netdev->flags & IFF_UP) ) {
657 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
658 }
659}
660
661/**
662 * spider_net_get_multicast_hash - generates hash for multicast filter table 530 * spider_net_get_multicast_hash - generates hash for multicast filter table
663 * @addr: multicast address 531 * @addr: multicast address
664 * 532 *
@@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card)
761} 629}
762 630
763/** 631/**
764 * spider_net_stop - called upon ifconfig down
765 * @netdev: interface device structure
766 *
767 * always returns 0
768 */
769int
770spider_net_stop(struct net_device *netdev)
771{
772 struct spider_net_card *card = netdev_priv(netdev);
773
774 tasklet_kill(&card->rxram_full_tl);
775 netif_poll_disable(netdev);
776 netif_carrier_off(netdev);
777 netif_stop_queue(netdev);
778 del_timer_sync(&card->tx_timer);
779
780 /* disable/mask all interrupts */
781 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
782 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
783 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
784
785 /* free_irq(netdev->irq, netdev);*/
786 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
787
788 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
789 SPIDER_NET_DMA_TX_FEND_VALUE);
790
791 /* turn off DMA, force end */
792 spider_net_disable_rxdmac(card);
793
794 /* release chains */
795 spider_net_release_tx_chain(card, 1);
796
797 spider_net_free_chain(card, &card->tx_chain);
798 spider_net_free_chain(card, &card->rx_chain);
799
800 return 0;
801}
802
803/**
804 * spider_net_get_next_tx_descr - returns the next available tx descriptor
805 * @card: device structure to get descriptor from
806 *
807 * returns the address of the next descriptor, or NULL if not available.
808 */
809static struct spider_net_descr *
810spider_net_get_next_tx_descr(struct spider_net_card *card)
811{
812 /* check, if head points to not-in-use descr */
813 if ( spider_net_get_descr_status(card->tx_chain.head) ==
814 SPIDER_NET_DESCR_NOT_IN_USE ) {
815 return card->tx_chain.head;
816 } else {
817 return NULL;
818 }
819}
820
821/**
822 * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
823 * @descr: descriptor structure to fill out
824 * @skb: packet to consider
825 *
826 * fills out the command and status field of the descriptor structure,
827 * depending on hardware checksum settings.
828 */
829static void
830spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
831 struct sk_buff *skb)
832{
833 /* make sure the other fields in the descriptor are written */
834 wmb();
835
836 if (skb->ip_summed != CHECKSUM_HW) {
837 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
838 return;
839 }
840
841 /* is packet ip?
842 * if yes: tcp? udp? */
843 if (skb->protocol == htons(ETH_P_IP)) {
844 if (skb->nh.iph->protocol == IPPROTO_TCP)
845 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
846 else if (skb->nh.iph->protocol == IPPROTO_UDP)
847 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
848 else /* the stack should checksum non-tcp and non-udp
849 packets on his own: NETIF_F_IP_CSUM */
850 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
851 }
852}
853
854/**
855 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 632 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
856 * @card: card structure 633 * @card: card structure
857 * @descr: descriptor structure to fill out 634 * @descr: descriptor structure to fill out
@@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
864 */ 641 */
865static int 642static int
866spider_net_prepare_tx_descr(struct spider_net_card *card, 643spider_net_prepare_tx_descr(struct spider_net_card *card,
867 struct spider_net_descr *descr,
868 struct sk_buff *skb) 644 struct sk_buff *skb)
869{ 645{
646 struct spider_net_descr *descr = card->tx_chain.head;
870 dma_addr_t buf; 647 dma_addr_t buf;
871 648
872 buf = pci_map_single(card->pdev, skb->data, 649 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
873 skb->len, PCI_DMA_BIDIRECTIONAL);
874 if (buf == DMA_ERROR_CODE) { 650 if (buf == DMA_ERROR_CODE) {
875 if (netif_msg_tx_err(card) && net_ratelimit()) 651 if (netif_msg_tx_err(card) && net_ratelimit())
876 pr_err("could not iommu-map packet (%p, %i). " 652 pr_err("could not iommu-map packet (%p, %i). "
@@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
880 656
881 descr->buf_addr = buf; 657 descr->buf_addr = buf;
882 descr->buf_size = skb->len; 658 descr->buf_size = skb->len;
659 descr->next_descr_addr = 0;
883 descr->skb = skb; 660 descr->skb = skb;
884 descr->data_status = 0; 661 descr->data_status = 0;
885 662
886 spider_net_set_txdescr_cmdstat(descr,skb); 663 descr->dmac_cmd_status =
664 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
665 if (skb->protocol == htons(ETH_P_IP))
666 switch (skb->nh.iph->protocol) {
667 case IPPROTO_TCP:
668 descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
669 break;
670 case IPPROTO_UDP:
671 descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
672 break;
673 }
674
675 descr->prev->next_descr_addr = descr->bus_addr;
676
677 return 0;
678}
679
680/**
681 * spider_net_release_tx_descr - processes a used tx descriptor
682 * @card: card structure
683 * @descr: descriptor to release
684 *
685 * releases a used tx descriptor (unmapping, freeing of skb)
686 */
687static inline void
688spider_net_release_tx_descr(struct spider_net_card *card)
689{
690 struct spider_net_descr *descr = card->tx_chain.tail;
691 struct sk_buff *skb;
692
693 card->tx_chain.tail = card->tx_chain.tail->next;
694 descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
695
696 /* unmap the skb */
697 skb = descr->skb;
698 pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
699 PCI_DMA_TODEVICE);
700 dev_kfree_skb_any(skb);
701}
702
703/**
704 * spider_net_release_tx_chain - processes sent tx descriptors
705 * @card: adapter structure
706 * @brutal: if set, don't care about whether descriptor seems to be in use
707 *
708 * returns 0 if the tx ring is empty, otherwise 1.
709 *
710 * spider_net_release_tx_chain releases the tx descriptors that spider has
711 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
712 * If some other context is calling this function, we return 1 so that we're
713 * scheduled again (if we were scheduled) and will not loose initiative.
714 */
715static int
716spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
717{
718 struct spider_net_descr_chain *chain = &card->tx_chain;
719 int status;
720
721 spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
722
723 while (chain->tail != chain->head) {
724 status = spider_net_get_descr_status(chain->tail);
725 switch (status) {
726 case SPIDER_NET_DESCR_COMPLETE:
727 card->netdev_stats.tx_packets++;
728 card->netdev_stats.tx_bytes += chain->tail->skb->len;
729 break;
730
731 case SPIDER_NET_DESCR_CARDOWNED:
732 if (!brutal)
733 return 1;
734 /* fallthrough, if we release the descriptors
735 * brutally (then we don't care about
736 * SPIDER_NET_DESCR_CARDOWNED) */
737
738 case SPIDER_NET_DESCR_RESPONSE_ERROR:
739 case SPIDER_NET_DESCR_PROTECTION_ERROR:
740 case SPIDER_NET_DESCR_FORCE_END:
741 if (netif_msg_tx_err(card))
742 pr_err("%s: forcing end of tx descriptor "
743 "with status x%02x\n",
744 card->netdev->name, status);
745 card->netdev_stats.tx_errors++;
746 break;
747
748 default:
749 card->netdev_stats.tx_dropped++;
750 return 1;
751 }
752 spider_net_release_tx_descr(card);
753 }
887 754
888 return 0; 755 return 0;
889} 756}
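[Annotation] The open-coded replacement for spider_net_set_txdescr_cmdstat starts every descriptor as card-owned with checksumming disabled and only switches on the TCP or UDP offload bit for IPv4 packets carrying those protocols; linking the new descriptor into the chain via prev->next_descr_addr happens last. A sketch of the flag selection, with bit values mirroring the spider_net.h additions at the end of this patch:

#include <stdio.h>
#include <stdint.h>

#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000u
#define SPIDER_NET_DMAC_NOCS       0x00040000u
#define SPIDER_NET_DMAC_TCP        0x00020000u
#define SPIDER_NET_DMAC_UDP        0x00030000u

#define IPPROTO_TCP 6
#define IPPROTO_UDP 17

static uint32_t tx_cmd_status(int is_ipv4, int ip_proto)
{
	uint32_t status = SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;

	if (is_ipv4) {
		switch (ip_proto) {
		case IPPROTO_TCP:
			status |= SPIDER_NET_DMAC_TCP;
			break;
		case IPPROTO_UDP:
			status |= SPIDER_NET_DMAC_UDP;
			break;
		}
	}
	return status;
}

int main(void)
{
	printf("0x%08x\n", tx_cmd_status(1, IPPROTO_TCP)); /* TCP checksum offload */
	printf("0x%08x\n", tx_cmd_status(0, 0));           /* no checksum offload  */
	return 0;
}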
@@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
896 * spider_net_kick_tx_dma writes the current tx chain head as start address 763 * spider_net_kick_tx_dma writes the current tx chain head as start address
897 * of the tx descriptor chain and enables the transmission DMA engine 764 * of the tx descriptor chain and enables the transmission DMA engine
898 */ 765 */
899static void 766static inline void
900spider_net_kick_tx_dma(struct spider_net_card *card, 767spider_net_kick_tx_dma(struct spider_net_card *card)
901 struct spider_net_descr *descr)
902{ 768{
903 /* this is the only descriptor in the output chain. 769 struct spider_net_descr *descr;
904 * Enable TX DMA */
905 770
906 spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 771 if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
907 descr->bus_addr); 772 SPIDER_NET_TX_DMA_EN)
773 goto out;
908 774
909 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 775 descr = card->tx_chain.tail;
910 SPIDER_NET_DMA_TX_VALUE); 776 for (;;) {
777 if (spider_net_get_descr_status(descr) ==
778 SPIDER_NET_DESCR_CARDOWNED) {
779 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
780 descr->bus_addr);
781 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
782 SPIDER_NET_DMA_TX_VALUE);
783 break;
784 }
785 if (descr == card->tx_chain.head)
786 break;
787 descr = descr->next;
788 }
789
790out:
791 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
911} 792}
912 793
913/** 794/**
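[Annotation] spider_net_kick_tx_dma no longer takes a descriptor: if TX DMA is already enabled it just rearms the cleanup timer, otherwise it walks from the chain tail towards the head looking for the first card-owned descriptor and points the DMA engine at it. A pointer-free sketch of that scan over a small ring, with the descriptor states reduced to integers:

#include <stdio.h>

#define RING 4
#define OWNED    1
#define NOT_USED 0

static int state[RING] = { NOT_USED, OWNED, OWNED, NOT_USED };

/* Walk tail..head and return the index DMA should be (re)started at, or -1. */
static int find_restart_point(int tail, int head)
{
	int i = tail;
	for (;;) {
		if (state[i] == OWNED)
			return i;
		if (i == head)
			return -1;       /* nothing pending */
		i = (i + 1) % RING;
	}
}

int main(void)
{
	printf("%d\n", find_restart_point(0, 3)); /* 1: first card-owned descriptor */
	return 0;
}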
@@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card,
915 * @skb: packet to send out 796 * @skb: packet to send out
916 * @netdev: interface device structure 797 * @netdev: interface device structure
917 * 798 *
918 * returns 0 on success, <0 on failure 799 * returns 0 on success, !0 on failure
919 */ 800 */
920static int 801static int
921spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) 802spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
922{ 803{
923 struct spider_net_card *card = netdev_priv(netdev); 804 struct spider_net_card *card = netdev_priv(netdev);
924 struct spider_net_descr *descr; 805 struct spider_net_descr_chain *chain = &card->tx_chain;
806 struct spider_net_descr *descr = chain->head;
807 unsigned long flags;
925 int result; 808 int result;
926 809
810 spin_lock_irqsave(&chain->lock, flags);
811
927 spider_net_release_tx_chain(card, 0); 812 spider_net_release_tx_chain(card, 0);
928 813
929 descr = spider_net_get_next_tx_descr(card); 814 if (chain->head->next == chain->tail->prev) {
815 card->netdev_stats.tx_dropped++;
816 result = NETDEV_TX_LOCKED;
817 goto out;
818 }
930 819
931 if (!descr) 820 if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
932 goto error; 821 result = NETDEV_TX_LOCKED;
822 goto out;
823 }
933 824
934 result = spider_net_prepare_tx_descr(card, descr, skb); 825 if (spider_net_prepare_tx_descr(card, skb) != 0) {
935 if (result) 826 card->netdev_stats.tx_dropped++;
936 goto error; 827 result = NETDEV_TX_BUSY;
828 goto out;
829 }
830
831 result = NETDEV_TX_OK;
937 832
833 spider_net_kick_tx_dma(card);
938 card->tx_chain.head = card->tx_chain.head->next; 834 card->tx_chain.head = card->tx_chain.head->next;
939 835
940 if (spider_net_get_descr_status(descr->prev) != 836out:
941 SPIDER_NET_DESCR_CARDOWNED) { 837 spin_unlock_irqrestore(&chain->lock, flags);
942 /* make sure the current descriptor is in memory. Then 838 netif_wake_queue(netdev);
943 * kicking it on again makes sense, if the previous is not 839 return result;
944 * card-owned anymore. Check the previous descriptor twice 840}
945 * to omit an mb() in heavy traffic cases */
946 mb();
947 if (spider_net_get_descr_status(descr->prev) !=
948 SPIDER_NET_DESCR_CARDOWNED)
949 spider_net_kick_tx_dma(card, descr);
950 }
951 841
952 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); 842/**
843 * spider_net_cleanup_tx_ring - cleans up the TX ring
844 * @card: card structure
845 *
846 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
847 * interrupts to cleanup our TX ring) and returns sent packets to the stack
848 * by freeing them
849 */
850static void
851spider_net_cleanup_tx_ring(struct spider_net_card *card)
852{
853 unsigned long flags;
953 854
954 return NETDEV_TX_OK; 855 spin_lock_irqsave(&card->tx_chain.lock, flags);
955 856
956error: 857 if ((spider_net_release_tx_chain(card, 0) != 0) &&
957 card->netdev_stats.tx_dropped++; 858 (card->netdev->flags & IFF_UP))
958 return NETDEV_TX_BUSY; 859 spider_net_kick_tx_dma(card);
860
861 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
959} 862}
960 863
961/** 864/**
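[Annotation] Because the netdev now advertises NETIF_F_LLTX (see the setup_netdev hunk below), spider_net_xmit does its own locking: it takes the chain lock, reclaims finished descriptors, and returns NETDEV_TX_LOCKED or NETDEV_TX_BUSY when the ring is full or the descriptor cannot be filled, so the stack requeues the packet. A compressed sketch of the return-code logic only, with illustrative NETDEV_TX_* values and a mutex standing in for the irqsave spinlock:

#include <pthread.h>
#include <stdio.h>

enum { NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 1, NETDEV_TX_LOCKED = -1 }; /* illustrative */

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;
static int ring_full;        /* models head->next == tail->prev            */
static int map_fails;        /* models pci_map_single() returning an error */

static int xmit(void)
{
	int rc;

	pthread_mutex_lock(&chain_lock);     /* the driver uses irqsave here  */
	if (ring_full) {
		rc = NETDEV_TX_LOCKED;       /* stack will retry the packet   */
	} else if (map_fails) {
		rc = NETDEV_TX_BUSY;         /* descriptor could not be filled */
	} else {
		/* fill descriptor, kick DMA, advance head */
		rc = NETDEV_TX_OK;
	}
	pthread_mutex_unlock(&chain_lock);
	return rc;
}

int main(void)
{
	printf("%d\n", xmit());              /* 0: packet accepted       */
	ring_full = 1;
	printf("%d\n", xmit());              /* -1: ring full, requeue   */
	return 0;
}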
@@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1002 905
1003 /* unmap descriptor */ 906 /* unmap descriptor */
1004 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, 907 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
1005 PCI_DMA_BIDIRECTIONAL); 908 PCI_DMA_FROMDEVICE);
1006 909
1007 /* the cases we'll throw away the packet immediately */ 910 /* the cases we'll throw away the packet immediately */
1008 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { 911 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
@@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1067static int 970static int
1068spider_net_decode_one_descr(struct spider_net_card *card, int napi) 971spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1069{ 972{
1070 enum spider_net_descr_status status; 973 struct spider_net_descr_chain *chain = &card->rx_chain;
1071 struct spider_net_descr *descr; 974 struct spider_net_descr *descr = chain->tail;
1072 struct spider_net_descr_chain *chain; 975 int status;
1073 int result; 976 int result;
1074 977
1075 chain = &card->rx_chain;
1076 descr = chain->tail;
1077
1078 status = spider_net_get_descr_status(descr); 978 status = spider_net_get_descr_status(descr);
1079 979
1080 if (status == SPIDER_NET_DESCR_CARDOWNED) { 980 if (status == SPIDER_NET_DESCR_CARDOWNED) {
@@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1103 card->netdev->name, status); 1003 card->netdev->name, status);
1104 card->netdev_stats.rx_dropped++; 1004 card->netdev_stats.rx_dropped++;
1105 pci_unmap_single(card->pdev, descr->buf_addr, 1005 pci_unmap_single(card->pdev, descr->buf_addr,
1106 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); 1006 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1107 dev_kfree_skb_irq(descr->skb); 1007 dev_kfree_skb_irq(descr->skb);
1108 goto refill; 1008 goto refill;
1109 } 1009 }
@@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1119 /* ok, we've got a packet in descr */ 1019 /* ok, we've got a packet in descr */
1120 result = spider_net_pass_skb_up(descr, card, napi); 1020 result = spider_net_pass_skb_up(descr, card, napi);
1121refill: 1021refill:
1122 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 1022 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1123 /* change the descriptor state: */ 1023 /* change the descriptor state: */
1124 if (!napi) 1024 if (!napi)
1125 spider_net_refill_rx_chain(card); 1025 spider_net_refill_rx_chain(card);
@@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1291} 1191}
1292 1192
1293/** 1193/**
1294 * spider_net_enable_txdmac - enables a TX DMA controller
1295 * @card: card structure
1296 *
1297 * spider_net_enable_txdmac enables the TX DMA controller by setting the
1298 * descriptor chain tail address
1299 */
1300static void
1301spider_net_enable_txdmac(struct spider_net_card *card)
1302{
1303 /* assume chain is aligned correctly */
1304 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
1305 card->tx_chain.tail->bus_addr);
1306}
1307
1308/**
1309 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt 1194 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1310 * @card: card structure 1195 * @card: card structure
1311 * 1196 *
@@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card)
1653 { SPIDER_NET_GMRWOLCTRL, 0 }, 1538 { SPIDER_NET_GMRWOLCTRL, 0 },
1654 { SPIDER_NET_GTESTMD, 0x10000000 }, 1539 { SPIDER_NET_GTESTMD, 0x10000000 },
1655 { SPIDER_NET_GTTQMSK, 0x00400040 }, 1540 { SPIDER_NET_GTTQMSK, 0x00400040 },
1656 { SPIDER_NET_GTESTMD, 0 },
1657 1541
1658 { SPIDER_NET_GMACINTEN, 0 }, 1542 { SPIDER_NET_GMACINTEN, 0 },
1659 1543
@@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card)
1692 1576
1693 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); 1577 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1694 1578
1695 /* set chain tail adress for TX chain */
1696 spider_net_enable_txdmac(card);
1697
1698 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, 1579 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1699 SPIDER_NET_LENLMT_VALUE); 1580 SPIDER_NET_LENLMT_VALUE);
1700 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 1581 spider_net_write_reg(card, SPIDER_NET_GMACMODE,
@@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card)
1709 SPIDER_NET_INT1_MASK_VALUE); 1590 SPIDER_NET_INT1_MASK_VALUE);
1710 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 1591 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1711 SPIDER_NET_INT2_MASK_VALUE); 1592 SPIDER_NET_INT2_MASK_VALUE);
1593
1594 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1595 SPIDER_NET_GDTDCEIDIS);
1712} 1596}
1713 1597
1714/** 1598/**
@@ -1728,10 +1612,12 @@ spider_net_open(struct net_device *netdev)
1728 1612
1729 result = -ENOMEM; 1613 result = -ENOMEM;
1730 if (spider_net_init_chain(card, &card->tx_chain, 1614 if (spider_net_init_chain(card, &card->tx_chain,
1731 card->descr, tx_descriptors)) 1615 card->descr,
1616 PCI_DMA_TODEVICE, tx_descriptors))
1732 goto alloc_tx_failed; 1617 goto alloc_tx_failed;
1733 if (spider_net_init_chain(card, &card->rx_chain, 1618 if (spider_net_init_chain(card, &card->rx_chain,
1734 card->descr + tx_descriptors, rx_descriptors)) 1619 card->descr + tx_descriptors,
1620 PCI_DMA_FROMDEVICE, rx_descriptors))
1735 goto alloc_rx_failed; 1621 goto alloc_rx_failed;
1736 1622
1737 /* allocate rx skbs */ 1623 /* allocate rx skbs */
@@ -1938,7 +1824,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1938 /* empty sequencer data */ 1824 /* empty sequencer data */
1939 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; 1825 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1940 sequencer++) { 1826 sequencer++) {
1941 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1827 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
1942 sequencer * 8, 0x0); 1828 sequencer * 8, 0x0);
1943 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { 1829 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1944 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1830 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
@@ -1955,6 +1841,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1955} 1841}
1956 1842
1957/** 1843/**
1844 * spider_net_stop - called upon ifconfig down
1845 * @netdev: interface device structure
1846 *
1847 * always returns 0
1848 */
1849int
1850spider_net_stop(struct net_device *netdev)
1851{
1852 struct spider_net_card *card = netdev_priv(netdev);
1853
1854 tasklet_kill(&card->rxram_full_tl);
1855 netif_poll_disable(netdev);
1856 netif_carrier_off(netdev);
1857 netif_stop_queue(netdev);
1858 del_timer_sync(&card->tx_timer);
1859
1860 /* disable/mask all interrupts */
1861 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1862 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1863 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
1864
1865 /* free_irq(netdev->irq, netdev);*/
1866 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
1867
1868 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1869 SPIDER_NET_DMA_TX_FEND_VALUE);
1870
1871 /* turn off DMA, force end */
1872 spider_net_disable_rxdmac(card);
1873
1874 /* release chains */
1875 if (spin_trylock(&card->tx_chain.lock)) {
1876 spider_net_release_tx_chain(card, 1);
1877 spin_unlock(&card->tx_chain.lock);
1878 }
1879
1880 spider_net_free_chain(card, &card->tx_chain);
1881 spider_net_free_chain(card, &card->rx_chain);
1882
1883 return 0;
1884}
1885
1886/**
1958 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout 1887 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
1959 * function (to be called not under interrupt status) 1888 * function (to be called not under interrupt status)
1960 * @data: data, is interface device structure 1889 * @data: data, is interface device structure
@@ -1982,7 +1911,7 @@ spider_net_tx_timeout_task(void *data)
1982 goto out; 1911 goto out;
1983 1912
1984 spider_net_open(netdev); 1913 spider_net_open(netdev);
1985 spider_net_kick_tx_dma(card, card->tx_chain.head); 1914 spider_net_kick_tx_dma(card);
1986 netif_device_attach(netdev); 1915 netif_device_attach(netdev);
1987 1916
1988out: 1917out:
@@ -2065,7 +1994,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
2065 1994
2066 pci_set_drvdata(card->pdev, netdev); 1995 pci_set_drvdata(card->pdev, netdev);
2067 1996
2068 atomic_set(&card->tx_chain_release,0);
2069 card->rxram_full_tl.data = (unsigned long) card; 1997 card->rxram_full_tl.data = (unsigned long) card;
2070 card->rxram_full_tl.func = 1998 card->rxram_full_tl.func =
2071 (void (*)(unsigned long)) spider_net_handle_rxram_full; 1999 (void (*)(unsigned long)) spider_net_handle_rxram_full;
@@ -2079,7 +2007,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
2079 2007
2080 spider_net_setup_netdev_ops(netdev); 2008 spider_net_setup_netdev_ops(netdev);
2081 2009
2082 netdev->features = NETIF_F_HW_CSUM; 2010 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
2083 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2011 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2084 * NETIF_F_HW_VLAN_FILTER */ 2012 * NETIF_F_HW_VLAN_FILTER */
2085 2013
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3b8d951cf73c..f6dcf180ae3d 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -208,7 +208,10 @@ extern char spider_net_driver_name[];
208#define SPIDER_NET_DMA_RX_VALUE 0x80000000 208#define SPIDER_NET_DMA_RX_VALUE 0x80000000
209#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 209#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
210/* to set TX_DMA_EN */ 210/* to set TX_DMA_EN */
211#define SPIDER_NET_DMA_TX_VALUE 0x80000000 211#define SPIDER_NET_TX_DMA_EN 0x80000000
212#define SPIDER_NET_GDTDCEIDIS 0x00000002
213#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
214 SPIDER_NET_GDTDCEIDIS
212#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 215#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
213 216
214/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ 217/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@@ -329,55 +332,23 @@ enum spider_net_int2_status {
329 (~SPIDER_NET_TXINT) & \ 332 (~SPIDER_NET_TXINT) & \
330 (~SPIDER_NET_RXINT) ) 333 (~SPIDER_NET_RXINT) )
331 334
332#define SPIDER_NET_GPREXEC 0x80000000 335#define SPIDER_NET_GPREXEC 0x80000000
333#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 336#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
334 337
335/* descriptor bits 338#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
336 * 339#define SPIDER_NET_DMAC_NOCS 0x00040000
337 * 1010 descriptor ready 340#define SPIDER_NET_DMAC_TCP 0x00020000
338 * 0 descr in middle of chain 341#define SPIDER_NET_DMAC_UDP 0x00030000
339 * 000 fixed to 0 342#define SPIDER_NET_TXDCEST 0x08000000
340 * 343
341 * 0 no interrupt on completion 344#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
342 * 000 fixed to 0 345#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
343 * 1 no ipsec processing 346#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
344 * 1 last descriptor for this frame 347#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
345 * 00 no checksum 348#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
346 * 10 tcp checksum 349#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
347 * 11 udp checksum 350#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
348 * 351#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
349 * 00 fixed to 0
350 * 0 fixed to 0
351 * 0 no interrupt on response errors
352 * 0 no interrupt on invalid descr
353 * 0 no interrupt on dma process termination
354 * 0 no interrupt on descr chain end
355 * 0 no interrupt on descr complete
356 *
357 * 000 fixed to 0
358 * 0 response error interrupt status
359 * 0 invalid descr status
360 * 0 dma termination status
361 * 0 descr chain end status
362 * 0 descr complete status */
363#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000
364#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000
365#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000
366#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28
367#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff
368
369/* descr ready, descr is in middle of chain, get interrupt on completion */
370#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
371
372enum spider_net_descr_status {
373 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
374 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
375 SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
376 SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */
377 SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
378 SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
379 SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
380};
381 352
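A note for readers of the header hunk above: the removed enum kept the descriptor indication codes as small 4-bit values that had to be shifted into bits 31:28 of the command/status word, while the replacement defines are already in register position, so the status can be tested directly after masking with SPIDER_NET_DESCR_IND_PROC_MASK. A minimal sketch of that usage (not part of the patch, helper name illustrative):

	/* illustrative only, not from the patch */
	static inline int
	spider_net_descr_is_complete(u32 dmac_cmd_status)
	{
		return (dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK) ==
		       SPIDER_NET_DESCR_COMPLETE;
	}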
382struct spider_net_descr { 353struct spider_net_descr {
383 /* as defined by the hardware */ 354 /* as defined by the hardware */
@@ -398,7 +369,7 @@ struct spider_net_descr {
398} __attribute__((aligned(32))); 369} __attribute__((aligned(32)));
399 370
400struct spider_net_descr_chain { 371struct spider_net_descr_chain {
401 /* we walk from tail to head */ 372 spinlock_t lock;
402 struct spider_net_descr *head; 373 struct spider_net_descr *head;
403 struct spider_net_descr *tail; 374 struct spider_net_descr *tail;
404}; 375};
@@ -453,8 +424,6 @@ struct spider_net_card {
453 424
454 struct spider_net_descr_chain tx_chain; 425 struct spider_net_descr_chain tx_chain;
455 struct spider_net_descr_chain rx_chain; 426 struct spider_net_descr_chain rx_chain;
456 atomic_t rx_chain_refill;
457 atomic_t tx_chain_release;
458 427
459 struct net_device_stats netdev_stats; 428 struct net_device_stats netdev_stats;
460 429
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 8673fd4c08c7..c6f5bc3c042f 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
3255} 3255}
3256 3256
3257static struct pci_device_id happymeal_pci_ids[] = { 3257static struct pci_device_id happymeal_pci_ids[] = {
3258 { 3258 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3259 .vendor = PCI_VENDOR_ID_SUN,
3260 .device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
3261 .subvendor = PCI_ANY_ID,
3262 .subdevice = PCI_ANY_ID,
3263 },
3264 { } /* Terminating entry */ 3259 { } /* Terminating entry */
3265}; 3260};
3266 3261
@@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = {
3275 3270
3276static int __init happy_meal_pci_init(void) 3271static int __init happy_meal_pci_init(void)
3277{ 3272{
3278 return pci_module_init(&hme_pci_driver); 3273 return pci_register_driver(&hme_pci_driver);
3279} 3274}
3280 3275
3281static void happy_meal_pci_exit(void) 3276static void happy_meal_pci_exit(void)
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..0e3fdf7c6dd3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
1537{ 1537{
1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || 1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) { 1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
1540 memset(&sun4_sdev, 0, sizeof(sdev)); 1540 memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; 1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
1542 sun4_sdev.irqs[0] = 6; 1542 sun4_sdev.irqs[0] = 6;
1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); 1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)
1547 1547
1548static int __exit sunlance_sun4_remove(void) 1548static int __exit sunlance_sun4_remove(void)
1549{ 1549{
1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); 1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
1551 struct net_device *net_dev = lp->dev; 1551 struct net_device *net_dev = lp->dev;
1552 1552
1553 unregister_netdevice(net_dev); 1553 unregister_netdevice(net_dev);
1554 1554
1555 lance_free_hwresources(root_lance_dev); 1555 lance_free_hwresources(lp);
1556 1556
1557 free_netdev(net_dev); 1557 free_netdev(net_dev);
1558 1558
1559 dev_set_drvdata(&sun4_sdev->dev, NULL); 1559 dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
1560 1560
1561 return 0; 1561 return 0;
1562} 1562}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ce6f3be86da0..1b8138f641e3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.62" 71#define DRV_MODULE_VERSION "3.63"
72#define DRV_MODULE_RELDATE "June 30, 2006" 72#define DRV_MODULE_RELDATE "July 25, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3590static int tg3_init_hw(struct tg3 *, int); 3590static int tg3_init_hw(struct tg3 *, int);
3591static int tg3_halt(struct tg3 *, int, int); 3591static int tg3_halt(struct tg3 *, int, int);
3592 3592
3593/* Restart hardware after configuration changes, self-test, etc.
3594 * Invoked with tp->lock held.
3595 */
3596static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3597{
3598 int err;
3599
3600 err = tg3_init_hw(tp, reset_phy);
3601 if (err) {
3602 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3603 "aborting.\n", tp->dev->name);
3604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3605 tg3_full_unlock(tp);
3606 del_timer_sync(&tp->timer);
3607 tp->irq_sync = 0;
3608 netif_poll_enable(tp->dev);
3609 dev_close(tp->dev);
3610 tg3_full_lock(tp, 0);
3611 }
3612 return err;
3613}
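A note on the helper added above: it centralizes the recovery that used to be skipped whenever tg3_init_hw() failed. The call sites in the hunks below all convert to the same pattern, roughly (sketch only, mirroring those hunks):

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_restart_hw(tp, 1);
	if (!err)
		tg3_netif_start(tp);
	tg3_full_unlock(tp);
	return err;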
3614
3593#ifdef CONFIG_NET_POLL_CONTROLLER 3615#ifdef CONFIG_NET_POLL_CONTROLLER
3594static void tg3_poll_controller(struct net_device *dev) 3616static void tg3_poll_controller(struct net_device *dev)
3595{ 3617{
@@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data)
3630 } 3652 }
3631 3653
3632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3654 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3633 tg3_init_hw(tp, 1); 3655 if (tg3_init_hw(tp, 1))
3656 goto out;
3634 3657
3635 tg3_netif_start(tp); 3658 tg3_netif_start(tp);
3636 3659
3637 if (restart_timer) 3660 if (restart_timer)
3638 mod_timer(&tp->timer, jiffies + 1); 3661 mod_timer(&tp->timer, jiffies + 1);
3639 3662
3663out:
3640 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; 3664 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3641 3665
3642 tg3_full_unlock(tp); 3666 tg3_full_unlock(tp);
@@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4124static int tg3_change_mtu(struct net_device *dev, int new_mtu) 4148static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4125{ 4149{
4126 struct tg3 *tp = netdev_priv(dev); 4150 struct tg3 *tp = netdev_priv(dev);
4151 int err;
4127 4152
4128 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) 4153 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4129 return -EINVAL; 4154 return -EINVAL;
@@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4144 4169
4145 tg3_set_mtu(dev, tp, new_mtu); 4170 tg3_set_mtu(dev, tp, new_mtu);
4146 4171
4147 tg3_init_hw(tp, 0); 4172 err = tg3_restart_hw(tp, 0);
4148 4173
4149 tg3_netif_start(tp); 4174 if (!err)
4175 tg3_netif_start(tp);
4150 4176
4151 tg3_full_unlock(tp); 4177 tg3_full_unlock(tp);
4152 4178
4153 return 0; 4179 return err;
4154} 4180}
4155 4181
4156/* Free up pending packets in all rx/tx rings. 4182/* Free up pending packets in all rx/tx rings.
@@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp)
4232 * end up in the driver. tp->{tx,}lock are held and thus 4258 * end up in the driver. tp->{tx,}lock are held and thus
4233 * we may not sleep. 4259 * we may not sleep.
4234 */ 4260 */
4235static void tg3_init_rings(struct tg3 *tp) 4261static int tg3_init_rings(struct tg3 *tp)
4236{ 4262{
4237 u32 i; 4263 u32 i;
4238 4264
@@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp)
4281 4307
4282 /* Now allocate fresh SKBs for each rx ring. */ 4308 /* Now allocate fresh SKBs for each rx ring. */
4283 for (i = 0; i < tp->rx_pending; i++) { 4309 for (i = 0; i < tp->rx_pending; i++) {
4284 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, 4310 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4285 -1, i) < 0) 4311 printk(KERN_WARNING PFX
4312 "%s: Using a smaller RX standard ring, "
4313 "only %d out of %d buffers were allocated "
4314 "successfully.\n",
4315 tp->dev->name, i, tp->rx_pending);
4316 if (i == 0)
4317 return -ENOMEM;
4318 tp->rx_pending = i;
4286 break; 4319 break;
4320 }
4287 } 4321 }
4288 4322
4289 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 4323 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4290 for (i = 0; i < tp->rx_jumbo_pending; i++) { 4324 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4291 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, 4325 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4292 -1, i) < 0) 4326 -1, i) < 0) {
4327 printk(KERN_WARNING PFX
4328 "%s: Using a smaller RX jumbo ring, "
4329 "only %d out of %d buffers were "
4330 "allocated successfully.\n",
4331 tp->dev->name, i, tp->rx_jumbo_pending);
4332 if (i == 0) {
4333 tg3_free_rings(tp);
4334 return -ENOMEM;
4335 }
4336 tp->rx_jumbo_pending = i;
4293 break; 4337 break;
4338 }
4294 } 4339 }
4295 } 4340 }
4341 return 0;
4296} 4342}
4297 4343
4298/* 4344/*
@@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5815{ 5861{
5816 struct tg3 *tp = netdev_priv(dev); 5862 struct tg3 *tp = netdev_priv(dev);
5817 struct sockaddr *addr = p; 5863 struct sockaddr *addr = p;
5864 int err = 0;
5818 5865
5819 if (!is_valid_ether_addr(addr->sa_data)) 5866 if (!is_valid_ether_addr(addr->sa_data))
5820 return -EINVAL; 5867 return -EINVAL;
@@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5832 tg3_full_lock(tp, 1); 5879 tg3_full_lock(tp, 1);
5833 5880
5834 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5881 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5835 tg3_init_hw(tp, 0); 5882 err = tg3_restart_hw(tp, 0);
5836 5883 if (!err)
5837 tg3_netif_start(tp); 5884 tg3_netif_start(tp);
5838 tg3_full_unlock(tp); 5885 tg3_full_unlock(tp);
5839 } else { 5886 } else {
5840 spin_lock_bh(&tp->lock); 5887 spin_lock_bh(&tp->lock);
@@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5842 spin_unlock_bh(&tp->lock); 5889 spin_unlock_bh(&tp->lock);
5843 } 5890 }
5844 5891
5845 return 0; 5892 return err;
5846} 5893}
5847 5894
5848/* tp->lock is held. */ 5895/* tp->lock is held. */
@@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5942 * can only do this after the hardware has been 5989 * can only do this after the hardware has been
5943 * successfully reset. 5990 * successfully reset.
5944 */ 5991 */
5945 tg3_init_rings(tp); 5992 err = tg3_init_rings(tp);
5993 if (err)
5994 return err;
5946 5995
5947 /* This value is determined during the probe time DMA 5996 /* This value is determined during the probe time DMA
5948 * engine test, tg3_test_dma. 5997 * engine test, tg3_test_dma.
@@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
7956static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 8005static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7957{ 8006{
7958 struct tg3 *tp = netdev_priv(dev); 8007 struct tg3 *tp = netdev_priv(dev);
7959 int irq_sync = 0; 8008 int irq_sync = 0, err = 0;
7960 8009
7961 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || 8010 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7962 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || 8011 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
7980 8029
7981 if (netif_running(dev)) { 8030 if (netif_running(dev)) {
7982 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8031 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7983 tg3_init_hw(tp, 1); 8032 err = tg3_restart_hw(tp, 1);
7984 tg3_netif_start(tp); 8033 if (!err)
8034 tg3_netif_start(tp);
7985 } 8035 }
7986 8036
7987 tg3_full_unlock(tp); 8037 tg3_full_unlock(tp);
7988 8038
7989 return 0; 8039 return err;
7990} 8040}
7991 8041
7992static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8042static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8001static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8051static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8002{ 8052{
8003 struct tg3 *tp = netdev_priv(dev); 8053 struct tg3 *tp = netdev_priv(dev);
8004 int irq_sync = 0; 8054 int irq_sync = 0, err = 0;
8005 8055
8006 if (netif_running(dev)) { 8056 if (netif_running(dev)) {
8007 tg3_netif_stop(tp); 8057 tg3_netif_stop(tp);
@@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8025 8075
8026 if (netif_running(dev)) { 8076 if (netif_running(dev)) {
8027 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8077 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8028 tg3_init_hw(tp, 1); 8078 err = tg3_restart_hw(tp, 1);
8029 tg3_netif_start(tp); 8079 if (!err)
8080 tg3_netif_start(tp);
8030 } 8081 }
8031 8082
8032 tg3_full_unlock(tp); 8083 tg3_full_unlock(tp);
8033 8084
8034 return 0; 8085 return err;
8035} 8086}
8036 8087
8037static u32 tg3_get_rx_csum(struct net_device *dev) 8088static u32 tg3_get_rx_csum(struct net_device *dev)
@@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp)
8666 if (!netif_running(tp->dev)) 8717 if (!netif_running(tp->dev))
8667 return TG3_LOOPBACK_FAILED; 8718 return TG3_LOOPBACK_FAILED;
8668 8719
8669 tg3_reset_hw(tp, 1); 8720 err = tg3_reset_hw(tp, 1);
8721 if (err)
8722 return TG3_LOOPBACK_FAILED;
8670 8723
8671 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8724 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8672 err |= TG3_MAC_LOOPBACK_FAILED; 8725 err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8740 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8793 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8741 if (netif_running(dev)) { 8794 if (netif_running(dev)) {
8742 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8795 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8743 tg3_init_hw(tp, 1); 8796 if (!tg3_restart_hw(tp, 1))
8744 tg3_netif_start(tp); 8797 tg3_netif_start(tp);
8745 } 8798 }
8746 8799
8747 tg3_full_unlock(tp); 8800 tg3_full_unlock(tp);
@@ -11699,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11699 tg3_full_lock(tp, 0); 11752 tg3_full_lock(tp, 0);
11700 11753
11701 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11754 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11702 tg3_init_hw(tp, 1); 11755 if (tg3_restart_hw(tp, 1))
11756 goto out;
11703 11757
11704 tp->timer.expires = jiffies + tp->timer_offset; 11758 tp->timer.expires = jiffies + tp->timer_offset;
11705 add_timer(&tp->timer); 11759 add_timer(&tp->timer);
@@ -11707,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11707 netif_device_attach(dev); 11761 netif_device_attach(dev);
11708 tg3_netif_start(tp); 11762 tg3_netif_start(tp);
11709 11763
11764out:
11710 tg3_full_unlock(tp); 11765 tg3_full_unlock(tp);
11711 } 11766 }
11712 11767
@@ -11733,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev)
11733 tg3_full_lock(tp, 0); 11788 tg3_full_lock(tp, 0);
11734 11789
11735 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11790 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11736 tg3_init_hw(tp, 1); 11791 err = tg3_restart_hw(tp, 1);
11792 if (err)
11793 goto out;
11737 11794
11738 tp->timer.expires = jiffies + tp->timer_offset; 11795 tp->timer.expires = jiffies + tp->timer_offset;
11739 add_timer(&tp->timer); 11796 add_timer(&tp->timer);
11740 11797
11741 tg3_netif_start(tp); 11798 tg3_netif_start(tp);
11742 11799
11800out:
11743 tg3_full_unlock(tp); 11801 tg3_full_unlock(tp);
11744 11802
11745 return 0; 11803 return err;
11746} 11804}
11747 11805
11748static struct pci_driver tg3_driver = { 11806static struct pci_driver tg3_driver = {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f5b0078eb4ad..aa9cd92f46b2 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs)
2742 2742
2743 if (PHYSR0 & PHYSR0_SPDG) 2743 if (PHYSR0 & PHYSR0_SPDG)
2744 status |= VELOCITY_SPEED_1000; 2744 status |= VELOCITY_SPEED_1000;
2745 if (PHYSR0 & PHYSR0_SPD10) 2745 else if (PHYSR0 & PHYSR0_SPD10)
2746 status |= VELOCITY_SPEED_10; 2746 status |= VELOCITY_SPEED_10;
2747 else 2747 else
2748 status |= VELOCITY_SPEED_100; 2748 status |= VELOCITY_SPEED_100;
@@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
2851 u32 status; 2851 u32 status;
2852 status = check_connection_type(vptr->mac_regs); 2852 status = check_connection_type(vptr->mac_regs);
2853 2853
2854 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; 2854 cmd->supported = SUPPORTED_TP |
2855 if (status & VELOCITY_SPEED_100) 2855 SUPPORTED_Autoneg |
2856 SUPPORTED_10baseT_Half |
2857 SUPPORTED_10baseT_Full |
2858 SUPPORTED_100baseT_Half |
2859 SUPPORTED_100baseT_Full |
2860 SUPPORTED_1000baseT_Half |
2861 SUPPORTED_1000baseT_Full;
2862 if (status & VELOCITY_SPEED_1000)
2863 cmd->speed = SPEED_1000;
2864 else if (status & VELOCITY_SPEED_100)
2856 cmd->speed = SPEED_100; 2865 cmd->speed = SPEED_100;
2857 else 2866 else
2858 cmd->speed = SPEED_10; 2867 cmd->speed = SPEED_10;
@@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev)
2896{ 2905{
2897 struct velocity_info *vptr = netdev_priv(dev); 2906 struct velocity_info *vptr = netdev_priv(dev);
2898 struct mac_regs __iomem * regs = vptr->mac_regs; 2907 struct mac_regs __iomem * regs = vptr->mac_regs;
2899 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1; 2908 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2900} 2909}
2901 2910
2902static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2911static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 2c09ec908a3f..435e91ec4620 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -197,7 +197,6 @@ static int c101_open(struct net_device *dev)
197 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); 197 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
198 198
199 set_carrier(port); 199 set_carrier(port);
200 printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));
201 200
202 /* enable MSCI1 CDCD interrupt */ 201 /* enable MSCI1 CDCD interrupt */
203 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); 202 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
@@ -449,4 +448,5 @@ module_exit(c101_cleanup);
449MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 448MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
450MODULE_DESCRIPTION("Moxa C101 serial port driver"); 449MODULE_DESCRIPTION("Moxa C101 serial port driver");
451MODULE_LICENSE("GPL v2"); 450MODULE_LICENSE("GPL v2");
452module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */ 451module_param(hw, charp, 0444);
452MODULE_PARM_DESC(hw, "irq,ram:irq,...");
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b81263eaede0..fbaab5bf71eb 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
107 dev->hard_header = NULL; 107 dev->hard_header = NULL;
108 dev->type = ARPHRD_PPP; 108 dev->type = ARPHRD_PPP;
109 dev->addr_len = 0; 109 dev->addr_len = 0;
110 netif_dormant_off(dev);
110 return 0; 111 return 0;
111 } 112 }
112 113
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 9456d31cb1c1..f15aa6ba77f1 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -82,6 +82,7 @@ int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr)
82 dev->type = ARPHRD_RAWHDLC; 82 dev->type = ARPHRD_RAWHDLC;
83 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 83 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
84 dev->addr_len = 0; 84 dev->addr_len = 0;
85 netif_dormant_off(dev);
85 return 0; 86 return 0;
86 } 87 }
87 88
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index b1285cc8fee6..d1884987f94e 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -100,6 +100,7 @@ int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
100 dev->tx_queue_len = old_qlen; 100 dev->tx_queue_len = old_qlen;
101 memcpy(dev->dev_addr, "\x00\x01", 2); 101 memcpy(dev->dev_addr, "\x00\x01", 2);
102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); 102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
103 netif_dormant_off(dev);
103 return 0; 104 return 0;
104 } 105 }
105 106
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 07e5eef1fe0f..a867fb411f89 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -212,6 +212,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr)
212 dev->hard_header = NULL; 212 dev->hard_header = NULL;
213 dev->type = ARPHRD_X25; 213 dev->type = ARPHRD_X25;
214 dev->addr_len = 0; 214 dev->addr_len = 0;
215 netif_dormant_off(dev);
215 return 0; 216 return 0;
216 } 217 }
217 218
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index e013b817cab8..dcf46add3adf 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -564,4 +564,5 @@ module_exit(n2_cleanup);
564MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 564MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
565MODULE_DESCRIPTION("RISCom/N2 serial port driver"); 565MODULE_DESCRIPTION("RISCom/N2 serial port driver");
566MODULE_LICENSE("GPL v2"); 566MODULE_LICENSE("GPL v2");
567module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */ 567module_param(hw, charp, 0444);
568MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fa9d2c4edc93..2e8ac995d56f 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -447,6 +447,7 @@ config AIRO_CS
447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) 448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
449 select CRYPTO 449 select CRYPTO
450 select CRYPTO_AES
450 ---help--- 451 ---help---
451 This is the standard Linux driver to support Cisco/Aironet PCMCIA 452 This is the standard Linux driver to support Cisco/Aironet PCMCIA
452 802.11 wireless cards. This driver is the same as the Aironet 453 802.11 wireless cards. This driver is the same as the Aironet
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 3889f79e7128..df317c1e12a8 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3701 } 3701 }
3702 if (sec->flags & SEC_AUTH_MODE) { 3702 if (sec->flags & SEC_AUTH_MODE) {
3703 secinfo->auth_mode = sec->auth_mode; 3703 secinfo->auth_mode = sec->auth_mode;
3704 dprintk(", .auth_mode = %d\n", sec->auth_mode); 3704 dprintk(", .auth_mode = %d", sec->auth_mode);
3705 } 3705 }
3706 dprintk("\n"); 3706 dprintk("\n");
3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && 3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d6ed5781b93a..317ace7f9aae 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2875 if (orinoco_lock(priv, &flags) != 0) 2875 if (orinoco_lock(priv, &flags) != 0)
2876 return -EBUSY; 2876 return -EBUSY;
2877 2877
2878 if (erq->pointer) { 2878 if (erq->length > 0) {
2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) 2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
2880 index = priv->tx_key; 2880 index = priv->tx_key;
2881 2881
@@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2918 if (erq->flags & IW_ENCODE_RESTRICTED) 2918 if (erq->flags & IW_ENCODE_RESTRICTED)
2919 restricted = 1; 2919 restricted = 1;
2920 2920
2921 if (erq->pointer) { 2921 if (erq->pointer && erq->length > 0) {
2922 priv->keys[index].len = cpu_to_le16(xlen); 2922 priv->keys[index].len = cpu_to_le16(xlen);
2923 memset(priv->keys[index].data, 0, 2923 memset(priv->keys[index].data, 0,
2924 sizeof(priv->keys[index].data)); 2924 sizeof(priv->keys[index].data));
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 662ecc8a33ff..c52e9bcf8d02 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface,
1820 zd->dev->name); 1820 zd->dev->name);
1821 1821
1822 usb_set_intfdata(interface, zd); 1822 usb_set_intfdata(interface, zd);
1823 zd1201_enable(zd); /* zd1201 likes to startup enabled, */
1824 zd1201_disable(zd); /* interfering with all the wifis in range */
1823 return 0; 1825 return 0;
1824 1826
1825err_net: 1827err_net:
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f5b9f187a930..7ff1d88094b6 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -121,6 +121,16 @@ config RTC_DRV_DS1553
121 This driver can also be built as a module. If so, the module 121 This driver can also be built as a module. If so, the module
122 will be called rtc-ds1553. 122 will be called rtc-ds1553.
123 123
124config RTC_DRV_ISL1208
125 tristate "Intersil 1208"
126 depends on RTC_CLASS && I2C
127 help
128 If you say yes here you get support for the
129 Intersil 1208 RTC chip.
130
131 This driver can also be built as a module. If so, the module
132 will be called rtc-isl1208.
133
124config RTC_DRV_DS1672 134config RTC_DRV_DS1672
125 tristate "Dallas/Maxim DS1672" 135 tristate "Dallas/Maxim DS1672"
126 depends on RTC_CLASS && I2C 136 depends on RTC_CLASS && I2C
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 54220714ff49..bbcfb09d81d9 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o
12obj-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o 12obj-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o
13 13
14obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o 14obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
15obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
15obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 16obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
16obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o 17obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
17obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o 18obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
new file mode 100644
index 000000000000..f324d0a635d4
--- /dev/null
+++ b/drivers/rtc/rtc-isl1208.c
@@ -0,0 +1,591 @@
1/*
2 * Intersil ISL1208 rtc class driver
3 *
4 * Copyright 2005,2006 Herbert Valerio Riedel <hvr@gnu.org>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/i2c.h>
15#include <linux/bcd.h>
16#include <linux/rtc.h>
17
18#define DRV_NAME "isl1208"
19#define DRV_VERSION "0.2"
20
21/* Register map */
22/* rtc section */
23#define ISL1208_REG_SC 0x00
24#define ISL1208_REG_MN 0x01
25#define ISL1208_REG_HR 0x02
26#define ISL1208_REG_HR_MIL (1<<7) /* 24h/12h mode */
27#define ISL1208_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */
28#define ISL1208_REG_DT 0x03
29#define ISL1208_REG_MO 0x04
30#define ISL1208_REG_YR 0x05
31#define ISL1208_REG_DW 0x06
32#define ISL1208_RTC_SECTION_LEN 7
33
34/* control/status section */
35#define ISL1208_REG_SR 0x07
36#define ISL1208_REG_SR_ARST (1<<7) /* auto reset */
37#define ISL1208_REG_SR_XTOSCB (1<<6) /* crystal oscillator */
38#define ISL1208_REG_SR_WRTC (1<<4) /* write rtc */
39#define ISL1208_REG_SR_ALM (1<<2) /* alarm */
40#define ISL1208_REG_SR_BAT (1<<1) /* battery */
41#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */
42#define ISL1208_REG_INT 0x08
43#define ISL1208_REG_09 0x09 /* reserved */
44#define ISL1208_REG_ATR 0x0a
45#define ISL1208_REG_DTR 0x0b
46
47/* alarm section */
48#define ISL1208_REG_SCA 0x0c
49#define ISL1208_REG_MNA 0x0d
50#define ISL1208_REG_HRA 0x0e
51#define ISL1208_REG_DTA 0x0f
52#define ISL1208_REG_MOA 0x10
53#define ISL1208_REG_DWA 0x11
54#define ISL1208_ALARM_SECTION_LEN 6
55
56/* user section */
57#define ISL1208_REG_USR1 0x12
58#define ISL1208_REG_USR2 0x13
59#define ISL1208_USR_SECTION_LEN 2
60
61/* i2c configuration */
62#define ISL1208_I2C_ADDR 0xde
63
64static unsigned short normal_i2c[] = {
65 ISL1208_I2C_ADDR>>1, I2C_CLIENT_END
66};
67I2C_CLIENT_INSMOD; /* defines addr_data */
68
69static int isl1208_attach_adapter(struct i2c_adapter *adapter);
70static int isl1208_detach_client(struct i2c_client *client);
71
72static struct i2c_driver isl1208_driver = {
73 .driver = {
74 .name = DRV_NAME,
75 },
76 .id = I2C_DRIVERID_ISL1208,
77 .attach_adapter = &isl1208_attach_adapter,
78 .detach_client = &isl1208_detach_client,
79};
80
81/* block read */
82static int
83isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[],
84 unsigned len)
85{
86 u8 reg_addr[1] = { reg };
87 struct i2c_msg msgs[2] = {
88 { client->addr, client->flags, sizeof(reg_addr), reg_addr },
89 { client->addr, client->flags | I2C_M_RD, len, buf }
90 };
91 int ret;
92
93 BUG_ON(len == 0);
94 BUG_ON(reg > ISL1208_REG_USR2);
95 BUG_ON(reg + len > ISL1208_REG_USR2 + 1);
96
97 ret = i2c_transfer(client->adapter, msgs, 2);
98 if (ret > 0)
99 ret = 0;
100 return ret;
101}
102
103/* block write */
104static int
105isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[],
106 unsigned len)
107{
108 u8 i2c_buf[ISL1208_REG_USR2 + 2];
109 struct i2c_msg msgs[1] = {
110 { client->addr, client->flags, len + 1, i2c_buf }
111 };
112 int ret;
113
114 BUG_ON(len == 0);
115 BUG_ON(reg > ISL1208_REG_USR2);
116 BUG_ON(reg + len > ISL1208_REG_USR2 + 1);
117
118 i2c_buf[0] = reg;
119 memcpy(&i2c_buf[1], &buf[0], len);
120
121 ret = i2c_transfer(client->adapter, msgs, 1);
122 if (ret > 0)
123 ret = 0;
124 return ret;
125}
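A note on the two helpers above: they implement the usual I2C register-window access, a write of the register address followed by a read message for block reads, and a single write whose first byte is the register address for block writes. A hedged usage sketch (the client pointer comes from the probe path shown further down, not from this snippet):

	/* illustrative only: fetch the seven RTC registers in one transfer */
	u8 regs[ISL1208_RTC_SECTION_LEN];
	int err;

	err = isl1208_i2c_read_regs(client, ISL1208_REG_SC, regs,
				    ISL1208_RTC_SECTION_LEN);
	if (err < 0)
		dev_err(&client->dev, "reading RTC section failed\n");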
126
127/* simple check to see whether we have an isl1208 */
128static int isl1208_i2c_validate_client(struct i2c_client *client)
129{
130 u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
131 u8 zero_mask[ISL1208_RTC_SECTION_LEN] = {
132 0x80, 0x80, 0x40, 0xc0, 0xe0, 0x00, 0xf8
133 };
134 int i;
135 int ret;
136
137 ret = isl1208_i2c_read_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN);
138 if (ret < 0)
139 return ret;
140
141 for (i = 0; i < ISL1208_RTC_SECTION_LEN; ++i) {
142 if (regs[i] & zero_mask[i]) /* check if bits are cleared */
143 return -ENODEV;
144 }
145
146 return 0;
147}
148
149static int isl1208_i2c_get_sr(struct i2c_client *client)
150{
151 return i2c_smbus_read_byte_data(client, ISL1208_REG_SR) == -1 ? -EIO:0;
152}
153
154static int isl1208_i2c_get_atr(struct i2c_client *client)
155{
156 int atr = i2c_smbus_read_byte_data(client, ISL1208_REG_ATR);
157
158 if (atr < 0)
159 return -EIO;
160
161 /* The 6bit value in the ATR register controls the load
162 * capacitance C_load in steps of 0.25pF
163 *
164 * bit (1<<5) of the ATR register is inverted
165 *
166 * C_load(ATR=0x20) = 4.50pF
167 * C_load(ATR=0x00) = 12.50pF
168 * C_load(ATR=0x1f) = 20.25pF
169 *
170 */
171
172 atr &= 0x3f; /* mask out lsb */
173 atr ^= 1<<5; /* invert 6th bit */
174 atr += 2*9; /* add offset of 4.5pF; unit[atr] = 0.25pF */
175
176 return atr;
177}
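Working the analog-trim conversion above through the documented values confirms the comment: ATR=0x20 masks to 0x20, the XOR of bit 5 gives 0x00, and adding the 18-step (4.5 pF) offset yields 18 quarter-pF steps = 4.50 pF; likewise ATR=0x00 becomes 50 steps = 12.50 pF and ATR=0x1f becomes 81 steps = 20.25 pF. The proc and sysfs code further down prints the returned value with the same quarter-pF split, roughly (sketch only):

	/* illustrative only: atr is in 0.25 pF units, as returned above */
	snprintf(buf, len, "%d.%.2d pF", atr >> 2, (atr & 0x3) * 25);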
178
179static int isl1208_i2c_get_dtr(struct i2c_client *client)
180{
181 int dtr = i2c_smbus_read_byte_data(client, ISL1208_REG_DTR);
182
183 if (dtr < 0)
184 return -EIO;
185
186 /* dtr encodes adjustments of {-60,-40,-20,0,20,40,60} ppm */
187 dtr = ((dtr & 0x3) * 20) * (dtr & (1<<2) ? -1 : 1);
188
189 return dtr;
190}
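For the digital trim decoded above, the two low bits give the magnitude in 20 ppm steps and bit 2 selects a negative adjustment, so a raw DTR value of 0x06 decodes to -40 ppm and 0x03 to +60 ppm. The same decode in isolation (sketch only, helper name illustrative):

	/* illustrative only: decode a raw DTR register value to ppm */
	static int isl1208_dtr_to_ppm(u8 raw)
	{
		int ppm = (raw & 0x3) * 20;	/* 0, 20, 40 or 60 ppm */

		return (raw & (1 << 2)) ? -ppm : ppm;
	}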
191
192static int isl1208_i2c_get_usr(struct i2c_client *client)
193{
194 u8 buf[ISL1208_USR_SECTION_LEN] = { 0, };
195 int ret;
196
197 ret = isl1208_i2c_read_regs (client, ISL1208_REG_USR1, buf,
198 ISL1208_USR_SECTION_LEN);
199 if (ret < 0)
200 return ret;
201
202 return (buf[1] << 8) | buf[0];
203}
204
205static int isl1208_i2c_set_usr(struct i2c_client *client, u16 usr)
206{
207 u8 buf[ISL1208_USR_SECTION_LEN];
208
209 buf[0] = usr & 0xff;
210 buf[1] = (usr >> 8) & 0xff;
211
212 return isl1208_i2c_set_regs (client, ISL1208_REG_USR1, buf,
213 ISL1208_USR_SECTION_LEN);
214}
215
216static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
217{
218 struct i2c_client *const client = to_i2c_client(dev);
219 int sr, dtr, atr, usr;
220
221 sr = isl1208_i2c_get_sr(client);
222 if (sr < 0) {
223 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
224 return sr;
225 }
226
227 seq_printf(seq, "status_reg\t:%s%s%s%s%s%s (0x%.2x)\n",
228 (sr & ISL1208_REG_SR_RTCF) ? " RTCF" : "",
229 (sr & ISL1208_REG_SR_BAT) ? " BAT" : "",
230 (sr & ISL1208_REG_SR_ALM) ? " ALM" : "",
231 (sr & ISL1208_REG_SR_WRTC) ? " WRTC" : "",
232 (sr & ISL1208_REG_SR_XTOSCB) ? " XTOSCB" : "",
233 (sr & ISL1208_REG_SR_ARST) ? " ARST" : "",
234 sr);
235
236 seq_printf(seq, "batt_status\t: %s\n",
237 (sr & ISL1208_REG_SR_RTCF) ? "bad" : "okay");
238
239 dtr = isl1208_i2c_get_dtr(client);
240 if (dtr >= 0 -1)
241 seq_printf(seq, "digital_trim\t: %d ppm\n", dtr);
242
243 atr = isl1208_i2c_get_atr(client);
244 if (atr >= 0)
245 seq_printf(seq, "analog_trim\t: %d.%.2d pF\n",
246 atr>>2, (atr&0x3)*25);
247
248 usr = isl1208_i2c_get_usr(client);
249 if (usr >= 0)
250 seq_printf(seq, "user_data\t: 0x%.4x\n", usr);
251
252 return 0;
253}
254
255
256static int isl1208_i2c_read_time(struct i2c_client *client,
257 struct rtc_time *tm)
258{
259 int sr;
260 u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
261
262 sr = isl1208_i2c_get_sr(client);
263 if (sr < 0) {
264 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
265 return -EIO;
266 }
267
268 sr = isl1208_i2c_read_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN);
269 if (sr < 0) {
270 dev_err(&client->dev, "%s: reading RTC section failed\n",
271 __func__);
272 return sr;
273 }
274
275 tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SC]);
276 tm->tm_min = BCD2BIN(regs[ISL1208_REG_MN]);
277 { /* HR field has a more complex interpretation */
278 const u8 _hr = regs[ISL1208_REG_HR];
279 if (_hr & ISL1208_REG_HR_MIL) /* 24h format */
280 tm->tm_hour = BCD2BIN(_hr & 0x3f);
281 else { // 12h format
282 tm->tm_hour = BCD2BIN(_hr & 0x1f);
283 if (_hr & ISL1208_REG_HR_PM) /* PM flag set */
284 tm->tm_hour += 12;
285 }
286 }
287
288 tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DT]);
289 tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MO]) - 1; /* rtc starts at 1 */
290 tm->tm_year = BCD2BIN(regs[ISL1208_REG_YR]) + 100;
291 tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DW]);
292
293 return 0;
294}
295
296static int isl1208_i2c_read_alarm(struct i2c_client *client,
297 struct rtc_wkalrm *alarm)
298{
299 struct rtc_time *const tm = &alarm->time;
300 u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
301 int sr;
302
303 sr = isl1208_i2c_get_sr(client);
304 if (sr < 0) {
305 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
306 return sr;
307 }
308
309 sr = isl1208_i2c_read_regs(client, ISL1208_REG_SCA, regs,
310 ISL1208_ALARM_SECTION_LEN);
311 if (sr < 0) {
312 dev_err(&client->dev, "%s: reading alarm section failed\n",
313 __func__);
314 return sr;
315 }
316
317 /* MSB of each alarm register is an enable bit */
318 tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SCA-ISL1208_REG_SCA] & 0x7f);
319 tm->tm_min = BCD2BIN(regs[ISL1208_REG_MNA-ISL1208_REG_SCA] & 0x7f);
320 tm->tm_hour = BCD2BIN(regs[ISL1208_REG_HRA-ISL1208_REG_SCA] & 0x3f);
321 tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DTA-ISL1208_REG_SCA] & 0x3f);
322 tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MOA-ISL1208_REG_SCA] & 0x1f)-1;
323 tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DWA-ISL1208_REG_SCA] & 0x03);
324
325 return 0;
326}
327
328static int isl1208_rtc_read_time(struct device *dev, struct rtc_time *tm)
329{
330 return isl1208_i2c_read_time(to_i2c_client(dev), tm);
331}
332
333static int isl1208_i2c_set_time(struct i2c_client *client,
334 struct rtc_time const *tm)
335{
336 int sr;
337 u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
338
339 regs[ISL1208_REG_SC] = BIN2BCD(tm->tm_sec);
340 regs[ISL1208_REG_MN] = BIN2BCD(tm->tm_min);
341 regs[ISL1208_REG_HR] = BIN2BCD(tm->tm_hour) | ISL1208_REG_HR_MIL;
342
343 regs[ISL1208_REG_DT] = BIN2BCD(tm->tm_mday);
344 regs[ISL1208_REG_MO] = BIN2BCD(tm->tm_mon + 1);
345 regs[ISL1208_REG_YR] = BIN2BCD(tm->tm_year - 100);
346
347 regs[ISL1208_REG_DW] = BIN2BCD(tm->tm_wday & 7);
348
349 sr = isl1208_i2c_get_sr(client);
350 if (sr < 0) {
351 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
352 return sr;
353 }
354
355 /* set WRTC */
356 sr = i2c_smbus_write_byte_data (client, ISL1208_REG_SR,
357 sr | ISL1208_REG_SR_WRTC);
358 if (sr < 0) {
359 dev_err(&client->dev, "%s: writing SR failed\n", __func__);
360 return sr;
361 }
362
363 /* write RTC registers */
364 sr = isl1208_i2c_set_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN);
365 if (sr < 0) {
366 dev_err(&client->dev, "%s: writing RTC section failed\n",
367 __func__);
368 return sr;
369 }
370
371 /* clear WRTC again */
372 sr = i2c_smbus_write_byte_data (client, ISL1208_REG_SR,
373 sr & ~ISL1208_REG_SR_WRTC);
374 if (sr < 0) {
375 dev_err(&client->dev, "%s: writing SR failed\n", __func__);
376 return sr;
377 }
378
379 return 0;
380}
381
382
383static int isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm)
384{
385 return isl1208_i2c_set_time(to_i2c_client(dev), tm);
386}
387
388static int isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
389{
390 return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm);
391}
392
393static struct rtc_class_ops isl1208_rtc_ops = {
394 .proc = isl1208_rtc_proc,
395 .read_time = isl1208_rtc_read_time,
396 .set_time = isl1208_rtc_set_time,
397 .read_alarm = isl1208_rtc_read_alarm,
398 //.set_alarm = isl1208_rtc_set_alarm,
399};
400
401/* sysfs interface */
402
403static ssize_t isl1208_sysfs_show_atrim(struct device *dev,
404 struct device_attribute *attr,
405 char *buf)
406{
407 int atr;
408
409 atr = isl1208_i2c_get_atr(to_i2c_client(dev));
410 if (atr < 0)
411 return atr;
412
413 return sprintf(buf, "%d.%.2d pF\n", atr>>2, (atr&0x3)*25);
414}
415static DEVICE_ATTR(atrim, S_IRUGO, isl1208_sysfs_show_atrim, NULL);
416
417static ssize_t isl1208_sysfs_show_dtrim(struct device *dev,
418 struct device_attribute *attr,
419 char *buf)
420{
421 int dtr;
422
423 dtr = isl1208_i2c_get_dtr(to_i2c_client(dev));
424 if (dtr < 0)
425 return dtr;
426
427 return sprintf(buf, "%d ppm\n", dtr);
428}
429static DEVICE_ATTR(dtrim, S_IRUGO, isl1208_sysfs_show_dtrim, NULL);
430
431static ssize_t isl1208_sysfs_show_usr(struct device *dev,
432 struct device_attribute *attr,
433 char *buf)
434{
435 int usr;
436
437 usr = isl1208_i2c_get_usr(to_i2c_client(dev));
438 if (usr < 0)
439 return usr;
440
441 return sprintf(buf, "0x%.4x\n", usr);
442}
443
444static ssize_t isl1208_sysfs_store_usr(struct device *dev,
445 struct device_attribute *attr,
446 const char *buf, size_t count)
447{
448 int usr = -1;
449
450 if (buf[0] == '0' && (buf[1] == 'x' || buf[1] == 'X')) {
451 if (sscanf(buf, "%x", &usr) != 1)
452 return -EINVAL;
453 } else {
454 if (sscanf(buf, "%d", &usr) != 1)
455 return -EINVAL;
456 }
457
458 if (usr < 0 || usr > 0xffff)
459 return -EINVAL;
460
461 return isl1208_i2c_set_usr(to_i2c_client(dev), usr) ? -EIO : count;
462}
463static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr,
464 isl1208_sysfs_store_usr);
465
466static int
467isl1208_probe(struct i2c_adapter *adapter, int addr, int kind)
468{
469 int rc = 0;
470 struct i2c_client *new_client = NULL;
471 struct rtc_device *rtc = NULL;
472
473 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
474 rc = -ENODEV;
475 goto failout;
476 }
477
478 new_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
479 if (new_client == NULL) {
480 rc = -ENOMEM;
481 goto failout;
482 }
483
484 new_client->addr = addr;
485 new_client->adapter = adapter;
486 new_client->driver = &isl1208_driver;
487 new_client->flags = 0;
488 strcpy(new_client->name, DRV_NAME);
489
490 if (kind < 0) {
491 rc = isl1208_i2c_validate_client(new_client);
492 if (rc < 0)
493 goto failout;
494 }
495
496 rc = i2c_attach_client(new_client);
497 if (rc < 0)
498 goto failout;
499
500 dev_info(&new_client->dev,
501 "chip found, driver version " DRV_VERSION "\n");
502
503 rtc = rtc_device_register(isl1208_driver.driver.name,
504 &new_client->dev,
505 &isl1208_rtc_ops, THIS_MODULE);
506
507 if (IS_ERR(rtc)) {
508 rc = PTR_ERR(rtc);
509 goto failout_detach;
510 }
511
512 i2c_set_clientdata(new_client, rtc);
513
514 rc = isl1208_i2c_get_sr(new_client);
515 if (rc < 0) {
516 dev_err(&new_client->dev, "reading status failed\n");
517 goto failout_unregister;
518 }
519
520 if (rc & ISL1208_REG_SR_RTCF)
521 dev_warn(&new_client->dev, "rtc power failure detected, "
522 "please set clock.\n");
523
524 rc = device_create_file(&new_client->dev, &dev_attr_atrim);
525 if (rc < 0)
526 goto failout_unregister;
527 rc = device_create_file(&new_client->dev, &dev_attr_dtrim);
528 if (rc < 0)
529 goto failout_atrim;
530 rc = device_create_file(&new_client->dev, &dev_attr_usr);
531 if (rc < 0)
532 goto failout_dtrim;
533
534 return 0;
535
536 failout_dtrim:
537 device_remove_file(&new_client->dev, &dev_attr_dtrim);
538 failout_atrim:
539 device_remove_file(&new_client->dev, &dev_attr_atrim);
540 failout_unregister:
541 rtc_device_unregister(rtc);
542 failout_detach:
543 i2c_detach_client(new_client);
544 failout:
545 kfree(new_client);
546 return rc;
547}
548
549static int
550isl1208_attach_adapter (struct i2c_adapter *adapter)
551{
552 return i2c_probe(adapter, &addr_data, isl1208_probe);
553}
554
555static int
556isl1208_detach_client(struct i2c_client *client)
557{
558 int rc;
559 struct rtc_device *const rtc = i2c_get_clientdata(client);
560
561 if (rtc)
562 rtc_device_unregister(rtc); /* do we need to kfree? */
563
564 rc = i2c_detach_client(client);
565 if (rc)
566 return rc;
567
568 kfree(client);
569
570 return 0;
571}
572
573/* module management */
574
575static int __init isl1208_init(void)
576{
577 return i2c_add_driver(&isl1208_driver);
578}
579
580static void __exit isl1208_exit(void)
581{
582 i2c_del_driver(&isl1208_driver);
583}
584
585MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
586MODULE_DESCRIPTION("Intersil ISL1208 RTC driver");
587MODULE_LICENSE("GPL");
588MODULE_VERSION(DRV_VERSION);
589
590module_init(isl1208_init);
591module_exit(isl1208_exit);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 4cd879cb9bdd..1140302ff11d 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -304,6 +304,7 @@ static int __init xpram_setup_sizes(unsigned long pages)
304{ 304{
305 unsigned long mem_needed; 305 unsigned long mem_needed;
306 unsigned long mem_auto; 306 unsigned long mem_auto;
307 unsigned long long size;
307 int mem_auto_no; 308 int mem_auto_no;
308 int i; 309 int i;
309 310
@@ -321,9 +322,19 @@ static int __init xpram_setup_sizes(unsigned long pages)
321 mem_needed = 0; 322 mem_needed = 0;
322 mem_auto_no = 0; 323 mem_auto_no = 0;
323 for (i = 0; i < xpram_devs; i++) { 324 for (i = 0; i < xpram_devs; i++) {
324 if (sizes[i]) 325 if (sizes[i]) {
325 xpram_sizes[i] = 326 size = simple_strtoull(sizes[i], &sizes[i], 0);
326 (memparse(sizes[i], &sizes[i]) + 3) & -4UL; 327 switch (sizes[i][0]) {
328 case 'g':
329 case 'G':
330 size <<= 20;
331 break;
332 case 'm':
333 case 'M':
334 size <<= 10;
335 }
336 xpram_sizes[i] = (size + 3) & -4UL;
337 }
327 if (xpram_sizes[i]) 338 if (xpram_sizes[i])
328 mem_needed += xpram_sizes[i]; 339 mem_needed += xpram_sizes[i];
329 else 340 else
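A note on the replacement of memparse() above: the shift counts only make sense if xpram_sizes[] is interpreted in kibibytes, so 'g'/'G' multiplies by 2^20 and 'm'/'M' by 2^10 (for example, "2g" becomes 2097152 KiB), whereas memparse() returned bytes. A sketch of the same conversion in isolation, under that assumption (not part of the patch):

	/* illustrative only: parse an xpram size string into KiB */
	static unsigned long xpram_parse_size_kb(char *s)
	{
		char *end;
		unsigned long long size = simple_strtoull(s, &end, 0);

		if (*end == 'g' || *end == 'G')
			size <<= 20;		/* GiB expressed in KiB */
		else if (*end == 'm' || *end == 'M')
			size <<= 10;		/* MiB expressed in KiB */

		return (size + 3) & -4UL;	/* round to a multiple of 4 */
	}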
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 95e285b2e25c..7a84014f2037 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1106,10 +1106,10 @@ raw3270_delete_device(struct raw3270 *rp)
1106 1106
1107 /* Remove from device chain. */ 1107 /* Remove from device chain. */
1108 mutex_lock(&raw3270_mutex); 1108 mutex_lock(&raw3270_mutex);
1109 if (rp->clttydev) 1109 if (rp->clttydev && !IS_ERR(rp->clttydev))
1110 class_device_destroy(class3270, 1110 class_device_destroy(class3270,
1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1112 if (rp->cltubdev) 1112 if (rp->cltubdev && !IS_ERR(rp->cltubdev))
1113 class_device_destroy(class3270, 1113 class_device_destroy(class3270,
1114 MKDEV(IBM_FS3270_MAJOR, rp->minor)); 1114 MKDEV(IBM_FS3270_MAJOR, rp->minor));
1115 list_del_init(&rp->list); 1115 list_del_init(&rp->list);
@@ -1173,21 +1173,37 @@ static struct attribute_group raw3270_attr_group = {
1173 .attrs = raw3270_attrs, 1173 .attrs = raw3270_attrs,
1174}; 1174};
1175 1175
1176static void 1176static int raw3270_create_attributes(struct raw3270 *rp)
1177raw3270_create_attributes(struct raw3270 *rp)
1178{ 1177{
1179 //FIXME: check return code 1178 int rc;
1180 sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1179
1181 rp->clttydev = 1180 rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1182 class_device_create(class3270, NULL, 1181 if (rc)
1183 MKDEV(IBM_TTY3270_MAJOR, rp->minor), 1182 goto out;
1184 &rp->cdev->dev, "tty%s", 1183
1185 rp->cdev->dev.bus_id); 1184 rp->clttydev = class_device_create(class3270, NULL,
1186 rp->cltubdev = 1185 MKDEV(IBM_TTY3270_MAJOR, rp->minor),
1187 class_device_create(class3270, NULL, 1186 &rp->cdev->dev, "tty%s",
1188 MKDEV(IBM_FS3270_MAJOR, rp->minor), 1187 rp->cdev->dev.bus_id);
1189 &rp->cdev->dev, "tub%s", 1188 if (IS_ERR(rp->clttydev)) {
1190 rp->cdev->dev.bus_id); 1189 rc = PTR_ERR(rp->clttydev);
1190 goto out_ttydev;
1191 }
1192
1193 rp->cltubdev = class_device_create(class3270, NULL,
1194 MKDEV(IBM_FS3270_MAJOR, rp->minor),
1195 &rp->cdev->dev, "tub%s",
1196 rp->cdev->dev.bus_id);
1197 if (!IS_ERR(rp->cltubdev))
1198 goto out;
1199
1200 rc = PTR_ERR(rp->cltubdev);
1201 class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1202
1203out_ttydev:
1204 sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1205out:
1206 return rc;
1191} 1207}
1192 1208
1193/* 1209/*
@@ -1255,7 +1271,9 @@ raw3270_set_online (struct ccw_device *cdev)
1255 rc = raw3270_reset_device(rp); 1271 rc = raw3270_reset_device(rp);
1256 if (rc) 1272 if (rc)
1257 goto failure; 1273 goto failure;
1258 raw3270_create_attributes(rp); 1274 rc = raw3270_create_attributes(rp);
1275 if (rc)
1276 goto failure;
1259 set_bit(RAW3270_FLAGS_READY, &rp->flags); 1277 set_bit(RAW3270_FLAGS_READY, &rp->flags);
1260 mutex_lock(&raw3270_mutex); 1278 mutex_lock(&raw3270_mutex);
1261 list_for_each_entry(np, &raw3270_notifier, list) 1279 list_for_each_entry(np, &raw3270_notifier, list)
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index a5c68e60fcf4..643b6d078563 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -76,14 +76,22 @@ struct tape_class_device *register_tape_dev(
76 device, 76 device,
77 "%s", tcd->device_name 77 "%s", tcd->device_name
78 ); 78 );
79 sysfs_create_link( 79 rc = PTR_ERR(tcd->class_device);
80 if (rc)
81 goto fail_with_cdev;
82 rc = sysfs_create_link(
80 &device->kobj, 83 &device->kobj,
81 &tcd->class_device->kobj, 84 &tcd->class_device->kobj,
82 tcd->mode_name 85 tcd->mode_name
83 ); 86 );
87 if (rc)
88 goto fail_with_class_device;
84 89
85 return tcd; 90 return tcd;
86 91
92fail_with_class_device:
93 class_device_destroy(tape_class, tcd->char_device->dev);
94
87fail_with_cdev: 95fail_with_cdev:
88 cdev_del(tcd->char_device); 96 cdev_del(tcd->char_device);
89 97
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 122b4d8965c3..2826aed91043 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -543,20 +543,24 @@ int
543tape_generic_probe(struct ccw_device *cdev) 543tape_generic_probe(struct ccw_device *cdev)
544{ 544{
545 struct tape_device *device; 545 struct tape_device *device;
546 int ret;
546 547
547 device = tape_alloc_device(); 548 device = tape_alloc_device();
548 if (IS_ERR(device)) 549 if (IS_ERR(device))
549 return -ENODEV; 550 return -ENODEV;
550 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); 551 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
552 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
553 if (ret) {
554 tape_put_device(device);
555 PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id);
556 return ret;
557 }
551 cdev->dev.driver_data = device; 558 cdev->dev.driver_data = device;
559 cdev->handler = __tape_do_irq;
552 device->cdev = cdev; 560 device->cdev = cdev;
553 device->cdev_id = busid_to_int(cdev->dev.bus_id); 561 device->cdev_id = busid_to_int(cdev->dev.bus_id);
554 cdev->handler = __tape_do_irq; 562 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
555 563 return ret;
556 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
557 sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
558
559 return 0;
560} 564}
561 565
562static inline void 566static inline void
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index f26a2ee3aad8..3cba6c9fab11 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root,
152 struct ccwgroup_device *gdev; 152 struct ccwgroup_device *gdev;
153 int i; 153 int i;
154 int rc; 154 int rc;
155 int del_drvdata;
156 155
157 if (argc > 256) /* disallow dumb users */ 156 if (argc > 256) /* disallow dumb users */
158 return -EINVAL; 157 return -EINVAL;
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root,
163 162
164 atomic_set(&gdev->onoff, 0); 163 atomic_set(&gdev->onoff, 0);
165 164
166 del_drvdata = 0;
167 for (i = 0; i < argc; i++) { 165 for (i = 0; i < argc; i++) {
168 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 166 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
169 167
@@ -180,10 +178,8 @@ ccwgroup_create(struct device *root,
180 rc = -EINVAL; 178 rc = -EINVAL;
181 goto free_dev; 179 goto free_dev;
182 } 180 }
183 }
184 for (i = 0; i < argc; i++)
185 gdev->cdev[i]->dev.driver_data = gdev; 181 gdev->cdev[i]->dev.driver_data = gdev;
186 del_drvdata = 1; 182 }
187 183
188 gdev->creator_id = creator_id; 184 gdev->creator_id = creator_id;
189 gdev->count = argc; 185 gdev->count = argc;
@@ -226,9 +222,9 @@ error:
226free_dev: 222free_dev:
227 for (i = 0; i < argc; i++) 223 for (i = 0; i < argc; i++)
228 if (gdev->cdev[i]) { 224 if (gdev->cdev[i]) {
229 put_device(&gdev->cdev[i]->dev); 225 if (gdev->cdev[i]->dev.driver_data == gdev)
230 if (del_drvdata)
231 gdev->cdev[i]->dev.driver_data = NULL; 226 gdev->cdev[i]->dev.driver_data = NULL;
227 put_device(&gdev->cdev[i]->dev);
232 } 228 }
233 kfree(gdev); 229 kfree(gdev);
234 return rc; 230 return rc;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 0df3af1f08de..828b2d334f0a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1068,6 +1068,7 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
1068 if (count) { 1068 if (count) {
1069 interval = cmb_data->last_update - 1069 interval = cmb_data->last_update -
1070 cdev->private->cmb_start_time; 1070 cdev->private->cmb_start_time;
1071 interval = (interval * 1000) >> 12;
1071 interval /= count; 1072 interval /= count;
1072 } else 1073 } else
1073 interval = -1; 1074 interval = -1;
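A note on the added line above: the interval is a difference of s390 TOD-clock values, and the TOD clock counts 4096 units per microsecond, so (interval * 1000) >> 12 turns the delta into nanoseconds before it is divided by the sample count. In isolation (sketch only, assuming that TOD resolution):

	/* illustrative only: TOD-clock delta (4096 units/us) to nanoseconds */
	static inline u64 tod_delta_to_ns(u64 tod_delta)
	{
		return (tod_delta * 1000) >> 12;
	}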
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ac6e0c7e43d9..7a39e0b0386c 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
152 if (cdev->private->iretry) { 152 if (cdev->private->iretry) {
153 cdev->private->iretry--; 153 cdev->private->iretry--;
154 ret = cio_halt(sch); 154 ret = cio_halt(sch);
155 return (ret == 0) ? -EBUSY : ret; 155 if (ret != -EBUSY)
156 return (ret == 0) ? -EBUSY : ret;
156 } 157 }
157 /* halt io unsuccessful. */ 158 /* halt io unsuccessful. */
158 cdev->private->iretry = 255; /* 255 clear retries. */ 159 cdev->private->iretry = 255; /* 255 clear retries. */
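
The ccw_device_cancel_halt_clear() change above still returns early when cio_halt() is accepted or fails hard, but lets an -EBUSY answer fall through to the clear path instead of being reported to the caller. A compact sketch of that escalation follows, with try_halt()/try_clear() as hypothetical stand-ins for cio_halt()/cio_clear().

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for cio_halt() and cio_clear(). */
static int try_halt(void)  { return -EBUSY; }	/* pretend halt is rejected */
static int try_clear(void) { return 0; }

/* Cancel an outstanding operation: prefer halt, but if the subchannel is
 * busy with something halt cannot touch, escalate to the stronger clear.
 * A return of -EBUSY tells the caller to wait for the final interrupt. */
static int cancel_io(int *halt_retries)
{
	int ret;

	if (*halt_retries) {
		(*halt_retries)--;
		ret = try_halt();
		if (ret != -EBUSY)
			return ret ? ret : -EBUSY;
		/* halt rejected with -EBUSY: fall through to clear */
	}
	ret = try_clear();
	return ret ? ret : -EBUSY;
}

int main(void)
{
	int retries = 255;

	printf("cancel_io() = %d\n", cancel_io(&retries));
	return 0;
}
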
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 20c8eb16f464..8a4b58120146 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -2686,9 +2686,17 @@ static struct attribute_group ctc_attr_group = {
2686static int 2686static int
2687ctc_add_attributes(struct device *dev) 2687ctc_add_attributes(struct device *dev)
2688{ 2688{
2689 device_create_file(dev, &dev_attr_loglevel); 2689 int rc;
2690 device_create_file(dev, &dev_attr_stats); 2690
2691 return 0; 2691 rc = device_create_file(dev, &dev_attr_loglevel);
2692 if (rc)
2693 goto out;
2694 rc = device_create_file(dev, &dev_attr_stats);
2695 if (!rc)
2696 goto out;
2697 device_remove_file(dev, &dev_attr_loglevel);
2698out:
2699 return rc;
2692} 2700}
2693 2701
2694static void 2702static void
@@ -2901,7 +2909,12 @@ ctc_new_device(struct ccwgroup_device *cgdev)
2901 goto out; 2909 goto out;
2902 } 2910 }
2903 2911
2904 ctc_add_attributes(&cgdev->dev); 2912 if (ctc_add_attributes(&cgdev->dev)) {
2913 ctc_netdev_unregister(dev);
2914 dev->priv = NULL;
2915 ctc_free_netdevice(dev, 1);
2916 goto out;
2917 }
2905 2918
2906 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name)); 2919 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2907 2920
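
ctc_add_attributes() now propagates the device_create_file() return codes and removes the first attribute again when the second one cannot be created, and ctc_new_device() unwinds the freshly registered netdevice when attribute creation fails. The same create-or-roll-back pattern is sketched below in plain C, with create_a()/create_b()/remove_a() as hypothetical stand-ins for the device attribute calls.

#include <stdio.h>

/* Hypothetical stand-ins for device_create_file()/device_remove_file(). */
static int create_a(void)  { return 0; }
static int create_b(void)  { return -1; }	/* simulate a failure */
static void remove_a(void) { puts("rolled back first attribute"); }

/* Create two attributes as a unit: either both exist afterwards or
 * neither does, and the first error code is returned to the caller. */
static int add_attributes(void)
{
	int rc = create_a();

	if (rc)
		return rc;
	rc = create_b();
	if (rc)
		remove_a();	/* undo the half-finished setup */
	return rc;
}

int main(void)
{
	printf("add_attributes() = %d\n", add_attributes());
	return 0;
}
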
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 103c41470bd2..5fff1f93973a 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -8451,10 +8451,11 @@ __qeth_reboot_event_card(struct device *dev, void *data)
8451static int 8451static int
8452qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) 8452qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8453{ 8453{
8454 int ret;
8454 8455
8455 driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, 8456 ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
8456 __qeth_reboot_event_card); 8457 __qeth_reboot_event_card);
8457 return NOTIFY_DONE; 8458 return ret ? NOTIFY_BAD : NOTIFY_DONE;
8458} 8459}
8459 8460
8460 8461
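
The qeth reboot notifier now inspects the value returned by driver_for_each_device() and maps any error to NOTIFY_BAD instead of discarding it. The sketch below shows that propagation with a hypothetical for_each() iterator that stops at the first callback error; the NOTIFY_* values are illustrative constants, not taken from the kernel headers.

#include <stdio.h>

#define NOTIFY_DONE 0x0000
#define NOTIFY_BAD  0x8002	/* illustrative values */

/* Hypothetical iterator: calls fn() for each element and stops early,
 * returning the first non-zero value, like driver_for_each_device(). */
static int for_each(int (*fn)(int), const int *items, int n)
{
	int i, ret = 0;

	for (i = 0; i < n && !ret; i++)
		ret = fn(items[i]);
	return ret;
}

static int clear_card(int id)
{
	return id == 3 ? -1 : 0;	/* pretend card 3 refuses to shut down */
}

static int reboot_event(const int *cards, int n)
{
	int ret = for_each(clear_card, cards, n);

	return ret ? NOTIFY_BAD : NOTIFY_DONE;
}

int main(void)
{
	const int cards[] = { 1, 2, 3 };

	printf("notifier returned %#x\n", reboot_event(cards, 3));
	return 0;
}
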
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 16b59773c0bb..935952ef88f1 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -233,7 +233,7 @@ static void __init build_one_sbus(struct device_node *dp, int num_sbus)
233 sbus->ofdev.node = dp; 233 sbus->ofdev.node = dp;
234 sbus->ofdev.dev.parent = NULL; 234 sbus->ofdev.dev.parent = NULL;
235 sbus->ofdev.dev.bus = &sbus_bus_type; 235 sbus->ofdev.dev.bus = &sbus_bus_type;
236 strcpy(sbus->ofdev.dev.bus_id, dp->path_component_name); 236 sprintf(sbus->ofdev.dev.bus_id, "sbus%d", num_sbus);
237 237
238 if (of_device_register(&sbus->ofdev) != 0) 238 if (of_device_register(&sbus->ofdev) != 0)
239 printk(KERN_DEBUG "sbus: device registration error for %s!\n", 239 printk(KERN_DEBUG "sbus: device registration error for %s!\n",
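
The sbus change above replaces the firmware path component name with a generated, numbered "sbusN" bus id; a bounded, instance-numbered name is short and unique by construction. A small sketch of that naming style follows, using snprintf() into a fixed-size buffer (BUS_ID_LEN here is an illustrative size, not the kernel's constant).

#include <stdio.h>

#define BUS_ID_LEN 20	/* illustrative fixed-size id buffer */

/* Build a short, unique, bounded bus id from an instance number rather
 * than copying an externally supplied (and possibly oversized) name. */
static void set_bus_id(char id[BUS_ID_LEN], int instance)
{
	snprintf(id, BUS_ID_LEN, "sbus%d", instance);
}

int main(void)
{
	char id[BUS_ID_LEN];
	int i;

	for (i = 0; i < 3; i++) {
		set_bus_id(id, i);
		printf("%s\n", id);	/* sbus0, sbus1, sbus2 */
	}
	return 0;
}
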
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
index c690c2b89e41..acf292736b4e 100644
--- a/drivers/scsi/53c7xx.c
+++ b/drivers/scsi/53c7xx.c
@@ -3451,12 +3451,12 @@ create_cmd (Scsi_Cmnd *cmd) {
3451 for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4, 3451 for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
3452 cmd_dataout += 4, ++i) { 3452 cmd_dataout += 4, ++i) {
3453 u32 vbuf = cmd->use_sg 3453 u32 vbuf = cmd->use_sg
3454 ? (u32)page_address(((struct scatterlist *)cmd->buffer)[i].page)+ 3454 ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
3455 ((struct scatterlist *)cmd->buffer)[i].offset 3455 ((struct scatterlist *)cmd->request_buffer)[i].offset
3456 : (u32)(cmd->request_buffer); 3456 : (u32)(cmd->request_buffer);
3457 u32 bbuf = virt_to_bus((void *)vbuf); 3457 u32 bbuf = virt_to_bus((void *)vbuf);
3458 u32 count = cmd->use_sg ? 3458 u32 count = cmd->use_sg ?
3459 ((struct scatterlist *)cmd->buffer)[i].length : 3459 ((struct scatterlist *)cmd->request_buffer)[i].length :
3460 cmd->request_bufflen; 3460 cmd->request_bufflen;
3461 3461
3462 /* 3462 /*
@@ -5417,7 +5417,7 @@ insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
5417 5417
5418 if ((buffers = cmd->use_sg)) { 5418 if ((buffers = cmd->use_sg)) {
5419 for (offset = 0, 5419 for (offset = 0,
5420 segment = (struct scatterlist *) cmd->buffer; 5420 segment = (struct scatterlist *) cmd->request_buffer;
5421 buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) && 5421 buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
5422 (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length))))); 5422 (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
5423 --buffers, offset += segment->length, ++segment) 5423 --buffers, offset += segment->length, ++segment)
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 8a4659e94105..bdc6bb262bce 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -911,7 +911,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
911 sp->SCp.ptr = 911 sp->SCp.ptr =
912 (char *) virt_to_phys(sp->request_buffer); 912 (char *) virt_to_phys(sp->request_buffer);
913 } else { 913 } else {
914 sp->SCp.buffer = (struct scatterlist *) sp->buffer; 914 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
915 sp->SCp.buffers_residual = sp->use_sg - 1; 915 sp->SCp.buffers_residual = sp->use_sg - 1;
916 sp->SCp.this_residual = sp->SCp.buffer->length; 916 sp->SCp.this_residual = sp->SCp.buffer->length;
917 if (esp->dma_mmu_get_scsi_sgl) 917 if (esp->dma_mmu_get_scsi_sgl)
@@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
2152 */ 2152 */
2153static int esp_should_clear_sync(Scsi_Cmnd *sp) 2153static int esp_should_clear_sync(Scsi_Cmnd *sp)
2154{ 2154{
2155 unchar cmd1 = sp->cmnd[0]; 2155 unchar cmd = sp->cmnd[0];
2156 unchar cmd2 = sp->data_cmnd[0];
2157 2156
2158 /* These cases are for spinning up a disk and 2157 /* These cases are for spinning up a disk and
2159 * waiting for that spinup to complete. 2158 * waiting for that spinup to complete.
2160 */ 2159 */
2161 if(cmd1 == START_STOP || 2160 if(cmd == START_STOP)
2162 cmd2 == START_STOP)
2163 return 0; 2161 return 0;
2164 2162
2165 if(cmd1 == TEST_UNIT_READY || 2163 if(cmd == TEST_UNIT_READY)
2166 cmd2 == TEST_UNIT_READY)
2167 return 0; 2164 return 0;
2168 2165
2169 /* One more special case for SCSI tape drives, 2166 /* One more special case for SCSI tape drives,
2170 * this is what is used to probe the device for 2167 * this is what is used to probe the device for
2171 * completion of a rewind or tape load operation. 2168 * completion of a rewind or tape load operation.
2172 */ 2169 */
2173 if(sp->device->type == TYPE_TAPE) { 2170 if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
2174 if(cmd1 == MODE_SENSE || 2171 return 0;
2175 cmd2 == MODE_SENSE)
2176 return 0;
2177 }
2178 2172
2179 return 1; 2173 return 1;
2180} 2174}
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index a06f547e87f7..d05681f9d81a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
114MODULE_LICENSE("GPL"); 114MODULE_LICENSE("GPL");
115module_param(NCR_D700, charp, 0); 115module_param(NCR_D700, charp, 0);
116 116
117static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = 117static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; 118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
119 119
120#ifdef MODULE 120#ifdef MODULE
@@ -173,7 +173,7 @@ struct NCR_D700_private {
173 char pad; 173 char pad;
174}; 174};
175 175
176static int 176static int __devinit
177NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, 177NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
178 int slot, u32 region, int differential) 178 int slot, u32 region, int differential)
179{ 179{
@@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data, struct pt_regs *regs)
243 * essentially connected to the MCA bus independently, it is easier 243 * essentially connected to the MCA bus independently, it is easier
244 * to set them up as two separate host adapters, rather than one 244 * to set them up as two separate host adapters, rather than one
245 * adapter with two channels */ 245 * adapter with two channels */
246static int 246static int __devinit
247NCR_D700_probe(struct device *dev) 247NCR_D700_probe(struct device *dev)
248{ 248{
249 struct NCR_D700_private *p; 249 struct NCR_D700_private *p;
@@ -329,7 +329,7 @@ NCR_D700_probe(struct device *dev)
329 for (i = 0; i < 2; i++) { 329 for (i = 0; i < 2; i++) {
330 int err; 330 int err;
331 331
332 if ((err = NCR_D700_probe_one(p, i, slot, irq, 332 if ((err = NCR_D700_probe_one(p, i, irq, slot,
333 offset_addr + (0x80 * i), 333 offset_addr + (0x80 * i),
334 differential)) != 0) 334 differential)) != 0)
335 printk("D700: SIOP%d: probe failed, error = %d\n", 335 printk("D700: SIOP%d: probe failed, error = %d\n",
@@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev)
349 return 0; 349 return 0;
350} 350}
351 351
352static void 352static void __devexit
353NCR_D700_remove_one(struct Scsi_Host *host) 353NCR_D700_remove_one(struct Scsi_Host *host)
354{ 354{
355 scsi_remove_host(host); 355 scsi_remove_host(host);
@@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host)
359 release_region(host->base, 64); 359 release_region(host->base, 64);
360} 360}
361 361
362static int 362static int __devexit
363NCR_D700_remove(struct device *dev) 363NCR_D700_remove(struct device *dev)
364{ 364{
365 struct NCR_D700_private *p = dev_get_drvdata(dev); 365 struct NCR_D700_private *p = dev_get_drvdata(dev);
@@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = {
380 .name = "NCR_D700", 380 .name = "NCR_D700",
381 .bus = &mca_bus_type, 381 .bus = &mca_bus_type,
382 .probe = NCR_D700_probe, 382 .probe = NCR_D700_probe,
383 .remove = NCR_D700_remove, 383 .remove = __devexit_p(NCR_D700_remove),
384 }, 384 },
385}; 385};
386 386
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 36e63f82d9f8..f974869ea323 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -551,6 +551,11 @@ struct aha152x_hostdata {
551struct aha152x_scdata { 551struct aha152x_scdata {
552 Scsi_Cmnd *next; /* next sc in queue */ 552 Scsi_Cmnd *next; /* next sc in queue */
553 struct semaphore *sem; /* semaphore to block on */ 553 struct semaphore *sem; /* semaphore to block on */
554 unsigned char cmd_len;
555 unsigned char cmnd[MAX_COMMAND_SIZE];
556 unsigned short use_sg;
557 unsigned request_bufflen;
558 void *request_buffer;
554}; 559};
555 560
556 561
@@ -1006,11 +1011,20 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
1006 return FAILED; 1011 return FAILED;
1007 } 1012 }
1008 } else { 1013 } else {
1014 struct aha152x_scdata *sc;
1015
1009 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); 1016 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
1010 if(SCpnt->host_scribble==0) { 1017 if(SCpnt->host_scribble==0) {
1011 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); 1018 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
1012 return FAILED; 1019 return FAILED;
1013 } 1020 }
1021
1022 sc = SCDATA(SCpnt);
1023 memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd));
1024 sc->request_buffer = SCpnt->request_buffer;
1025 sc->request_bufflen = SCpnt->request_bufflen;
1026 sc->use_sg = SCpnt->use_sg;
1027 sc->cmd_len = SCpnt->cmd_len;
1014 } 1028 }
1015 1029
1016 SCNEXT(SCpnt) = NULL; 1030 SCNEXT(SCpnt) = NULL;
@@ -1165,6 +1179,10 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1165 DECLARE_MUTEX_LOCKED(sem); 1179 DECLARE_MUTEX_LOCKED(sem);
1166 struct timer_list timer; 1180 struct timer_list timer;
1167 int ret, issued, disconnected; 1181 int ret, issued, disconnected;
1182 unsigned char old_cmd_len = SCpnt->cmd_len;
1183 unsigned short old_use_sg = SCpnt->use_sg;
1184 void *old_buffer = SCpnt->request_buffer;
1185 unsigned old_bufflen = SCpnt->request_bufflen;
1168 unsigned long flags; 1186 unsigned long flags;
1169 1187
1170#if defined(AHA152X_DEBUG) 1188#if defined(AHA152X_DEBUG)
@@ -1198,11 +1216,11 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1198 add_timer(&timer); 1216 add_timer(&timer);
1199 down(&sem); 1217 down(&sem);
1200 del_timer(&timer); 1218 del_timer(&timer);
1201 1219
1202 SCpnt->cmd_len = SCpnt->old_cmd_len; 1220 SCpnt->cmd_len = old_cmd_len;
1203 SCpnt->use_sg = SCpnt->old_use_sg; 1221 SCpnt->use_sg = old_use_sg;
1204 SCpnt->request_buffer = SCpnt->buffer; 1222 SCpnt->request_buffer = old_buffer;
1205 SCpnt->request_bufflen = SCpnt->bufflen; 1223 SCpnt->request_bufflen = old_bufflen;
1206 1224
1207 DO_LOCK(flags); 1225 DO_LOCK(flags);
1208 1226
@@ -1565,6 +1583,9 @@ static void busfree_run(struct Scsi_Host *shpnt)
1565#endif 1583#endif
1566 1584
1567 if(DONE_SC->SCp.phase & check_condition) { 1585 if(DONE_SC->SCp.phase & check_condition) {
1586 struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
1587 struct aha152x_scdata *sc = SCDATA(cmd);
1588
1568#if 0 1589#if 0
1569 if(HOSTDATA(shpnt)->debug & debug_eh) { 1590 if(HOSTDATA(shpnt)->debug & debug_eh) {
1570 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); 1591 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC));
@@ -1573,13 +1594,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
1573#endif 1594#endif
1574 1595
1575 /* restore old command */ 1596 /* restore old command */
1576 memcpy((void *) DONE_SC->cmnd, (void *) DONE_SC->data_cmnd, sizeof(DONE_SC->data_cmnd)); 1597 memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd));
1577 DONE_SC->request_buffer = DONE_SC->buffer; 1598 cmd->request_buffer = sc->request_buffer;
1578 DONE_SC->request_bufflen = DONE_SC->bufflen; 1599 cmd->request_bufflen = sc->request_bufflen;
1579 DONE_SC->use_sg = DONE_SC->old_use_sg; 1600 cmd->use_sg = sc->use_sg;
1580 DONE_SC->cmd_len = DONE_SC->old_cmd_len; 1601 cmd->cmd_len = sc->cmd_len;
1581 1602
1582 DONE_SC->SCp.Status = 0x02; 1603 cmd->SCp.Status = 0x02;
1583 1604
1584 HOSTDATA(shpnt)->commands--; 1605 HOSTDATA(shpnt)->commands--;
1585 if (!HOSTDATA(shpnt)->commands) 1606 if (!HOSTDATA(shpnt)->commands)
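
The aha152x changes above follow the removal of the old struct scsi_cmnd spare fields (buffer, bufflen, old_use_sg, old_cmd_len, data_cmnd): the driver now snapshots the command bytes and data-buffer description into its own per-command aha152x_scdata before issuing autosense, and restores them from that snapshot when the check condition has been handled. A self-contained sketch of the save/restore idea follows, with a simplified cmd structure that only mimics the fields involved; it is not the driver's code.

#include <string.h>
#include <stdio.h>

#define CMD_SIZE 16

/* Simplified stand-in for the few scsi_cmnd fields the pattern touches. */
struct cmd {
	unsigned char cmnd[CMD_SIZE];
	unsigned char cmd_len;
	void *request_buffer;
	unsigned request_bufflen;
};

/* Driver-private snapshot, kept per command (like aha152x_scdata). */
struct saved {
	unsigned char cmnd[CMD_SIZE];
	unsigned char cmd_len;
	void *request_buffer;
	unsigned request_bufflen;
};

static void save_cmd(struct saved *s, const struct cmd *c)
{
	memcpy(s->cmnd, c->cmnd, sizeof(s->cmnd));
	s->cmd_len = c->cmd_len;
	s->request_buffer = c->request_buffer;
	s->request_bufflen = c->request_bufflen;
}

static void restore_cmd(struct cmd *c, const struct saved *s)
{
	memcpy(c->cmnd, s->cmnd, sizeof(s->cmnd));
	c->cmd_len = s->cmd_len;
	c->request_buffer = s->request_buffer;
	c->request_bufflen = s->request_bufflen;
}

int main(void)
{
	unsigned char data[64], sense[18];
	struct cmd c = { { 0x28 /* READ(10) */ }, 10, data, sizeof(data) };
	struct saved s;

	save_cmd(&s, &c);		/* before rewriting it for REQUEST SENSE */
	c.cmnd[0] = 0x03;		/* REQUEST SENSE */
	c.cmd_len = 6;
	c.request_buffer = sense;
	c.request_bufflen = sizeof(sense);
	/* ...autosense transfer happens here... */
	restore_cmd(&c, &s);		/* hand the original command back */
	printf("restored opcode %#x, %u bytes\n",
	       (unsigned)c.cmnd[0], c.request_bufflen);
	return 0;
}
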
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index a1e8ca758594..653818d2f802 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -7289,7 +7289,7 @@ ahd_reset_cmds_pending(struct ahd_softc *ahd)
7289 ahd->flags &= ~AHD_UPDATE_PEND_CMDS; 7289 ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
7290} 7290}
7291 7291
7292void 7292static void
7293ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) 7293ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
7294{ 7294{
7295 cam_status ostat; 7295 cam_status ostat;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index b244c7124179..998999c0a972 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -243,25 +243,6 @@ ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
243static uint32_t aic79xx_no_reset; 243static uint32_t aic79xx_no_reset;
244 244
245/* 245/*
246 * Certain PCI motherboards will scan PCI devices from highest to lowest,
247 * others scan from lowest to highest, and they tend to do all kinds of
248 * strange things when they come into contact with PCI bridge chips. The
249 * net result of all this is that the PCI card that is actually used to boot
250 * the machine is very hard to detect. Most motherboards go from lowest
251 * PCI slot number to highest, and the first SCSI controller found is the
252 * one you boot from. The only exceptions to this are when a controller
253 * has its BIOS disabled. So, we by default sort all of our SCSI controllers
254 * from lowest PCI slot number to highest PCI slot number. We also force
255 * all controllers with their BIOS disabled to the end of the list. This
256 * works on *almost* all computers. Where it doesn't work, we have this
257 * option. Setting this option to non-0 will reverse the order of the sort
258 * to highest first, then lowest, but will still leave cards with their BIOS
259 * disabled at the very end. That should fix everyone up unless there are
260 * really strange cirumstances.
261 */
262static uint32_t aic79xx_reverse_scan;
263
264/*
265 * Should we force EXTENDED translation on a controller. 246 * Should we force EXTENDED translation on a controller.
266 * 0 == Use whatever is in the SEEPROM or default to off 247 * 0 == Use whatever is in the SEEPROM or default to off
267 * 1 == Use whatever is in the SEEPROM or default to on 248 * 1 == Use whatever is in the SEEPROM or default to on
@@ -350,7 +331,6 @@ MODULE_PARM_DESC(aic79xx,
350" periodically to prevent tag starvation.\n" 331" periodically to prevent tag starvation.\n"
351" This may be required by some older disk\n" 332" This may be required by some older disk\n"
352" or drives/RAID arrays.\n" 333" or drives/RAID arrays.\n"
353" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
354" tag_info:<tag_str> Set per-target tag depth\n" 334" tag_info:<tag_str> Set per-target tag depth\n"
355" global_tag_depth:<int> Global tag depth for all targets on all buses\n" 335" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
356" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" 336" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
@@ -1031,7 +1011,6 @@ aic79xx_setup(char *s)
1031#ifdef AHD_DEBUG 1011#ifdef AHD_DEBUG
1032 { "debug", &ahd_debug }, 1012 { "debug", &ahd_debug },
1033#endif 1013#endif
1034 { "reverse_scan", &aic79xx_reverse_scan },
1035 { "periodic_otag", &aic79xx_periodic_otag }, 1014 { "periodic_otag", &aic79xx_periodic_otag },
1036 { "pci_parity", &aic79xx_pci_parity }, 1015 { "pci_parity", &aic79xx_pci_parity },
1037 { "seltime", &aic79xx_seltime }, 1016 { "seltime", &aic79xx_seltime },
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index debf3e2a0798..aa4be8a31415 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -353,7 +353,6 @@ MODULE_PARM_DESC(aic7xxx,
353" periodically to prevent tag starvation.\n" 353" periodically to prevent tag starvation.\n"
354" This may be required by some older disk\n" 354" This may be required by some older disk\n"
355" drives or RAID arrays.\n" 355" drives or RAID arrays.\n"
356" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
357" tag_info:<tag_str> Set per-target tag depth\n" 356" tag_info:<tag_str> Set per-target tag depth\n"
358" global_tag_depth:<int> Global tag depth for every target\n" 357" global_tag_depth:<int> Global tag depth for every target\n"
359" on every bus\n" 358" on every bus\n"
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 3e1053f111dc..4cf7afc31cc7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
2427 info->stats.aborts += 1; 2427 info->stats.aborts += 1;
2428 2428
2429 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); 2429 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no);
2430 __scsi_print_command(SCpnt->data_cmnd); 2430 __scsi_print_command(SCpnt->cmnd);
2431 2431
2432 print_debug_list(); 2432 print_debug_list();
2433 fas216_dumpstate(info); 2433 fas216_dumpstate(info);
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 94b1261a259d..19745a31072b 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -105,9 +105,6 @@ enum {
105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */ 105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ 106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ 107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108 PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
109 /* ICH6/7 use different scheme for map value */
110 PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
111 108
112 /* combined mode. if set, PATA is channel 0. 109 /* combined mode. if set, PATA is channel 0.
113 * if clear, PATA is channel 1. 110 * if clear, PATA is channel 1.
@@ -126,6 +123,7 @@ enum {
126 ich6_sata = 4, 123 ich6_sata = 4,
127 ich6_sata_ahci = 5, 124 ich6_sata_ahci = 5,
128 ich6m_sata_ahci = 6, 125 ich6m_sata_ahci = 6,
126 ich8_sata_ahci = 7,
129 127
130 /* constants for mapping table */ 128 /* constants for mapping table */
131 P0 = 0, /* port 0 */ 129 P0 = 0, /* port 0 */
@@ -141,11 +139,19 @@ enum {
141 139
142struct piix_map_db { 140struct piix_map_db {
143 const u32 mask; 141 const u32 mask;
142 const u16 port_enable;
143 const int present_shift;
144 const int map[][4]; 144 const int map[][4];
145}; 145};
146 146
147struct piix_host_priv {
148 const int *map;
149 const struct piix_map_db *map_db;
150};
151
147static int piix_init_one (struct pci_dev *pdev, 152static int piix_init_one (struct pci_dev *pdev,
148 const struct pci_device_id *ent); 153 const struct pci_device_id *ent);
154static void piix_host_stop(struct ata_host_set *host_set);
149static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 155static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
150static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 156static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
151static void piix_pata_error_handler(struct ata_port *ap); 157static void piix_pata_error_handler(struct ata_port *ap);
@@ -186,11 +192,11 @@ static const struct pci_device_id piix_pci_tbl[] = {
186 /* Enterprise Southbridge 2 (where's the datasheet?) */ 192 /* Enterprise Southbridge 2 (where's the datasheet?) */
187 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 193 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
188 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ 194 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
189 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 195 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
190 /* SATA Controller 2 IDE (ICH8, ditto) */ 196 /* SATA Controller 2 IDE (ICH8, ditto) */
191 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 197 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
192 /* Mobile SATA Controller IDE (ICH8M, ditto) */ 198 /* Mobile SATA Controller IDE (ICH8M, ditto) */
193 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, 199 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
194 200
195 { } /* terminate list */ 201 { } /* terminate list */
196}; 202};
@@ -254,7 +260,7 @@ static const struct ata_port_operations piix_pata_ops = {
254 260
255 .port_start = ata_port_start, 261 .port_start = ata_port_start,
256 .port_stop = ata_port_stop, 262 .port_stop = ata_port_stop,
257 .host_stop = ata_host_stop, 263 .host_stop = piix_host_stop,
258}; 264};
259 265
260static const struct ata_port_operations piix_sata_ops = { 266static const struct ata_port_operations piix_sata_ops = {
@@ -284,11 +290,13 @@ static const struct ata_port_operations piix_sata_ops = {
284 290
285 .port_start = ata_port_start, 291 .port_start = ata_port_start,
286 .port_stop = ata_port_stop, 292 .port_stop = ata_port_stop,
287 .host_stop = ata_host_stop, 293 .host_stop = piix_host_stop,
288}; 294};
289 295
290static struct piix_map_db ich5_map_db = { 296static const struct piix_map_db ich5_map_db = {
291 .mask = 0x7, 297 .mask = 0x7,
298 .port_enable = 0x3,
299 .present_shift = 4,
292 .map = { 300 .map = {
293 /* PM PS SM SS MAP */ 301 /* PM PS SM SS MAP */
294 { P0, NA, P1, NA }, /* 000b */ 302 { P0, NA, P1, NA }, /* 000b */
@@ -302,8 +310,10 @@ static struct piix_map_db ich5_map_db = {
302 }, 310 },
303}; 311};
304 312
305static struct piix_map_db ich6_map_db = { 313static const struct piix_map_db ich6_map_db = {
306 .mask = 0x3, 314 .mask = 0x3,
315 .port_enable = 0xf,
316 .present_shift = 4,
307 .map = { 317 .map = {
308 /* PM PS SM SS MAP */ 318 /* PM PS SM SS MAP */
309 { P0, P2, P1, P3 }, /* 00b */ 319 { P0, P2, P1, P3 }, /* 00b */
@@ -313,8 +323,10 @@ static struct piix_map_db ich6_map_db = {
313 }, 323 },
314}; 324};
315 325
316static struct piix_map_db ich6m_map_db = { 326static const struct piix_map_db ich6m_map_db = {
317 .mask = 0x3, 327 .mask = 0x3,
328 .port_enable = 0x5,
329 .present_shift = 4,
318 .map = { 330 .map = {
319 /* PM PS SM SS MAP */ 331 /* PM PS SM SS MAP */
320 { P0, P2, RV, RV }, /* 00b */ 332 { P0, P2, RV, RV }, /* 00b */
@@ -324,6 +336,28 @@ static struct piix_map_db ich6m_map_db = {
324 }, 336 },
325}; 337};
326 338
339static const struct piix_map_db ich8_map_db = {
340 .mask = 0x3,
341 .port_enable = 0x3,
342 .present_shift = 8,
343 .map = {
344 /* PM PS SM SS MAP */
345 { P0, NA, P1, NA }, /* 00b (hardwired) */
346 { RV, RV, RV, RV },
347 { RV, RV, RV, RV }, /* 10b (never) */
348 { RV, RV, RV, RV },
349 },
350};
351
352static const struct piix_map_db *piix_map_db_table[] = {
353 [ich5_sata] = &ich5_map_db,
354 [esb_sata] = &ich5_map_db,
355 [ich6_sata] = &ich6_map_db,
356 [ich6_sata_ahci] = &ich6_map_db,
357 [ich6m_sata_ahci] = &ich6m_map_db,
358 [ich8_sata_ahci] = &ich8_map_db,
359};
360
327static struct ata_port_info piix_port_info[] = { 361static struct ata_port_info piix_port_info[] = {
328 /* piix4_pata */ 362 /* piix4_pata */
329 { 363 {
@@ -356,63 +390,69 @@ static struct ata_port_info piix_port_info[] = {
356 /* ich5_sata */ 390 /* ich5_sata */
357 { 391 {
358 .sht = &piix_sht, 392 .sht = &piix_sht,
359 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | 393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
360 PIIX_FLAG_CHECKINTR,
361 .pio_mask = 0x1f, /* pio0-4 */ 394 .pio_mask = 0x1f, /* pio0-4 */
362 .mwdma_mask = 0x07, /* mwdma0-2 */ 395 .mwdma_mask = 0x07, /* mwdma0-2 */
363 .udma_mask = 0x7f, /* udma0-6 */ 396 .udma_mask = 0x7f, /* udma0-6 */
364 .port_ops = &piix_sata_ops, 397 .port_ops = &piix_sata_ops,
365 .private_data = &ich5_map_db,
366 }, 398 },
367 399
368 /* i6300esb_sata */ 400 /* i6300esb_sata */
369 { 401 {
370 .sht = &piix_sht, 402 .sht = &piix_sht,
371 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | 403 .host_flags = ATA_FLAG_SATA |
372 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, 404 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
373 .pio_mask = 0x1f, /* pio0-4 */ 405 .pio_mask = 0x1f, /* pio0-4 */
374 .mwdma_mask = 0x07, /* mwdma0-2 */ 406 .mwdma_mask = 0x07, /* mwdma0-2 */
375 .udma_mask = 0x7f, /* udma0-6 */ 407 .udma_mask = 0x7f, /* udma0-6 */
376 .port_ops = &piix_sata_ops, 408 .port_ops = &piix_sata_ops,
377 .private_data = &ich5_map_db,
378 }, 409 },
379 410
380 /* ich6_sata */ 411 /* ich6_sata */
381 { 412 {
382 .sht = &piix_sht, 413 .sht = &piix_sht,
383 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 414 .host_flags = ATA_FLAG_SATA |
384 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, 415 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
385 .pio_mask = 0x1f, /* pio0-4 */ 416 .pio_mask = 0x1f, /* pio0-4 */
386 .mwdma_mask = 0x07, /* mwdma0-2 */ 417 .mwdma_mask = 0x07, /* mwdma0-2 */
387 .udma_mask = 0x7f, /* udma0-6 */ 418 .udma_mask = 0x7f, /* udma0-6 */
388 .port_ops = &piix_sata_ops, 419 .port_ops = &piix_sata_ops,
389 .private_data = &ich6_map_db,
390 }, 420 },
391 421
392 /* ich6_sata_ahci */ 422 /* ich6_sata_ahci */
393 { 423 {
394 .sht = &piix_sht, 424 .sht = &piix_sht,
395 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 425 .host_flags = ATA_FLAG_SATA |
396 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 426 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
397 PIIX_FLAG_AHCI, 427 PIIX_FLAG_AHCI,
398 .pio_mask = 0x1f, /* pio0-4 */ 428 .pio_mask = 0x1f, /* pio0-4 */
399 .mwdma_mask = 0x07, /* mwdma0-2 */ 429 .mwdma_mask = 0x07, /* mwdma0-2 */
400 .udma_mask = 0x7f, /* udma0-6 */ 430 .udma_mask = 0x7f, /* udma0-6 */
401 .port_ops = &piix_sata_ops, 431 .port_ops = &piix_sata_ops,
402 .private_data = &ich6_map_db,
403 }, 432 },
404 433
405 /* ich6m_sata_ahci */ 434 /* ich6m_sata_ahci */
406 { 435 {
407 .sht = &piix_sht, 436 .sht = &piix_sht,
408 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 437 .host_flags = ATA_FLAG_SATA |
438 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
439 PIIX_FLAG_AHCI,
440 .pio_mask = 0x1f, /* pio0-4 */
441 .mwdma_mask = 0x07, /* mwdma0-2 */
442 .udma_mask = 0x7f, /* udma0-6 */
443 .port_ops = &piix_sata_ops,
444 },
445
446 /* ich8_sata_ahci */
447 {
448 .sht = &piix_sht,
449 .host_flags = ATA_FLAG_SATA |
409 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 450 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
410 PIIX_FLAG_AHCI, 451 PIIX_FLAG_AHCI,
411 .pio_mask = 0x1f, /* pio0-4 */ 452 .pio_mask = 0x1f, /* pio0-4 */
412 .mwdma_mask = 0x07, /* mwdma0-2 */ 453 .mwdma_mask = 0x07, /* mwdma0-2 */
413 .udma_mask = 0x7f, /* udma0-6 */ 454 .udma_mask = 0x7f, /* udma0-6 */
414 .port_ops = &piix_sata_ops, 455 .port_ops = &piix_sata_ops,
415 .private_data = &ich6m_map_db,
416 }, 456 },
417}; 457};
418 458
@@ -508,46 +548,29 @@ static void piix_pata_error_handler(struct ata_port *ap)
508static int piix_sata_prereset(struct ata_port *ap) 548static int piix_sata_prereset(struct ata_port *ap)
509{ 549{
510 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 550 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
511 const unsigned int *map = ap->host_set->private_data; 551 struct piix_host_priv *hpriv = ap->host_set->private_data;
552 const unsigned int *map = hpriv->map;
512 int base = 2 * ap->hard_port_no; 553 int base = 2 * ap->hard_port_no;
513 unsigned int present_mask = 0; 554 unsigned int present = 0;
514 int port, i; 555 int port, i;
515 u8 pcs; 556 u16 pcs;
516 557
517 pci_read_config_byte(pdev, ICH5_PCS, &pcs); 558 pci_read_config_word(pdev, ICH5_PCS, &pcs);
518 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base); 559 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
519 560
520 /* enable all ports on this ap and wait for them to settle */
521 for (i = 0; i < 2; i++) {
522 port = map[base + i];
523 if (port >= 0)
524 pcs |= 1 << port;
525 }
526
527 pci_write_config_byte(pdev, ICH5_PCS, pcs);
528 msleep(100);
529
530 /* let's see which devices are present */
531 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
532
533 for (i = 0; i < 2; i++) { 561 for (i = 0; i < 2; i++) {
534 port = map[base + i]; 562 port = map[base + i];
535 if (port < 0) 563 if (port < 0)
536 continue; 564 continue;
537 if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port)) 565 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
538 present_mask |= 1 << i; 566 (pcs & 1 << (hpriv->map_db->present_shift + port)))
539 else 567 present = 1;
540 pcs &= ~(1 << port);
541 } 568 }
542 569
543 /* disable offline ports on non-AHCI controllers */
544 if (!(ap->flags & PIIX_FLAG_AHCI))
545 pci_write_config_byte(pdev, ICH5_PCS, pcs);
546
547 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", 570 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
548 ap->id, pcs, present_mask); 571 ap->id, pcs, present_mask);
549 572
550 if (!present_mask) { 573 if (!present) {
551 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); 574 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
552 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 575 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
553 return 0; 576 return 0;
@@ -761,10 +784,27 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
761 return no_piix_dma; 784 return no_piix_dma;
762} 785}
763 786
787static void __devinit piix_init_pcs(struct pci_dev *pdev,
788 const struct piix_map_db *map_db)
789{
790 u16 pcs, new_pcs;
791
792 pci_read_config_word(pdev, ICH5_PCS, &pcs);
793
794 new_pcs = pcs | map_db->port_enable;
795
796 if (new_pcs != pcs) {
797 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
798 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
799 msleep(150);
800 }
801}
802
764static void __devinit piix_init_sata_map(struct pci_dev *pdev, 803static void __devinit piix_init_sata_map(struct pci_dev *pdev,
765 struct ata_port_info *pinfo) 804 struct ata_port_info *pinfo,
805 const struct piix_map_db *map_db)
766{ 806{
767 struct piix_map_db *map_db = pinfo[0].private_data; 807 struct piix_host_priv *hpriv = pinfo[0].private_data;
768 const unsigned int *map; 808 const unsigned int *map;
769 int i, invalid_map = 0; 809 int i, invalid_map = 0;
770 u8 map_value; 810 u8 map_value;
@@ -805,8 +845,8 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
805 dev_printk(KERN_ERR, &pdev->dev, 845 dev_printk(KERN_ERR, &pdev->dev,
806 "invalid MAP value %u\n", map_value); 846 "invalid MAP value %u\n", map_value);
807 847
808 pinfo[0].private_data = (void *)map; 848 hpriv->map = map;
809 pinfo[1].private_data = (void *)map; 849 hpriv->map_db = map_db;
810} 850}
811 851
812/** 852/**
@@ -829,6 +869,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
829 static int printed_version; 869 static int printed_version;
830 struct ata_port_info port_info[2]; 870 struct ata_port_info port_info[2];
831 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; 871 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
872 struct piix_host_priv *hpriv;
832 unsigned long host_flags; 873 unsigned long host_flags;
833 874
834 if (!printed_version++) 875 if (!printed_version++)
@@ -839,8 +880,14 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
839 if (!in_module_init) 880 if (!in_module_init)
840 return -ENODEV; 881 return -ENODEV;
841 882
883 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
884 if (!hpriv)
885 return -ENOMEM;
886
842 port_info[0] = piix_port_info[ent->driver_data]; 887 port_info[0] = piix_port_info[ent->driver_data];
843 port_info[1] = piix_port_info[ent->driver_data]; 888 port_info[1] = piix_port_info[ent->driver_data];
889 port_info[0].private_data = hpriv;
890 port_info[1].private_data = hpriv;
844 891
845 host_flags = port_info[0].host_flags; 892 host_flags = port_info[0].host_flags;
846 893
@@ -855,8 +902,11 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
855 } 902 }
856 903
857 /* Initialize SATA map */ 904 /* Initialize SATA map */
858 if (host_flags & ATA_FLAG_SATA) 905 if (host_flags & ATA_FLAG_SATA) {
859 piix_init_sata_map(pdev, port_info); 906 piix_init_sata_map(pdev, port_info,
907 piix_map_db_table[ent->driver_data]);
908 piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]);
909 }
860 910
861 /* On ICH5, some BIOSen disable the interrupt using the 911 /* On ICH5, some BIOSen disable the interrupt using the
862 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. 912 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -879,6 +929,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
879 return ata_pci_init_one(pdev, ppinfo, 2); 929 return ata_pci_init_one(pdev, ppinfo, 2);
880} 930}
881 931
932static void piix_host_stop(struct ata_host_set *host_set)
933{
934 if (host_set->next == NULL)
935 kfree(host_set->private_data);
936 ata_host_stop(host_set);
937}
938
882static int __init piix_init(void) 939static int __init piix_init(void)
883{ 940{
884 int rc; 941 int rc;
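
The ata_piix rework above stops stuffing a bare map pointer into port_info->private_data and instead allocates a small piix_host_priv shared by both ports, selects the map_db from a table indexed by the PCI entry's driver_data, programs the PCS port-enable bits once at init time, and frees the shared structure in a host_stop hook. The plain-C sketch below illustrates only the shared-private-data lifecycle, with the "free exactly once" decision made explicit (the host_set->next == NULL test in the hunk plays the same role); the types and functions here are illustrative, not libata's.

#include <stdlib.h>
#include <stdio.h>

struct host_priv {
	const int *map;		/* shared, read-only configuration */
};

struct port {
	struct host_priv *priv;	/* both ports point at the same allocation */
};

/* Tear-down: the shared private data must be released exactly once,
 * by whichever port is stopped last. */
static void port_stop(struct port *p, int remaining_ports)
{
	if (remaining_ports == 0) {
		free(p->priv);
		puts("host private data freed");
	}
	p->priv = NULL;
}

int main(void)
{
	static const int map[4] = { 0, 2, 1, 3 };
	struct host_priv *priv = calloc(1, sizeof(*priv));
	struct port ports[2];
	int i;

	if (!priv)
		return 1;
	priv->map = map;
	for (i = 0; i < 2; i++)
		ports[i].priv = priv;	/* one allocation, two users */

	port_stop(&ports[0], 1);	/* another port still active: keep it */
	port_stop(&ports[1], 0);	/* last user: free the shared data */
	return 0;
}
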
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 007a14e5c3fd..e397129c90d1 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -507,7 +507,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
507 */ 507 */
508 508
509 if (cmd->use_sg) { 509 if (cmd->use_sg) {
510 cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 510 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
511 cmd->SCp.buffers_residual = cmd->use_sg - 1; 511 cmd->SCp.buffers_residual = cmd->use_sg - 1;
512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+ 512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+
513 cmd->SCp.buffer->offset; 513 cmd->SCp.buffer->offset;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index dddd2acce76f..61f6024b61ba 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -5,6 +5,7 @@
5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) 5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
6 * by D. Gilbert and aeb (20020609) 6 * by D. Gilbert and aeb (20020609)
7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025
8 * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
8 */ 9 */
9 10
10#include <linux/blkdev.h> 11#include <linux/blkdev.h>
@@ -36,55 +37,56 @@ static const char * cdb_byte0_names[] = {
36/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", 37/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
37/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, 38/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL,
38 "Reasssign Blocks", 39 "Reasssign Blocks",
39/* 08-0d */ "Read (6)", NULL, "Write (6)", "Seek (6)", NULL, NULL, 40/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL,
40/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", 41/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
41/* 13-16 */ "Verify (6)", "Recover Buffered Data", "Mode Select (6)", 42/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)",
42 "Reserve (6)", 43 "Reserve(6)",
43/* 17-1a */ "Release (6)", "Copy", "Erase", "Mode Sense (6)", 44/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)",
44/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", 45/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic",
45/* 1e-1f */ "Prevent/Allow Medium Removal", NULL, 46/* 1e-1f */ "Prevent/Allow Medium Removal", NULL,
46/* 20-22 */ NULL, NULL, NULL, 47/* 20-22 */ NULL, NULL, NULL,
47/* 23-28 */ "Read Format Capacities", "Set Window", 48/* 23-28 */ "Read Format Capacities", "Set Window",
48 "Read Capacity (10)", NULL, NULL, "Read (10)", 49 "Read Capacity(10)", NULL, NULL, "Read(10)",
49/* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase (10)", 50/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)",
50 "Read updated block", 51 "Read updated block",
51/* 2e-31 */ "Write Verify (10)", "Verify (10)", "Search High", "Search Equal", 52/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal",
52/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", 53/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position",
53/* 35-37 */ "Synchronize Cache (10)", "Lock/Unlock Cache (10)", 54/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)",
54 "Read Defect Data(10)", 55 "Read Defect Data(10)",
55/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", 56/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer",
56 "Read Buffer", 57 "Read Buffer",
57/* 3d-3f */ "Update Block", "Read Long (10)", "Write Long (10)", 58/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
58/* 40-41 */ "Change Definition", "Write Same (10)", 59/* 40-41 */ "Change Definition", "Write Same(10)",
59/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", 60/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support",
60 "Play audio (10)", "Get configuration", "Play audio msf", 61 "Play audio(10)", "Get configuration", "Play audio msf",
61 "Play audio track/index", 62 "Play audio track/index",
62/* 49-4f */ "Play track relative (10)", "Get event status notification", 63/* 49-4f */ "Play track relative(10)", "Get event status notification",
63 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", 64 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
64 NULL, 65 NULL,
65/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", 66/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info",
66 "Reserve track", "Send OPC info", "Mode Select (10)", 67 "Reserve track", "Send OPC info", "Mode Select(10)",
67/* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue", 68/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue",
68 "Mode Sense (10)", "Close track/session", 69 "Mode Sense(10)", "Close track/session",
69/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", 70/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
70 "Persistent reserve out", 71 "Persistent reserve out",
71/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 72/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
72/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 73/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
73/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 74/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
74/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", 75/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length",
75/* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy", 76/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
76 "Receive copy results", 77 "Receive copy results",
77/* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out", 78/* 85-89 */ "ATA command pass through(16)", "Access control in",
78 "Read (16)", "Memory Export Out (16)", 79 "Access control out", "Read(16)", "Memory Export Out(16)",
79/* 8a-8f */ "Write (16)", NULL, "Read attributes", "Write attributes", 80/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes",
80 "Write and verify (16)", "Verify (16)", 81 "Write and verify(16)", "Verify(16)",
81/* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)", 82/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
82 "Lock/unlock cache (16)", "Write same (16)", NULL, 83 "Lock/unlock cache(16)", "Write same(16)", NULL,
83/* 95-99 */ NULL, NULL, NULL, NULL, NULL, 84/* 95-99 */ NULL, NULL, NULL, NULL, NULL,
84/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in (16)", 85/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)",
85 "Service action out (16)", 86 "Service action out(16)",
86/* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance in", 87/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank",
87 "Maintenance out", "Move medium/play audio(12)", 88 "Security protocol in", "Maintenance in", "Maintenance out",
89 "Move medium/play audio(12)",
88/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", 90/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)",
89 "Play track relative(12)", 91 "Play track relative(12)",
90/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", 92/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance",
@@ -92,12 +94,12 @@ static const char * cdb_byte0_names[] = {
92/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", 94/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)",
93/* b2-b4 */ "Search data low(12)", "Set limits(12)", 95/* b2-b4 */ "Search data low(12)", "Set limits(12)",
94 "Read element status attached", 96 "Read element status attached",
95/* b5-b6 */ "Request volume element address", "Send volume tag, set streaming", 97/* b5-b6 */ "Security protocol out", "Send volume tag, set streaming",
96/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", 98/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf",
97/* ba-bc */ "Redundancy group (in), Scan", 99/* ba-bc */ "Redundancy group (in), Scan",
98 "Redundancy group (out), Set cd-rom speed", "Spare in, Play cd", 100 "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd",
99/* bd-bf */ "Spare out, Mechanism status", "Volume set in, Read cd", 101/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd",
100 "Volume set out, Send DVD structure", 102 "Volume set (out), Send DVD structure",
101}; 103};
102 104
103struct value_name_pair { 105struct value_name_pair {
@@ -112,6 +114,7 @@ static const struct value_name_pair maint_in_arr[] = {
112 {0xc, "Report supported operation codes"}, 114 {0xc, "Report supported operation codes"},
113 {0xd, "Report supported task management functions"}, 115 {0xd, "Report supported task management functions"},
114 {0xe, "Report priority"}, 116 {0xe, "Report priority"},
117 {0xf, "Report timestamp"},
115}; 118};
116#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) 119#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
117 120
@@ -120,6 +123,7 @@ static const struct value_name_pair maint_out_arr[] = {
120 {0xa, "Set target port groups"}, 123 {0xa, "Set target port groups"},
121 {0xb, "Change aliases"}, 124 {0xb, "Change aliases"},
122 {0xe, "Set priority"}, 125 {0xe, "Set priority"},
126 {0xe, "Set timestamp"},
123}; 127};
124#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) 128#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
125 129
@@ -427,6 +431,7 @@ static struct error_info additional[] =
427 {0x001A, "Rewind operation in progress"}, 431 {0x001A, "Rewind operation in progress"},
428 {0x001B, "Set capacity operation in progress"}, 432 {0x001B, "Set capacity operation in progress"},
429 {0x001C, "Verify operation in progress"}, 433 {0x001C, "Verify operation in progress"},
434 {0x001D, "ATA pass through information available"},
430 435
431 {0x0100, "No index/sector signal"}, 436 {0x0100, "No index/sector signal"},
432 437
@@ -438,7 +443,7 @@ static struct error_info additional[] =
438 443
439 {0x0400, "Logical unit not ready, cause not reportable"}, 444 {0x0400, "Logical unit not ready, cause not reportable"},
440 {0x0401, "Logical unit is in process of becoming ready"}, 445 {0x0401, "Logical unit is in process of becoming ready"},
441 {0x0402, "Logical unit not ready, initializing cmd. required"}, 446 {0x0402, "Logical unit not ready, initializing command required"},
442 {0x0403, "Logical unit not ready, manual intervention required"}, 447 {0x0403, "Logical unit not ready, manual intervention required"},
443 {0x0404, "Logical unit not ready, format in progress"}, 448 {0x0404, "Logical unit not ready, format in progress"},
444 {0x0405, "Logical unit not ready, rebuild in progress"}, 449 {0x0405, "Logical unit not ready, rebuild in progress"},
@@ -478,6 +483,9 @@ static struct error_info additional[] =
478 {0x0B00, "Warning"}, 483 {0x0B00, "Warning"},
479 {0x0B01, "Warning - specified temperature exceeded"}, 484 {0x0B01, "Warning - specified temperature exceeded"},
480 {0x0B02, "Warning - enclosure degraded"}, 485 {0x0B02, "Warning - enclosure degraded"},
486 {0x0B03, "Warning - background self-test failed"},
487 {0x0B04, "Warning - background pre-scan detected medium error"},
488 {0x0B05, "Warning - background medium scan detected medium error"},
481 489
482 {0x0C00, "Write error"}, 490 {0x0C00, "Write error"},
483 {0x0C01, "Write error - recovered with auto reallocation"}, 491 {0x0C01, "Write error - recovered with auto reallocation"},
@@ -493,6 +501,7 @@ static struct error_info additional[] =
493 {0x0C0B, "Auxiliary memory write error"}, 501 {0x0C0B, "Auxiliary memory write error"},
494 {0x0C0C, "Write error - unexpected unsolicited data"}, 502 {0x0C0C, "Write error - unexpected unsolicited data"},
495 {0x0C0D, "Write error - not enough unsolicited data"}, 503 {0x0C0D, "Write error - not enough unsolicited data"},
504 {0x0C0F, "Defects in error window"},
496 505
497 {0x0D00, "Error detected by third party temporary initiator"}, 506 {0x0D00, "Error detected by third party temporary initiator"},
498 {0x0D01, "Third party device failure"}, 507 {0x0D01, "Third party device failure"},
@@ -504,11 +513,12 @@ static struct error_info additional[] =
504 {0x0E00, "Invalid information unit"}, 513 {0x0E00, "Invalid information unit"},
505 {0x0E01, "Information unit too short"}, 514 {0x0E01, "Information unit too short"},
506 {0x0E02, "Information unit too long"}, 515 {0x0E02, "Information unit too long"},
516 {0x0E03, "Invalid field in command information unit"},
507 517
508 {0x1000, "Id CRC or ECC error"}, 518 {0x1000, "Id CRC or ECC error"},
509 {0x1001, "Data block guard check failed"}, 519 {0x1001, "Logical block guard check failed"},
510 {0x1002, "Data block application tag check failed"}, 520 {0x1002, "Logical block application tag check failed"},
511 {0x1003, "Data block reference tag check failed"}, 521 {0x1003, "Logical block reference tag check failed"},
512 522
513 {0x1100, "Unrecovered read error"}, 523 {0x1100, "Unrecovered read error"},
514 {0x1101, "Read retries exhausted"}, 524 {0x1101, "Read retries exhausted"},
@@ -530,6 +540,7 @@ static struct error_info additional[] =
530 {0x1111, "Read error - loss of streaming"}, 540 {0x1111, "Read error - loss of streaming"},
531 {0x1112, "Auxiliary memory read error"}, 541 {0x1112, "Auxiliary memory read error"},
532 {0x1113, "Read error - failed retransmission request"}, 542 {0x1113, "Read error - failed retransmission request"},
543 {0x1114, "Read error - lba marked bad by application client"},
533 544
534 {0x1200, "Address mark not found for id field"}, 545 {0x1200, "Address mark not found for id field"},
535 546
@@ -610,11 +621,14 @@ static struct error_info additional[] =
610 {0x2100, "Logical block address out of range"}, 621 {0x2100, "Logical block address out of range"},
611 {0x2101, "Invalid element address"}, 622 {0x2101, "Invalid element address"},
612 {0x2102, "Invalid address for write"}, 623 {0x2102, "Invalid address for write"},
624 {0x2103, "Invalid write crossing layer jump"},
613 625
614 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, 626 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
615 627
616 {0x2400, "Invalid field in cdb"}, 628 {0x2400, "Invalid field in cdb"},
617 {0x2401, "CDB decryption error"}, 629 {0x2401, "CDB decryption error"},
630 {0x2402, "Obsolete"},
631 {0x2403, "Obsolete"},
618 {0x2404, "Security audit value frozen"}, 632 {0x2404, "Security audit value frozen"},
619 {0x2405, "Security working key frozen"}, 633 {0x2405, "Security working key frozen"},
620 {0x2406, "Nonce not unique"}, 634 {0x2406, "Nonce not unique"},
@@ -637,7 +651,10 @@ static struct error_info additional[] =
637 {0x260C, "Invalid operation for copy source or destination"}, 651 {0x260C, "Invalid operation for copy source or destination"},
638 {0x260D, "Copy segment granularity violation"}, 652 {0x260D, "Copy segment granularity violation"},
639 {0x260E, "Invalid parameter while port is enabled"}, 653 {0x260E, "Invalid parameter while port is enabled"},
640 {0x260F, "Invalid data-out buffer integrity"}, 654 {0x260F, "Invalid data-out buffer integrity check value"},
655 {0x2610, "Data decryption key fail limit reached"},
656 {0x2611, "Incomplete key-associated data set"},
657 {0x2612, "Vendor specific key reference not found"},
641 658
642 {0x2700, "Write protected"}, 659 {0x2700, "Write protected"},
643 {0x2701, "Hardware write protected"}, 660 {0x2701, "Hardware write protected"},
@@ -649,6 +666,7 @@ static struct error_info additional[] =
649 666
650 {0x2800, "Not ready to ready change, medium may have changed"}, 667 {0x2800, "Not ready to ready change, medium may have changed"},
651 {0x2801, "Import or export element accessed"}, 668 {0x2801, "Import or export element accessed"},
669 {0x2802, "Format-layer may have changed"},
652 670
653 {0x2900, "Power on, reset, or bus device reset occurred"}, 671 {0x2900, "Power on, reset, or bus device reset occurred"},
654 {0x2901, "Power on occurred"}, 672 {0x2901, "Power on occurred"},
@@ -669,6 +687,11 @@ static struct error_info additional[] =
669 {0x2A07, "Implicit asymmetric access state transition failed"}, 687 {0x2A07, "Implicit asymmetric access state transition failed"},
670 {0x2A08, "Priority changed"}, 688 {0x2A08, "Priority changed"},
671 {0x2A09, "Capacity data has changed"}, 689 {0x2A09, "Capacity data has changed"},
690 {0x2A10, "Timestamp changed"},
691 {0x2A11, "Data encryption parameters changed by another i_t nexus"},
692 {0x2A12, "Data encryption parameters changed by vendor specific "
693 "event"},
694 {0x2A13, "Data encryption key instance counter has changed"},
672 695
673 {0x2B00, "Copy cannot execute since host cannot disconnect"}, 696 {0x2B00, "Copy cannot execute since host cannot disconnect"},
674 697
@@ -690,6 +713,7 @@ static struct error_info additional[] =
690 {0x2E00, "Insufficient time for operation"}, 713 {0x2E00, "Insufficient time for operation"},
691 714
692 {0x2F00, "Commands cleared by another initiator"}, 715 {0x2F00, "Commands cleared by another initiator"},
716 {0x2F01, "Commands cleared by power loss notification"},
693 717
694 {0x3000, "Incompatible medium installed"}, 718 {0x3000, "Incompatible medium installed"},
695 {0x3001, "Cannot read medium - unknown format"}, 719 {0x3001, "Cannot read medium - unknown format"},
@@ -702,7 +726,8 @@ static struct error_info additional[] =
702 {0x3008, "Cannot write - application code mismatch"}, 726 {0x3008, "Cannot write - application code mismatch"},
703 {0x3009, "Current session not fixated for append"}, 727 {0x3009, "Current session not fixated for append"},
704 {0x300A, "Cleaning request rejected"}, 728 {0x300A, "Cleaning request rejected"},
705 {0x300C, "WORM medium, overwrite attempted"}, 729 {0x300C, "WORM medium - overwrite attempted"},
730 {0x300D, "WORM medium - integrity check"},
706 {0x3010, "Medium not formatted"}, 731 {0x3010, "Medium not formatted"},
707 732
708 {0x3100, "Medium format corrupted"}, 733 {0x3100, "Medium format corrupted"},
@@ -790,6 +815,9 @@ static struct error_info additional[] =
790 {0x3F0F, "Echo buffer overwritten"}, 815 {0x3F0F, "Echo buffer overwritten"},
791 {0x3F10, "Medium loadable"}, 816 {0x3F10, "Medium loadable"},
792 {0x3F11, "Medium auxiliary memory accessible"}, 817 {0x3F11, "Medium auxiliary memory accessible"},
818 {0x3F12, "iSCSI IP address added"},
819 {0x3F13, "iSCSI IP address removed"},
820 {0x3F14, "iSCSI IP address changed"},
793/* 821/*
794 * {0x40NN, "Ram failure"}, 822 * {0x40NN, "Ram failure"},
795 * {0x40NN, "Diagnostic failure on component nn"}, 823 * {0x40NN, "Diagnostic failure on component nn"},
@@ -799,6 +827,7 @@ static struct error_info additional[] =
799 {0x4300, "Message error"}, 827 {0x4300, "Message error"},
800 828
801 {0x4400, "Internal target failure"}, 829 {0x4400, "Internal target failure"},
830 {0x4471, "ATA device failed set features"},
802 831
803 {0x4500, "Select or reselect failure"}, 832 {0x4500, "Select or reselect failure"},
804 833
@@ -807,9 +836,10 @@ static struct error_info additional[] =
807 {0x4700, "Scsi parity error"}, 836 {0x4700, "Scsi parity error"},
808 {0x4701, "Data phase CRC error detected"}, 837 {0x4701, "Data phase CRC error detected"},
809 {0x4702, "Scsi parity error detected during st data phase"}, 838 {0x4702, "Scsi parity error detected during st data phase"},
810 {0x4703, "Information unit CRC error detected"}, 839 {0x4703, "Information unit iuCRC error detected"},
811 {0x4704, "Asynchronous information protection error detected"}, 840 {0x4704, "Asynchronous information protection error detected"},
812 {0x4705, "Protocol service CRC error"}, 841 {0x4705, "Protocol service CRC error"},
842 {0x4706, "Phy test function in progress"},
813 {0x477f, "Some commands cleared by iSCSI Protocol event"}, 843 {0x477f, "Some commands cleared by iSCSI Protocol event"},
814 844
815 {0x4800, "Initiator detected error message received"}, 845 {0x4800, "Initiator detected error message received"},
@@ -844,6 +874,8 @@ static struct error_info additional[] =
844 {0x5300, "Media load or eject failed"}, 874 {0x5300, "Media load or eject failed"},
845 {0x5301, "Unload tape failure"}, 875 {0x5301, "Unload tape failure"},
846 {0x5302, "Medium removal prevented"}, 876 {0x5302, "Medium removal prevented"},
877 {0x5303, "Medium removal prevented by data transfer element"},
878 {0x5304, "Medium thread or unthread failure"},
847 879
848 {0x5400, "Scsi to host system interface failure"}, 880 {0x5400, "Scsi to host system interface failure"},
849 881
@@ -855,6 +887,7 @@ static struct error_info additional[] =
855 {0x5505, "Insufficient access control resources"}, 887 {0x5505, "Insufficient access control resources"},
856 {0x5506, "Auxiliary memory out of space"}, 888 {0x5506, "Auxiliary memory out of space"},
857 {0x5507, "Quota error"}, 889 {0x5507, "Quota error"},
890 {0x5508, "Maximum number of supplemental decryption keys exceeded"},
858 891
859 {0x5700, "Unable to recover table-of-contents"}, 892 {0x5700, "Unable to recover table-of-contents"},
860 893
@@ -1004,6 +1037,7 @@ static struct error_info additional[] =
1004 {0x6708, "Assign failure occurred"}, 1037 {0x6708, "Assign failure occurred"},
1005 {0x6709, "Multiply assigned logical unit"}, 1038 {0x6709, "Multiply assigned logical unit"},
1006 {0x670A, "Set target port groups command failed"}, 1039 {0x670A, "Set target port groups command failed"},
1040 {0x670B, "ATA device feature not enabled"},
1007 1041
1008 {0x6800, "Logical unit not configured"}, 1042 {0x6800, "Logical unit not configured"},
1009 1043
@@ -1030,6 +1064,8 @@ static struct error_info additional[] =
1030 {0x6F03, "Read of scrambled sector without authentication"}, 1064 {0x6F03, "Read of scrambled sector without authentication"},
1031 {0x6F04, "Media region code is mismatched to logical unit region"}, 1065 {0x6F04, "Media region code is mismatched to logical unit region"},
1032 {0x6F05, "Drive region must be permanent/region reset count error"}, 1066 {0x6F05, "Drive region must be permanent/region reset count error"},
1067 {0x6F06, "Insufficient block count for binding nonce recording"},
1068 {0x6F07, "Conflict in binding nonce recording"},
1033/* 1069/*
1034 * {0x70NN, "Decompression exception short algorithm id of nn"}, 1070 * {0x70NN, "Decompression exception short algorithm id of nn"},
1035 */ 1071 */
@@ -1041,6 +1077,8 @@ static struct error_info additional[] =
1041 {0x7203, "Session fixation error - incomplete track in session"}, 1077 {0x7203, "Session fixation error - incomplete track in session"},
1042 {0x7204, "Empty or partially written reserved track"}, 1078 {0x7204, "Empty or partially written reserved track"},
1043 {0x7205, "No more track reservations allowed"}, 1079 {0x7205, "No more track reservations allowed"},
1080 {0x7206, "RMZ extension is not allowed"},
1081 {0x7207, "No more test zone extensions are allowed"},
1044 1082
1045 {0x7300, "Cd control error"}, 1083 {0x7300, "Cd control error"},
1046 {0x7301, "Power calibration area almost full"}, 1084 {0x7301, "Power calibration area almost full"},
@@ -1049,6 +1087,18 @@ static struct error_info additional[] =
1049 {0x7304, "Program memory area update failure"}, 1087 {0x7304, "Program memory area update failure"},
1050 {0x7305, "Program memory area is full"}, 1088 {0x7305, "Program memory area is full"},
1051 {0x7306, "RMA/PMA is almost full"}, 1089 {0x7306, "RMA/PMA is almost full"},
1090 {0x7310, "Current power calibration area almost full"},
1091 {0x7311, "Current power calibration area is full"},
1092 {0x7317, "RDZ is full"},
1093
1094 {0x7400, "Security error"},
1095 {0x7401, "Unable to decrypt data"},
1096 {0x7402, "Unencrypted data encountered while decrypting"},
1097 {0x7403, "Incorrect data encryption key"},
1098 {0x7404, "Cryptographic integrity validation failed"},
1099 {0x7405, "Error decrypting data"},
1100 {0x7471, "Logical unit access not authorized"},
1101
1052 {0, NULL} 1102 {0, NULL}
1053}; 1103};
1054 1104
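A note on the additional[] entries above: each 16-bit key packs the SCSI additional sense code (ASC) in the high byte and the qualifier (ASCQ) in the low byte, so the new 0x7401 entry is ASC 0x74 / ASCQ 0x01. The following standalone sketch uses a mock table and mock struct name of my own (not the kernel's scsi_constants.c) purely to illustrate how such a key is formed and looked up:

/* Standalone illustration with a mock table -- not kernel code.
 * Key layout assumed from the entries above: (ASC << 8) | ASCQ.
 */
#include <stdio.h>

struct sample_error_info {
        unsigned short code12;          /* (ASC << 8) | ASCQ */
        const char *text;
};

static const struct sample_error_info sample[] = {
        {0x7400, "Security error"},
        {0x7401, "Unable to decrypt data"},
        {0, NULL}
};

static const char *lookup(unsigned char asc, unsigned char ascq)
{
        unsigned short key = (unsigned short)((asc << 8) | ascq);
        const struct sample_error_info *e;

        for (e = sample; e->text; e++)
                if (e->code12 == key)
                        return e->text;
        return "unknown";
}

int main(void)
{
        printf("%s\n", lookup(0x74, 0x01));     /* prints "Unable to decrypt data" */
        return 0;
}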
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 10573c24a50b..98bd22714d0d 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1397,7 +1397,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1397 sp->SCp.ptr = NULL; 1397 sp->SCp.ptr = NULL;
1398 } 1398 }
1399 } else { 1399 } else {
1400 sp->SCp.buffer = (struct scatterlist *) sp->buffer; 1400 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
1401 sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, 1401 sp->SCp.buffers_residual = sbus_map_sg(esp->sdev,
1402 sp->SCp.buffer, 1402 sp->SCp.buffer,
1403 sp->use_sg, 1403 sp->use_sg,
@@ -1410,7 +1410,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1410static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) 1410static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1411{ 1411{
1412 if (sp->use_sg) { 1412 if (sp->use_sg) {
1413 sbus_unmap_sg(esp->sdev, sp->buffer, sp->use_sg, 1413 sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg,
1414 sp->sc_data_direction); 1414 sp->sc_data_direction);
1415 } else if (sp->request_bufflen) { 1415 } else if (sp->request_bufflen) {
1416 sbus_unmap_single(esp->sdev, 1416 sbus_unmap_single(esp->sdev,
@@ -2754,18 +2754,15 @@ static int esp_do_data_finale(struct esp *esp)
2754 */ 2754 */
2755static int esp_should_clear_sync(struct scsi_cmnd *sp) 2755static int esp_should_clear_sync(struct scsi_cmnd *sp)
2756{ 2756{
2757 u8 cmd1 = sp->cmnd[0]; 2757 u8 cmd = sp->cmnd[0];
2758 u8 cmd2 = sp->data_cmnd[0];
2759 2758
2760 /* These cases are for spinning up a disk and 2759 /* These cases are for spinning up a disk and
2761 * waiting for that spinup to complete. 2760 * waiting for that spinup to complete.
2762 */ 2761 */
2763 if (cmd1 == START_STOP || 2762 if (cmd == START_STOP)
2764 cmd2 == START_STOP)
2765 return 0; 2763 return 0;
2766 2764
2767 if (cmd1 == TEST_UNIT_READY || 2765 if (cmd == TEST_UNIT_READY)
2768 cmd2 == TEST_UNIT_READY)
2769 return 0; 2766 return 0;
2770 2767
2771 /* One more special case for SCSI tape drives, 2768 /* One more special case for SCSI tape drives,
@@ -2773,8 +2770,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp)
2773 * completion of a rewind or tape load operation. 2770 * completion of a rewind or tape load operation.
2774 */ 2771 */
2775 if (sp->device->type == TYPE_TAPE) { 2772 if (sp->device->type == TYPE_TAPE) {
2776 if (cmd1 == MODE_SENSE || 2773 if (cmd == MODE_SENSE)
2777 cmd2 == MODE_SENSE)
2778 return 0; 2774 return 0;
2779 } 2775 }
2780 2776
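The esp.c hunks above, like the jazz_esp, mac53c94, mesh, pluto and qlogicpti hunks later in this diff, replace uses of scsi_cmnd->buffer/->bufflen with ->request_buffer/->request_bufflen. The convention those conversions rely on is: when use_sg is non-zero, request_buffer points at an array of use_sg scatterlist entries; when it is zero, request_buffer is a single flat buffer of request_bufflen bytes. A minimal self-contained sketch with mock types (the mock_* names are mine, not <scsi/scsi_cmnd.h>):

/* Illustrative mock types only -- not the real scsi_cmnd/scatterlist. */
#include <stdio.h>
#include <stddef.h>

struct mock_scatterlist {
        void *addr;
        unsigned int length;
};

struct mock_scsi_cmnd {
        unsigned short use_sg;          /* number of scatterlist entries, or 0 */
        void *request_buffer;           /* scatterlist array or flat buffer    */
        unsigned int request_bufflen;   /* only meaningful when use_sg == 0    */
};

static size_t total_transfer_len(const struct mock_scsi_cmnd *cmd)
{
        size_t total = 0;

        if (cmd->use_sg) {
                const struct mock_scatterlist *sg = cmd->request_buffer;
                unsigned int i;

                for (i = 0; i < cmd->use_sg; i++)
                        total += sg[i].length;          /* walk each segment */
        } else {
                total = cmd->request_bufflen;           /* single linear buffer */
        }
        return total;
}

int main(void)
{
        struct mock_scatterlist sg[2] = { { NULL, 512 }, { NULL, 1024 } };
        struct mock_scsi_cmnd cmd = { 2, sg, 0 };

        printf("%zu bytes\n", total_transfer_len(&cmd));        /* 1536 bytes */
        return 0;
}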
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 7eed0b098171..6aeb5f003c3c 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -81,7 +81,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
81 int rc; 81 int rc;
82 82
83 single_host_data = hostdata; 83 single_host_data = hostdata;
84 rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0); 84 rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);
85 if (rc < 0) { 85 if (rc < 0) {
86 printk("viopath_open failed with rc %d in open_event_path\n", 86 printk("viopath_open failed with rc %d in open_event_path\n",
87 rc); 87 rc);
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 242b8873b333..ed22b96580c6 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -238,6 +238,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
238 if (rc == 2) { 238 if (rc == 2) {
239 /* Adapter is good, but other end is not ready */ 239 /* Adapter is good, but other end is not ready */
240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); 240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
241 retrc = 0;
241 } else if (rc != 0) { 242 } else if (rc != 0) {
242 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); 243 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
243 goto reg_crq_failed; 244 goto reg_crq_failed;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 3fd8a96f2af3..bfac4441d89f 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -257,7 +257,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
258{ 258{
259 int sz = sp->use_sg - 1; 259 int sz = sp->use_sg - 1;
260 struct scatterlist *sg = (struct scatterlist *)sp->buffer; 260 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
261 261
262 while(sz >= 0) { 262 while(sz >= 0) {
263 vdma_free(sg[sz].dma_address); 263 vdma_free(sg[sz].dma_address);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 4b6aa30f4d68..29f59345305d 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -764,12 +764,27 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
764 unsigned int action) 764 unsigned int action)
765{ 765{
766 unsigned long flags; 766 unsigned long flags;
767 struct ata_eh_info *ehi = &ap->eh_info;
768 struct ata_eh_context *ehc = &ap->eh_context;
767 769
768 spin_lock_irqsave(ap->lock, flags); 770 spin_lock_irqsave(ap->lock, flags);
769 771
770 ata_eh_clear_action(dev, &ap->eh_info, action); 772 /* Reset is represented by combination of actions and EHI
773 * flags. Suck in all related bits before clearing eh_info to
774 * avoid losing requested action.
775 */
776 if (action & ATA_EH_RESET_MASK) {
777 ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
778 ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;
779
780 /* make sure all reset actions are cleared & clear EHI flags */
781 action |= ATA_EH_RESET_MASK;
782 ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
783 }
784
785 ata_eh_clear_action(dev, ehi, action);
771 786
772 if (!(ap->eh_context.i.flags & ATA_EHI_QUIET)) 787 if (!(ehc->i.flags & ATA_EHI_QUIET))
773 ap->pflags |= ATA_PFLAG_RECOVERED; 788 ap->pflags |= ATA_PFLAG_RECOVERED;
774 789
775 spin_unlock_irqrestore(ap->lock, flags); 790 spin_unlock_irqrestore(ap->lock, flags);
@@ -790,6 +805,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
790static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, 805static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
791 unsigned int action) 806 unsigned int action)
792{ 807{
808 /* if reset is complete, clear all reset actions & reset modifier */
809 if (action & ATA_EH_RESET_MASK) {
810 action |= ATA_EH_RESET_MASK;
811 ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
812 }
813
793 ata_eh_clear_action(dev, &ap->eh_context.i, action); 814 ata_eh_clear_action(dev, &ap->eh_context.i, action);
794} 815}
795 816
@@ -1276,8 +1297,6 @@ static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1276static void ata_eh_autopsy(struct ata_port *ap) 1297static void ata_eh_autopsy(struct ata_port *ap)
1277{ 1298{
1278 struct ata_eh_context *ehc = &ap->eh_context; 1299 struct ata_eh_context *ehc = &ap->eh_context;
1279 unsigned int action = ehc->i.action;
1280 struct ata_device *failed_dev = NULL;
1281 unsigned int all_err_mask = 0; 1300 unsigned int all_err_mask = 0;
1282 int tag, is_io = 0; 1301 int tag, is_io = 0;
1283 u32 serror; 1302 u32 serror;
@@ -1294,7 +1313,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1294 ehc->i.serror |= serror; 1313 ehc->i.serror |= serror;
1295 ata_eh_analyze_serror(ap); 1314 ata_eh_analyze_serror(ap);
1296 } else if (rc != -EOPNOTSUPP) 1315 } else if (rc != -EOPNOTSUPP)
1297 action |= ATA_EH_HARDRESET; 1316 ehc->i.action |= ATA_EH_HARDRESET;
1298 1317
1299 /* analyze NCQ failure */ 1318 /* analyze NCQ failure */
1300 ata_eh_analyze_ncq_error(ap); 1319 ata_eh_analyze_ncq_error(ap);
@@ -1315,7 +1334,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1315 qc->err_mask |= ehc->i.err_mask; 1334 qc->err_mask |= ehc->i.err_mask;
1316 1335
1317 /* analyze TF */ 1336 /* analyze TF */
1318 action |= ata_eh_analyze_tf(qc, &qc->result_tf); 1337 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1319 1338
1320 /* DEV errors are probably spurious in case of ATA_BUS error */ 1339 /* DEV errors are probably spurious in case of ATA_BUS error */
1321 if (qc->err_mask & AC_ERR_ATA_BUS) 1340 if (qc->err_mask & AC_ERR_ATA_BUS)
@@ -1329,11 +1348,11 @@ static void ata_eh_autopsy(struct ata_port *ap)
1329 /* SENSE_VALID trumps dev/unknown error and revalidation */ 1348 /* SENSE_VALID trumps dev/unknown error and revalidation */
1330 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1349 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1331 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1350 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1332 action &= ~ATA_EH_REVALIDATE; 1351 ehc->i.action &= ~ATA_EH_REVALIDATE;
1333 } 1352 }
1334 1353
1335 /* accumulate error info */ 1354 /* accumulate error info */
1336 failed_dev = qc->dev; 1355 ehc->i.dev = qc->dev;
1337 all_err_mask |= qc->err_mask; 1356 all_err_mask |= qc->err_mask;
1338 if (qc->flags & ATA_QCFLAG_IO) 1357 if (qc->flags & ATA_QCFLAG_IO)
1339 is_io = 1; 1358 is_io = 1;
@@ -1342,25 +1361,22 @@ static void ata_eh_autopsy(struct ata_port *ap)
1342 /* enforce default EH actions */ 1361 /* enforce default EH actions */
1343 if (ap->pflags & ATA_PFLAG_FROZEN || 1362 if (ap->pflags & ATA_PFLAG_FROZEN ||
1344 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1363 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1345 action |= ATA_EH_SOFTRESET; 1364 ehc->i.action |= ATA_EH_SOFTRESET;
1346 else if (all_err_mask) 1365 else if (all_err_mask)
1347 action |= ATA_EH_REVALIDATE; 1366 ehc->i.action |= ATA_EH_REVALIDATE;
1348 1367
1349 /* if we have offending qcs and the associated failed device */ 1368 /* if we have offending qcs and the associated failed device */
1350 if (failed_dev) { 1369 if (ehc->i.dev) {
1351 /* speed down */ 1370 /* speed down */
1352 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask); 1371 ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
1372 all_err_mask);
1353 1373
1354 /* perform per-dev EH action only on the offending device */ 1374 /* perform per-dev EH action only on the offending device */
1355 ehc->i.dev_action[failed_dev->devno] |= 1375 ehc->i.dev_action[ehc->i.dev->devno] |=
1356 action & ATA_EH_PERDEV_MASK; 1376 ehc->i.action & ATA_EH_PERDEV_MASK;
1357 action &= ~ATA_EH_PERDEV_MASK; 1377 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
1358 } 1378 }
1359 1379
1360 /* record autopsy result */
1361 ehc->i.dev = failed_dev;
1362 ehc->i.action |= action;
1363
1364 DPRINTK("EXIT\n"); 1380 DPRINTK("EXIT\n");
1365} 1381}
1366 1382
@@ -1483,6 +1499,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1483 ata_reset_fn_t reset; 1499 ata_reset_fn_t reset;
1484 int i, did_followup_srst, rc; 1500 int i, did_followup_srst, rc;
1485 1501
1502 /* about to reset */
1503 ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1504
1486 /* Determine which reset to use and record in ehc->i.action. 1505 /* Determine which reset to use and record in ehc->i.action.
1487 * prereset() may examine and modify it. 1506 * prereset() may examine and modify it.
1488 */ 1507 */
@@ -1531,8 +1550,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1531 ata_port_printk(ap, KERN_INFO, "%s resetting port\n", 1550 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1532 reset == softreset ? "soft" : "hard"); 1551 reset == softreset ? "soft" : "hard");
1533 1552
1534 /* reset */ 1553 /* mark that this EH session started with reset */
1535 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1536 ehc->i.flags |= ATA_EHI_DID_RESET; 1554 ehc->i.flags |= ATA_EHI_DID_RESET;
1537 1555
1538 rc = ata_do_reset(ap, reset, classes); 1556 rc = ata_do_reset(ap, reset, classes);
@@ -1595,7 +1613,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1595 postreset(ap, classes); 1613 postreset(ap, classes);
1596 1614
1597 /* reset successful, schedule revalidation */ 1615 /* reset successful, schedule revalidation */
1598 ata_eh_done(ap, NULL, ATA_EH_RESET_MASK); 1616 ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1599 ehc->i.action |= ATA_EH_REVALIDATE; 1617 ehc->i.action |= ATA_EH_REVALIDATE;
1600 } 1618 }
1601 1619
@@ -1848,15 +1866,16 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
1848 for (i = 0; i < ata_port_max_devices(ap); i++) { 1866 for (i = 0; i < ata_port_max_devices(ap); i++) {
1849 struct ata_device *dev = &ap->device[i]; 1867 struct ata_device *dev = &ap->device[i];
1850 1868
1851 if (ata_dev_absent(dev) || ata_dev_ready(dev)) 1869 if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1852 break; 1870 break;
1853 } 1871 }
1854 1872
1855 if (i == ata_port_max_devices(ap)) 1873 if (i == ata_port_max_devices(ap))
1856 return 1; 1874 return 1;
1857 1875
1858 /* always thaw frozen port and recover failed devices */ 1876 /* thaw frozen port, resume link and recover failed devices */
1859 if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap)) 1877 if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1878 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1860 return 0; 1879 return 0;
1861 1880
1862 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 1881 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
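The ata_eh_about_to_do()/ata_eh_done() changes above widen a reset request to the full ATA_EH_RESET_MASK and fold any reset bits still pending in ap->eh_info into the EH context before clearing them, so a concurrently requested reset is not dropped. A tiny sketch of that "absorb before clear" idea, using mock flag words of my own rather than libata's real ata_eh_info/ata_eh_context:

/* Mock flags and struct -- illustrative only, not libata. */
#include <stdio.h>

#define EH_SOFTRESET    0x01
#define EH_HARDRESET    0x02
#define EH_RESET_MASK   (EH_SOFTRESET | EH_HARDRESET)
#define EH_REVALIDATE   0x04

struct mock_eh_state {
        unsigned int pending;   /* requests queued from interrupt context */
        unsigned int active;    /* what the current EH session acts on    */
};

/* Before resetting: absorb pending reset requests, then clear every
 * reset bit from pending in one go.
 */
static void about_to_reset(struct mock_eh_state *s, unsigned int action)
{
        if (action & EH_RESET_MASK) {
                s->active |= s->pending & EH_RESET_MASK;
                action |= EH_RESET_MASK;
        }
        s->pending &= ~action;
}

int main(void)
{
        struct mock_eh_state s = { .pending = EH_HARDRESET | EH_REVALIDATE,
                                   .active  = EH_SOFTRESET };

        about_to_reset(&s, EH_SOFTRESET);
        /* active now also carries the pending HARDRESET; REVALIDATE stays queued */
        printf("active=%#x pending=%#x\n", s.active, s.pending);
        return 0;
}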
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f81691fcf177..d44f9aac6b8f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -21,10 +21,12 @@
21 21
22struct lpfc_sli2_slim; 22struct lpfc_sli2_slim;
23 23
24#define LPFC_MAX_TARGET 256 /* max targets supported */
25#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */
26#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */
27 24
25#define LPFC_MAX_TARGET 256 /* max number of targets supported */
26#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
27 requests */
28#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
29 the NameServer before giving up. */
28#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */ 30#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
29#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */ 31#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
30#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */ 32#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
@@ -41,7 +43,6 @@ struct lpfc_sli2_slim;
41 (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) 43 (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
42/* Provide maximum configuration definitions. */ 44/* Provide maximum configuration definitions. */
43#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ 45#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
44#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */
45#define FC_MAX_ADPTMSG 64 46#define FC_MAX_ADPTMSG 64
46 47
47#define MAX_HBAEVT 32 48#define MAX_HBAEVT 32
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b62a72dfab29..5c68cdd8736f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -219,9 +219,19 @@ lpfc_issue_lip(struct Scsi_Host *host)
219 return -ENOMEM; 219 return -ENOMEM;
220 220
221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
222 lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); 222 pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
223 pmboxq->mb.mbxOwner = OWN_HOST;
224
223 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
224 226
227 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
228 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
229 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
230 phba->cfg_link_speed);
231 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
232 phba->fc_ratov * 2);
233 }
234
225 if (mbxstatus == MBX_TIMEOUT) 235 if (mbxstatus == MBX_TIMEOUT)
226 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 236 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
227 else 237 else
@@ -233,51 +243,53 @@ lpfc_issue_lip(struct Scsi_Host *host)
233 return 0; 243 return 0;
234} 244}
235 245
236static ssize_t 246static int
237lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) 247lpfc_selective_reset(struct lpfc_hba *phba)
238{ 248{
239 struct Scsi_Host *host = class_to_shost(cdev); 249 struct completion online_compl;
240 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 250 int status = 0;
241 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 251
252 init_completion(&online_compl);
253 lpfc_workq_post_event(phba, &status, &online_compl,
254 LPFC_EVT_OFFLINE);
255 wait_for_completion(&online_compl);
256
257 if (status != 0)
258 return -EIO;
259
260 init_completion(&online_compl);
261 lpfc_workq_post_event(phba, &status, &online_compl,
262 LPFC_EVT_ONLINE);
263 wait_for_completion(&online_compl);
264
265 if (status != 0)
266 return -EIO;
267
268 return 0;
242} 269}
243 270
244static ssize_t 271static ssize_t
245lpfc_board_online_show(struct class_device *cdev, char *buf) 272lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
246{ 273{
247 struct Scsi_Host *host = class_to_shost(cdev); 274 struct Scsi_Host *host = class_to_shost(cdev);
248 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 275 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
276 int status = -EINVAL;
249 277
250 if (phba->fc_flag & FC_OFFLINE_MODE) 278 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
251 return snprintf(buf, PAGE_SIZE, "0\n"); 279 status = lpfc_selective_reset(phba);
280
281 if (status == 0)
282 return strlen(buf);
252 else 283 else
253 return snprintf(buf, PAGE_SIZE, "1\n"); 284 return status;
254} 285}
255 286
256static ssize_t 287static ssize_t
257lpfc_board_online_store(struct class_device *cdev, const char *buf, 288lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
258 size_t count)
259{ 289{
260 struct Scsi_Host *host = class_to_shost(cdev); 290 struct Scsi_Host *host = class_to_shost(cdev);
261 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 291 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
262 struct completion online_compl; 292 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
263 int val=0, status=0;
264
265 if (sscanf(buf, "%d", &val) != 1)
266 return -EINVAL;
267
268 init_completion(&online_compl);
269
270 if (val)
271 lpfc_workq_post_event(phba, &status, &online_compl,
272 LPFC_EVT_ONLINE);
273 else
274 lpfc_workq_post_event(phba, &status, &online_compl,
275 LPFC_EVT_OFFLINE);
276 wait_for_completion(&online_compl);
277 if (!status)
278 return strlen(buf);
279 else
280 return -EIO;
281} 293}
282 294
283static ssize_t 295static ssize_t
@@ -532,10 +544,9 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
532 NULL); 544 NULL);
533static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, 545static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
534 NULL); 546 NULL);
535static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
536 lpfc_board_online_show, lpfc_board_online_store);
537static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 547static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
538 lpfc_board_mode_show, lpfc_board_mode_store); 548 lpfc_board_mode_show, lpfc_board_mode_store);
549static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
539 550
540static int lpfc_poll = 0; 551static int lpfc_poll = 0;
541module_param(lpfc_poll, int, 0); 552module_param(lpfc_poll, int, 0);
@@ -695,12 +706,12 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
695 "during discovery"); 706 "during discovery");
696 707
697/* 708/*
698# lpfc_max_luns: maximum number of LUNs per target driver will support 709# lpfc_max_luns: maximum allowed LUN.
699# Value range is [1,32768]. Default value is 256. 710# Value range is [0,65535]. Default value is 255.
700# NOTE: The SCSI layer will scan each target for this many luns 711# NOTE: The SCSI layer might probe all allowed LUN on some old targets.
701*/ 712*/
702LPFC_ATTR_R(max_luns, 256, 1, 32768, 713LPFC_ATTR_R(max_luns, 255, 0, 65535,
703 "Maximum number of LUNs per target driver will support"); 714 "Maximum allowed LUN");
704 715
705/* 716/*
706# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. 717# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
@@ -739,8 +750,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
739 &class_device_attr_lpfc_max_luns, 750 &class_device_attr_lpfc_max_luns,
740 &class_device_attr_nport_evt_cnt, 751 &class_device_attr_nport_evt_cnt,
741 &class_device_attr_management_version, 752 &class_device_attr_management_version,
742 &class_device_attr_board_online,
743 &class_device_attr_board_mode, 753 &class_device_attr_board_mode,
754 &class_device_attr_issue_reset,
744 &class_device_attr_lpfc_poll, 755 &class_device_attr_lpfc_poll,
745 &class_device_attr_lpfc_poll_tmo, 756 &class_device_attr_lpfc_poll_tmo,
746 NULL, 757 NULL,
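The lpfc_attr.c hunk above drops the board_online attribute in favour of a write-only issue_reset attribute whose store method accepts the keyword "selective" and runs lpfc_selective_reset(), an offline/online cycle of the HBA. A hedged userspace sketch of how it would typically be exercised; the /sys path below is an assumption about where Scsi_Host class attributes are exposed, and host0 must be replaced with the actual lpfc host:

/* Assumed sysfs location for the new attribute; verify hostN on the target system. */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/class/scsi_host/host0/issue_reset";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* lpfc_issue_reset() only recognizes the "selective" keyword. */
        if (fputs("selective", f) < 0 || fclose(f) != 0) {
                perror("write");
                return 1;
        }
        return 0;
}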
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index ee22173fce43..517e9e4dd461 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -147,6 +147,7 @@ int lpfc_sli_hba_setup(struct lpfc_hba *);
147int lpfc_sli_hba_down(struct lpfc_hba *); 147int lpfc_sli_hba_down(struct lpfc_hba *);
148int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 148int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
149int lpfc_sli_handle_mb_event(struct lpfc_hba *); 149int lpfc_sli_handle_mb_event(struct lpfc_hba *);
150int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
150int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 151int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
151 struct lpfc_sli_ring *, uint32_t); 152 struct lpfc_sli_ring *, uint32_t);
152void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 153void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4126fd87956f..b89f6cb641e6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -648,33 +648,32 @@ lpfc_more_plogi(struct lpfc_hba * phba)
648} 648}
649 649
650static struct lpfc_nodelist * 650static struct lpfc_nodelist *
651lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 651lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
652 struct lpfc_nodelist *ndlp) 652 struct lpfc_nodelist *ndlp)
653{ 653{
654 struct lpfc_nodelist *new_ndlp; 654 struct lpfc_nodelist *new_ndlp;
655 struct lpfc_dmabuf *pcmd, *prsp;
656 uint32_t *lp; 655 uint32_t *lp;
657 struct serv_parm *sp; 656 struct serv_parm *sp;
658 uint8_t name[sizeof (struct lpfc_name)]; 657 uint8_t name[sizeof (struct lpfc_name)];
659 uint32_t rc; 658 uint32_t rc;
660 659
661 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
662 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
663 lp = (uint32_t *) prsp->virt; 660 lp = (uint32_t *) prsp->virt;
664 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 661 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
662 memset(name, 0, sizeof (struct lpfc_name));
665 663
666 /* Now we to find out if the NPort we are logging into, matches the WWPN 664 /* Now we to find out if the NPort we are logging into, matches the WWPN
667 * we have for that ndlp. If not, we have some work to do. 665 * we have for that ndlp. If not, we have some work to do.
668 */ 666 */
669 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); 667 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName);
670 668
671 memset(name, 0, sizeof (struct lpfc_name)); 669 if (new_ndlp == ndlp)
672 rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
673 if (!rc || (new_ndlp == ndlp)) {
674 return ndlp; 670 return ndlp;
675 }
676 671
677 if (!new_ndlp) { 672 if (!new_ndlp) {
673 rc =
674 memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
675 if (!rc)
676 return ndlp;
678 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 677 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
679 if (!new_ndlp) 678 if (!new_ndlp)
680 return ndlp; 679 return ndlp;
@@ -683,17 +682,21 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
683 } 682 }
684 683
685 lpfc_unreg_rpi(phba, new_ndlp); 684 lpfc_unreg_rpi(phba, new_ndlp);
686 new_ndlp->nlp_prev_state = ndlp->nlp_state;
687 new_ndlp->nlp_DID = ndlp->nlp_DID; 685 new_ndlp->nlp_DID = ndlp->nlp_DID;
688 new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 686 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
689 lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST); 687 new_ndlp->nlp_state = ndlp->nlp_state;
688 lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK);
690 689
691 /* Move this back to NPR list */ 690 /* Move this back to NPR list */
692 lpfc_unreg_rpi(phba, ndlp); 691 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
693 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 692 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
694 ndlp->nlp_state = NLP_STE_NPR_NODE; 693 }
695 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 694 else {
696 695 lpfc_unreg_rpi(phba, ndlp);
696 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
697 ndlp->nlp_state = NLP_STE_NPR_NODE;
698 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
699 }
697 return new_ndlp; 700 return new_ndlp;
698} 701}
699 702
@@ -703,6 +706,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
703{ 706{
704 IOCB_t *irsp; 707 IOCB_t *irsp;
705 struct lpfc_nodelist *ndlp; 708 struct lpfc_nodelist *ndlp;
709 struct lpfc_dmabuf *prsp;
706 int disc, rc, did, type; 710 int disc, rc, did, type;
707 711
708 712
@@ -769,7 +773,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
769 } 773 }
770 } else { 774 } else {
771 /* Good status, call state machine */ 775 /* Good status, call state machine */
772 ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp); 776 prsp = list_entry(((struct lpfc_dmabuf *)
777 cmdiocb->context2)->list.next,
778 struct lpfc_dmabuf, list);
779 ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
773 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, 780 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
774 NLP_EVT_CMPL_PLOGI); 781 NLP_EVT_CMPL_PLOGI);
775 } 782 }
@@ -3282,10 +3289,9 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3282 } else 3289 } else
3283 lpfc_sli_release_iocbq(phba, piocb); 3290 lpfc_sli_release_iocbq(phba, piocb);
3284 } 3291 }
3285 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) { 3292 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3286 phba->els_tmofunc.expires = jiffies + HZ * timeout; 3293 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
3287 add_timer(&phba->els_tmofunc); 3294
3288 }
3289 spin_unlock_irq(phba->host->host_lock); 3295 spin_unlock_irq(phba->host->host_lock);
3290} 3296}
3291 3297
@@ -3442,6 +3448,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3442 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { 3448 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3443 ndlp->nlp_type |= NLP_FABRIC; 3449 ndlp->nlp_type |= NLP_FABRIC;
3444 } 3450 }
3451 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
3452 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
3445 } 3453 }
3446 3454
3447 phba->fc_stat.elsRcvFrame++; 3455 phba->fc_stat.elsRcvFrame++;
@@ -3463,13 +3471,14 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3463 rjt_err = 1; 3471 rjt_err = 1;
3464 break; 3472 break;
3465 } 3473 }
3474 ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
3466 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); 3475 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
3467 break; 3476 break;
3468 case ELS_CMD_FLOGI: 3477 case ELS_CMD_FLOGI:
3469 phba->fc_stat.elsRcvFLOGI++; 3478 phba->fc_stat.elsRcvFLOGI++;
3470 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); 3479 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
3471 if (newnode) { 3480 if (newnode) {
3472 mempool_free( ndlp, phba->nlp_mem_pool); 3481 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3473 } 3482 }
3474 break; 3483 break;
3475 case ELS_CMD_LOGO: 3484 case ELS_CMD_LOGO:
@@ -3492,7 +3501,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3492 phba->fc_stat.elsRcvRSCN++; 3501 phba->fc_stat.elsRcvRSCN++;
3493 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); 3502 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
3494 if (newnode) { 3503 if (newnode) {
3495 mempool_free( ndlp, phba->nlp_mem_pool); 3504 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3496 } 3505 }
3497 break; 3506 break;
3498 case ELS_CMD_ADISC: 3507 case ELS_CMD_ADISC:
@@ -3535,28 +3544,28 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3535 phba->fc_stat.elsRcvLIRR++; 3544 phba->fc_stat.elsRcvLIRR++;
3536 lpfc_els_rcv_lirr(phba, elsiocb, ndlp); 3545 lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
3537 if (newnode) { 3546 if (newnode) {
3538 mempool_free( ndlp, phba->nlp_mem_pool); 3547 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3539 } 3548 }
3540 break; 3549 break;
3541 case ELS_CMD_RPS: 3550 case ELS_CMD_RPS:
3542 phba->fc_stat.elsRcvRPS++; 3551 phba->fc_stat.elsRcvRPS++;
3543 lpfc_els_rcv_rps(phba, elsiocb, ndlp); 3552 lpfc_els_rcv_rps(phba, elsiocb, ndlp);
3544 if (newnode) { 3553 if (newnode) {
3545 mempool_free( ndlp, phba->nlp_mem_pool); 3554 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3546 } 3555 }
3547 break; 3556 break;
3548 case ELS_CMD_RPL: 3557 case ELS_CMD_RPL:
3549 phba->fc_stat.elsRcvRPL++; 3558 phba->fc_stat.elsRcvRPL++;
3550 lpfc_els_rcv_rpl(phba, elsiocb, ndlp); 3559 lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
3551 if (newnode) { 3560 if (newnode) {
3552 mempool_free( ndlp, phba->nlp_mem_pool); 3561 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3553 } 3562 }
3554 break; 3563 break;
3555 case ELS_CMD_RNID: 3564 case ELS_CMD_RNID:
3556 phba->fc_stat.elsRcvRNID++; 3565 phba->fc_stat.elsRcvRNID++;
3557 lpfc_els_rcv_rnid(phba, elsiocb, ndlp); 3566 lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
3558 if (newnode) { 3567 if (newnode) {
3559 mempool_free( ndlp, phba->nlp_mem_pool); 3568 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3560 } 3569 }
3561 break; 3570 break;
3562 default: 3571 default:
@@ -3568,7 +3577,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3568 "%d:0115 Unknown ELS command x%x received from " 3577 "%d:0115 Unknown ELS command x%x received from "
3569 "NPORT x%x\n", phba->brd_no, cmd, did); 3578 "NPORT x%x\n", phba->brd_no, cmd, did);
3570 if (newnode) { 3579 if (newnode) {
3571 mempool_free( ndlp, phba->nlp_mem_pool); 3580 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3572 } 3581 }
3573 break; 3582 break;
3574 } 3583 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index adb086009ae0..4d6cf990c4fc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1084,7 +1084,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1084 fc_remote_port_rolechg(rport, rport_ids.roles); 1084 fc_remote_port_rolechg(rport, rport_ids.roles);
1085 1085
1086 if ((rport->scsi_target_id != -1) && 1086 if ((rport->scsi_target_id != -1) &&
1087 (rport->scsi_target_id < MAX_FCP_TARGET)) { 1087 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1088 ndlp->nlp_sid = rport->scsi_target_id; 1088 ndlp->nlp_sid = rport->scsi_target_id;
1089 } 1089 }
1090 1090
@@ -1313,7 +1313,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1313 if ((rport_add == mapped) && 1313 if ((rport_add == mapped) &&
1314 ((!nlp->rport) || 1314 ((!nlp->rport) ||
1315 (nlp->rport->scsi_target_id == -1) || 1315 (nlp->rport->scsi_target_id == -1) ||
1316 (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) { 1316 (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) {
1317 nlp->nlp_state = NLP_STE_UNMAPPED_NODE; 1317 nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1318 spin_lock_irq(phba->host->host_lock); 1318 spin_lock_irq(phba->host->host_lock);
1319 nlp->nlp_flag |= NLP_TGT_NO_SCSIID; 1319 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 81755a3f7c68..ef47b824cbed 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -71,6 +71,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
71 uint16_t offset = 0; 71 uint16_t offset = 0;
72 static char licensed[56] = 72 static char licensed[56] =
73 "key unlock for use with gnu public licensed code only\0"; 73 "key unlock for use with gnu public licensed code only\0";
74 static int init_key = 1;
74 75
75 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 76 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
76 if (!pmb) { 77 if (!pmb) {
@@ -82,10 +83,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
82 phba->hba_state = LPFC_INIT_MBX_CMDS; 83 phba->hba_state = LPFC_INIT_MBX_CMDS;
83 84
84 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 85 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
85 uint32_t *ptext = (uint32_t *) licensed; 86 if (init_key) {
87 uint32_t *ptext = (uint32_t *) licensed;
86 88
87 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 89 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
88 *ptext = cpu_to_be32(*ptext); 90 *ptext = cpu_to_be32(*ptext);
91 init_key = 0;
92 }
89 93
90 lpfc_read_nv(phba, pmb); 94 lpfc_read_nv(phba, pmb);
91 memset((char*)mb->un.varRDnvp.rsvd3, 0, 95 memset((char*)mb->un.varRDnvp.rsvd3, 0,
@@ -405,19 +409,26 @@ lpfc_config_port_post(struct lpfc_hba * phba)
405 } 409 }
406 /* MBOX buffer will be freed in mbox compl */ 410 /* MBOX buffer will be freed in mbox compl */
407 411
408 i = 0; 412 return (0);
413}
414
415static int
416lpfc_discovery_wait(struct lpfc_hba *phba)
417{
418 int i = 0;
419
409 while ((phba->hba_state != LPFC_HBA_READY) || 420 while ((phba->hba_state != LPFC_HBA_READY) ||
410 (phba->num_disc_nodes) || (phba->fc_prli_sent) || 421 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
411 ((phba->fc_map_cnt == 0) && (i<2)) || 422 ((phba->fc_map_cnt == 0) && (i<2)) ||
412 (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { 423 (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
413 /* Check every second for 30 retries. */ 424 /* Check every second for 30 retries. */
414 i++; 425 i++;
415 if (i > 30) { 426 if (i > 30) {
416 break; 427 return -ETIMEDOUT;
417 } 428 }
418 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { 429 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
419 /* The link is down. Set linkdown timeout */ 430 /* The link is down. Set linkdown timeout */
420 break; 431 return -ETIMEDOUT;
421 } 432 }
422 433
423 /* Delay for 1 second to give discovery time to complete. */ 434 /* Delay for 1 second to give discovery time to complete. */
@@ -425,12 +436,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
425 436
426 } 437 }
427 438
428 /* Since num_disc_nodes keys off of PLOGI, delay a bit to let 439 return 0;
429 * any potential PRLIs to flush thru the SLI sub-system.
430 */
431 msleep(50);
432
433 return (0);
434} 440}
435 441
436/************************************************************************/ 442/************************************************************************/
@@ -1339,7 +1345,8 @@ lpfc_offline(struct lpfc_hba * phba)
1339 struct lpfc_sli_ring *pring; 1345 struct lpfc_sli_ring *pring;
1340 struct lpfc_sli *psli; 1346 struct lpfc_sli *psli;
1341 unsigned long iflag; 1347 unsigned long iflag;
1342 int i = 0; 1348 int i;
1349 int cnt = 0;
1343 1350
1344 if (!phba) 1351 if (!phba)
1345 return 0; 1352 return 0;
@@ -1348,17 +1355,27 @@ lpfc_offline(struct lpfc_hba * phba)
1348 return 0; 1355 return 0;
1349 1356
1350 psli = &phba->sli; 1357 psli = &phba->sli;
1351 pring = &psli->ring[psli->fcp_ring];
1352 1358
1353 lpfc_linkdown(phba); 1359 lpfc_linkdown(phba);
1360 lpfc_sli_flush_mbox_queue(phba);
1354 1361
1355 /* The linkdown event takes 30 seconds to timeout. */ 1362 for (i = 0; i < psli->num_rings; i++) {
1356 while (pring->txcmplq_cnt) { 1363 pring = &psli->ring[i];
1357 mdelay(10); 1364 /* The linkdown event takes 30 seconds to timeout. */
1358 if (i++ > 3000) 1365 while (pring->txcmplq_cnt) {
1359 break; 1366 mdelay(10);
1367 if (cnt++ > 3000) {
1368 lpfc_printf_log(phba,
1369 KERN_WARNING, LOG_INIT,
1370 "%d:0466 Outstanding IO when "
1371 "bringing Adapter offline\n",
1372 phba->brd_no);
1373 break;
1374 }
1375 }
1360 } 1376 }
1361 1377
1378
1362 /* stop all timers associated with this hba */ 1379 /* stop all timers associated with this hba */
1363 lpfc_stop_timer(phba); 1380 lpfc_stop_timer(phba);
1364 phba->work_hba_events = 0; 1381 phba->work_hba_events = 0;
@@ -1639,6 +1656,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1639 goto out_free_irq; 1656 goto out_free_irq;
1640 } 1657 }
1641 1658
1659 lpfc_discovery_wait(phba);
1660
1642 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1661 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1643 spin_lock_irq(phba->host->host_lock); 1662 spin_lock_irq(phba->host->host_lock);
1644 lpfc_poll_start_timer(phba); 1663 lpfc_poll_start_timer(phba);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 07017658ac56..066292d3995a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -133,6 +133,11 @@ lpfc_mem_free(struct lpfc_hba * phba)
133 133
134 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 134 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
135 pci_pool_destroy(phba->lpfc_mbuf_pool); 135 pci_pool_destroy(phba->lpfc_mbuf_pool);
136
137 /* Free the iocb lookup array */
138 kfree(psli->iocbq_lookup);
139 psli->iocbq_lookup = NULL;
140
136} 141}
137 142
138void * 143void *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27d60ad897cd..bd0b0e293d63 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1110,6 +1110,17 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1110 phba->brd_no, 1110 phba->brd_no,
1111 did, mb->mbxStatus, phba->hba_state); 1111 did, mb->mbxStatus, phba->hba_state);
1112 1112
1113 /*
1114 * If RegLogin failed due to lack of HBA resources do not
1115 * retry discovery.
1116 */
1117 if (mb->mbxStatus == MBXERR_RPI_FULL) {
1118 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
1119 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
1120 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
1121 return ndlp->nlp_state;
1122 }
1123
1113 /* Put ndlp in npr list set plogi timer for 1 sec */ 1124 /* Put ndlp in npr list set plogi timer for 1 sec */
1114 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1125 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1115 spin_lock_irq(phba->host->host_lock); 1126 spin_lock_irq(phba->host->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index aea1ee472f3d..a760a44173df 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -153,22 +153,6 @@ static void
153lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 153lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
154{ 154{
155 unsigned long iflag = 0; 155 unsigned long iflag = 0;
156 /*
157 * There are only two special cases to consider. (1) the scsi command
158 * requested scatter-gather usage or (2) the scsi command allocated
159 * a request buffer, but did not request use_sg. There is a third
160 * case, but it does not require resource deallocation.
161 */
162 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
163 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
164 psb->seg_cnt, psb->pCmd->sc_data_direction);
165 } else {
166 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
167 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
168 psb->pCmd->request_bufflen,
169 psb->pCmd->sc_data_direction);
170 }
171 }
172 156
173 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 157 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
174 psb->pCmd = NULL; 158 psb->pCmd = NULL;
@@ -282,6 +266,27 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
282} 266}
283 267
284static void 268static void
269lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
270{
271 /*
272 * There are only two special cases to consider. (1) the scsi command
273 * requested scatter-gather usage or (2) the scsi command allocated
274 * a request buffer, but did not request use_sg. There is a third
275 * case, but it does not require resource deallocation.
276 */
277 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
278 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
279 psb->seg_cnt, psb->pCmd->sc_data_direction);
280 } else {
281 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
282 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
283 psb->pCmd->request_bufflen,
284 psb->pCmd->sc_data_direction);
285 }
286 }
287}
288
289static void
285lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) 290lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
286{ 291{
287 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 292 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
@@ -454,6 +459,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
454 cmd->scsi_done(cmd); 459 cmd->scsi_done(cmd);
455 460
456 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 461 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
462 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
457 lpfc_release_scsi_buf(phba, lpfc_cmd); 463 lpfc_release_scsi_buf(phba, lpfc_cmd);
458 return; 464 return;
459 } 465 }
@@ -511,6 +517,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
511 } 517 }
512 } 518 }
513 519
520 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
514 lpfc_release_scsi_buf(phba, lpfc_cmd); 521 lpfc_release_scsi_buf(phba, lpfc_cmd);
515} 522}
516 523
@@ -609,6 +616,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
609static int 616static int
610lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, 617lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
611 struct lpfc_scsi_buf *lpfc_cmd, 618 struct lpfc_scsi_buf *lpfc_cmd,
619 unsigned int lun,
612 uint8_t task_mgmt_cmd) 620 uint8_t task_mgmt_cmd)
613{ 621{
614 struct lpfc_sli *psli; 622 struct lpfc_sli *psli;
@@ -627,8 +635,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
627 piocb = &piocbq->iocb; 635 piocb = &piocbq->iocb;
628 636
629 fcp_cmnd = lpfc_cmd->fcp_cmnd; 637 fcp_cmnd = lpfc_cmd->fcp_cmnd;
630 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 638 int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
631 &lpfc_cmd->fcp_cmnd->fcp_lun);
632 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 639 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
633 640
634 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 641 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
@@ -655,14 +662,16 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
655 662
656static int 663static int
657lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, 664lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
658 unsigned tgt_id, struct lpfc_rport_data *rdata) 665 unsigned tgt_id, unsigned int lun,
666 struct lpfc_rport_data *rdata)
659{ 667{
660 struct lpfc_iocbq *iocbq; 668 struct lpfc_iocbq *iocbq;
661 struct lpfc_iocbq *iocbqrsp; 669 struct lpfc_iocbq *iocbqrsp;
662 int ret; 670 int ret;
663 671
664 lpfc_cmd->rdata = rdata; 672 lpfc_cmd->rdata = rdata;
665 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); 673 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
674 FCP_TARGET_RESET);
666 if (!ret) 675 if (!ret)
667 return FAILED; 676 return FAILED;
668 677
@@ -822,6 +831,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
822 return 0; 831 return 0;
823 832
824 out_host_busy_free_buf: 833 out_host_busy_free_buf:
834 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
825 lpfc_release_scsi_buf(phba, lpfc_cmd); 835 lpfc_release_scsi_buf(phba, lpfc_cmd);
826 out_host_busy: 836 out_host_busy:
827 return SCSI_MLQUEUE_HOST_BUSY; 837 return SCSI_MLQUEUE_HOST_BUSY;
@@ -969,12 +979,12 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
969 if (lpfc_cmd == NULL) 979 if (lpfc_cmd == NULL)
970 goto out; 980 goto out;
971 981
972 lpfc_cmd->pCmd = cmnd;
973 lpfc_cmd->timeout = 60; 982 lpfc_cmd->timeout = 60;
974 lpfc_cmd->scsi_hba = phba; 983 lpfc_cmd->scsi_hba = phba;
975 lpfc_cmd->rdata = rdata; 984 lpfc_cmd->rdata = rdata;
976 985
977 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); 986 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
987 FCP_LUN_RESET);
978 if (!ret) 988 if (!ret)
979 goto out_free_scsi_buf; 989 goto out_free_scsi_buf;
980 990
@@ -1001,7 +1011,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1001 cmd_status = iocbqrsp->iocb.ulpStatus; 1011 cmd_status = iocbqrsp->iocb.ulpStatus;
1002 1012
1003 lpfc_sli_release_iocbq(phba, iocbqrsp); 1013 lpfc_sli_release_iocbq(phba, iocbqrsp);
1004 lpfc_release_scsi_buf(phba, lpfc_cmd);
1005 1014
1006 /* 1015 /*
1007 * All outstanding txcmplq I/Os should have been aborted by the device. 1016 * All outstanding txcmplq I/Os should have been aborted by the device.
@@ -1040,6 +1049,8 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1040 } 1049 }
1041 1050
1042out_free_scsi_buf: 1051out_free_scsi_buf:
1052 lpfc_release_scsi_buf(phba, lpfc_cmd);
1053
1043 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1054 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1044 "%d:0713 SCSI layer issued LUN reset (%d, %d) " 1055 "%d:0713 SCSI layer issued LUN reset (%d, %d) "
1045 "Data: x%x x%x x%x\n", 1056 "Data: x%x x%x x%x\n",
@@ -1070,7 +1081,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1070 1081
1071 /* The lpfc_cmd storage is reused. Set all loop invariants. */ 1082 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1072 lpfc_cmd->timeout = 60; 1083 lpfc_cmd->timeout = 60;
1073 lpfc_cmd->pCmd = cmnd;
1074 lpfc_cmd->scsi_hba = phba; 1084 lpfc_cmd->scsi_hba = phba;
1075 1085
1076 /* 1086 /*
@@ -1078,7 +1088,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1078 * targets known to the driver. Should any target reset 1088 * targets known to the driver. Should any target reset
1079 * fail, this routine returns failure to the midlayer. 1089 * fail, this routine returns failure to the midlayer.
1080 */ 1090 */
1081 for (i = 0; i < MAX_FCP_TARGET; i++) { 1091 for (i = 0; i < LPFC_MAX_TARGET; i++) {
1082 /* Search the mapped list for this target ID */ 1092 /* Search the mapped list for this target ID */
1083 match = 0; 1093 match = 0;
1084 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1094 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
@@ -1090,8 +1100,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1090 if (!match) 1100 if (!match)
1091 continue; 1101 continue;
1092 1102
1093 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, 1103 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
1094 i, ndlp->rport->dd_data); 1104 ndlp->rport->dd_data);
1095 if (ret != SUCCESS) { 1105 if (ret != SUCCESS) {
1096 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1106 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1097 "%d:0713 Bus Reset on target %d failed\n", 1107 "%d:0713 Bus Reset on target %d failed\n",
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index bb69a7a1ec59..350a625fa224 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -191,35 +191,12 @@ static int
191lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, 191lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) 192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
193{ 193{
194 uint16_t iotag;
195
196 list_add_tail(&piocb->list, &pring->txcmplq); 194 list_add_tail(&piocb->list, &pring->txcmplq);
197 pring->txcmplq_cnt++; 195 pring->txcmplq_cnt++;
198 if (unlikely(pring->ringno == LPFC_ELS_RING)) 196 if (unlikely(pring->ringno == LPFC_ELS_RING))
199 mod_timer(&phba->els_tmofunc, 197 mod_timer(&phba->els_tmofunc,
200 jiffies + HZ * (phba->fc_ratov << 1)); 198 jiffies + HZ * (phba->fc_ratov << 1));
201 199
202 if (pring->fast_lookup) {
203 /* Setup fast lookup based on iotag for completion */
204 iotag = piocb->iocb.ulpIoTag;
205 if (iotag && (iotag < pring->fast_iotag))
206 *(pring->fast_lookup + iotag) = piocb;
207 else {
208
209 /* Cmd ring <ringno> put: iotag <iotag> greater then
210 configured max <fast_iotag> wd0 <icmd> */
211 lpfc_printf_log(phba,
212 KERN_ERR,
213 LOG_SLI,
214 "%d:0316 Cmd ring %d put: iotag x%x "
215 "greater then configured max x%x "
216 "wd0 x%x\n",
217 phba->brd_no,
218 pring->ringno, iotag,
219 pring->fast_iotag,
220 *(((uint32_t *)(&piocb->iocb)) + 7));
221 }
222 }
223 return (0); 200 return (0);
224} 201}
225 202
@@ -601,7 +578,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
601 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus 578 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
602 <status> */ 579 <status> */
603 lpfc_printf_log(phba, 580 lpfc_printf_log(phba,
604 KERN_ERR, 581 KERN_WARNING,
605 LOG_MBOX | LOG_SLI, 582 LOG_MBOX | LOG_SLI,
606 "%d:0304 Stray Mailbox Interrupt " 583 "%d:0304 Stray Mailbox Interrupt "
607 "mbxCommand x%x mbxStatus x%x\n", 584 "mbxCommand x%x mbxStatus x%x\n",
@@ -1570,8 +1547,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1570 1547
1571void lpfc_reset_barrier(struct lpfc_hba * phba) 1548void lpfc_reset_barrier(struct lpfc_hba * phba)
1572{ 1549{
1573 uint32_t * resp_buf; 1550 uint32_t __iomem *resp_buf;
1574 uint32_t * mbox_buf; 1551 uint32_t __iomem *mbox_buf;
1575 volatile uint32_t mbox; 1552 volatile uint32_t mbox;
1576 uint32_t hc_copy; 1553 uint32_t hc_copy;
1577 int i; 1554 int i;
@@ -1587,7 +1564,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1587 * Tell the other part of the chip to suspend temporarily all 1564 * Tell the other part of the chip to suspend temporarily all
1588 * its DMA activity. 1565 * its DMA activity.
1589 */ 1566 */
1590 resp_buf = (uint32_t *)phba->MBslimaddr; 1567 resp_buf = phba->MBslimaddr;
1591 1568
1592 /* Disable the error attention */ 1569 /* Disable the error attention */
1593 hc_copy = readl(phba->HCregaddr); 1570 hc_copy = readl(phba->HCregaddr);
@@ -1605,7 +1582,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1605 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 1582 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1606 1583
1607 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 1584 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1608 mbox_buf = (uint32_t *)phba->MBslimaddr; 1585 mbox_buf = phba->MBslimaddr;
1609 writel(mbox, mbox_buf); 1586 writel(mbox, mbox_buf);
1610 1587
1611 for (i = 0; 1588 for (i = 0;
@@ -1805,7 +1782,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1805 skip_post = 0; 1782 skip_post = 0;
1806 word0 = 0; /* This is really setting up word1 */ 1783 word0 = 0; /* This is really setting up word1 */
1807 } 1784 }
1808 to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t); 1785 to_slim = phba->MBslimaddr + sizeof (uint32_t);
1809 writel(*(uint32_t *) mb, to_slim); 1786 writel(*(uint32_t *) mb, to_slim);
1810 readl(to_slim); /* flush */ 1787 readl(to_slim); /* flush */
1811 1788
@@ -2659,8 +2636,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2659 2636
2660 INIT_LIST_HEAD(&(pring->txq)); 2637 INIT_LIST_HEAD(&(pring->txq));
2661 2638
2662 kfree(pring->fast_lookup);
2663 pring->fast_lookup = NULL;
2664 } 2639 }
2665 2640
2666 spin_unlock_irqrestore(phba->host->host_lock, flags); 2641 spin_unlock_irqrestore(phba->host->host_lock, flags);
@@ -3110,6 +3085,24 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3110 return retval; 3085 return retval;
3111} 3086}
3112 3087
3088int
3089lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3090{
3091 int i = 0;
3092
3093 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
3094 if (i++ > LPFC_MBOX_TMO * 1000)
3095 return 1;
3096
3097 if (lpfc_sli_handle_mb_event(phba) == 0)
3098 i = 0;
3099
3100 msleep(1);
3101 }
3102
3103 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3104}
3105
3113irqreturn_t 3106irqreturn_t
3114lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) 3107lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
3115{ 3108{
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index a52d6c6cf083..d8ef0d2894d4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -135,8 +135,6 @@ struct lpfc_sli_ring {
135 uint32_t fast_iotag; /* max fastlookup based iotag */ 135 uint32_t fast_iotag; /* max fastlookup based iotag */
136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */ 136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */
137 uint32_t iotag_max; /* max iotag value to use */ 137 uint32_t iotag_max; /* max iotag value to use */
138 struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by
139 iotag */
140 struct list_head txq; 138 struct list_head txq;
141 uint16_t txq_cnt; /* current length of queue */ 139 uint16_t txq_cnt; /* current length of queue */
142 uint16_t txq_max; /* max length */ 140 uint16_t txq_max; /* max length */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6b737568b831..10e89c6ae823 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.6" 21#define LPFC_DRIVER_VERSION "8.1.7"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 93edaa8696cf..89ef34df5a1d 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -378,7 +378,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
378 int nseg; 378 int nseg;
379 379
380 total = 0; 380 total = 0;
381 scl = (struct scatterlist *) cmd->buffer; 381 scl = (struct scatterlist *) cmd->request_buffer;
382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, 382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,
383 cmd->sc_data_direction); 383 cmd->sc_data_direction);
384 for (i = 0; i < nseg; ++i) { 384 for (i = 0; i < nseg; ++i) {
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index c88717727be8..5572981a9f92 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1268,7 +1268,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1268 if (cmd->use_sg > 0) { 1268 if (cmd->use_sg > 0) {
1269 int nseg; 1269 int nseg;
1270 total = 0; 1270 total = 0;
1271 scl = (struct scatterlist *) cmd->buffer; 1271 scl = (struct scatterlist *) cmd->request_buffer;
1272 off = ms->data_ptr; 1272 off = ms->data_ptr;
1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, 1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
1274 cmd->sc_data_direction); 1274 cmd->sc_data_direction);
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index 7abf64d1bfc9..0bd9c60e6455 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -169,8 +169,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
169 SCpnt->request->rq_status = RQ_SCSI_BUSY; 169 SCpnt->request->rq_status = RQ_SCSI_BUSY;
170 170
171 SCpnt->done = pluto_detect_done; 171 SCpnt->done = pluto_detect_done;
172 SCpnt->bufflen = 256;
173 SCpnt->buffer = fcs[i].inquiry;
174 SCpnt->request_bufflen = 256; 172 SCpnt->request_bufflen = 256;
175 SCpnt->request_buffer = fcs[i].inquiry; 173 SCpnt->request_buffer = fcs[i].inquiry;
176 PLD(("set up %d %08lx\n", i, (long)SCpnt)) 174 PLD(("set up %d %08lx\n", i, (long)SCpnt))
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 69e0551a81d2..5b2f0741a55b 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -874,7 +874,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
874 if (Cmnd->use_sg) { 874 if (Cmnd->use_sg) {
875 int sg_count; 875 int sg_count;
876 876
877 sg = (struct scatterlist *) Cmnd->buffer; 877 sg = (struct scatterlist *) Cmnd->request_buffer;
878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); 878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
879 879
880 ds = cmd->dataseg; 880 ds = cmd->dataseg;
@@ -1278,7 +1278,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1278 1278
1279 if (Cmnd->use_sg) { 1279 if (Cmnd->use_sg) {
1280 sbus_unmap_sg(qpti->sdev, 1280 sbus_unmap_sg(qpti->sdev,
1281 (struct scatterlist *)Cmnd->buffer, 1281 (struct scatterlist *)Cmnd->request_buffer,
1282 Cmnd->use_sg, 1282 Cmnd->use_sg,
1283 Cmnd->sc_data_direction); 1283 Cmnd->sc_data_direction);
1284 } else { 1284 } else {
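
The hunks above for mac53c94, mesh and qlogicpti (and the sun3/wd33c93 hunks further down) all replace the old cmd->buffer copy with cmd->request_buffer when walking a scatter-gather list. A minimal userspace sketch of that shape follows; struct sg_ent and struct toy_cmd are toy stand-ins whose field names only loosely mirror the 2.6-era scsi_cmnd layout.

#include <stdio.h>

struct sg_ent {
    void        *addr;
    unsigned int len;
};

struct toy_cmd {
    unsigned short use_sg;          /* number of scatterlist entries, 0 = flat */
    void          *request_buffer;  /* sg_ent[] when use_sg, else data pointer */
    unsigned int   request_bufflen;
};

/* Count the bytes a command will transfer, reading only request_buffer. */
static unsigned int total_bytes(const struct toy_cmd *cmd)
{
    unsigned int total = 0;

    if (cmd->use_sg) {
        const struct sg_ent *sg = cmd->request_buffer;
        int i;

        for (i = 0; i < cmd->use_sg; i++)
            total += sg[i].len;     /* one DMA segment per entry */
    } else {
        total = cmd->request_bufflen;
    }
    return total;
}

int main(void)
{
    char a[512], b[1024];
    struct sg_ent sgl[] = { { a, sizeof(a) }, { b, sizeof(b) } };
    struct toy_cmd cmd = { 2, sgl, 0 };

    printf("sg total: %u bytes\n", total_bytes(&cmd));
    return 0;
}

With the duplicate buffer/bufflen pair gone, request_buffer is the single source of truth: it is either the scatterlist array (use_sg > 0) or the flat data pointer, never both.
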
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 64631bd38952..4776f4e55839 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -269,8 +269,15 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 board_20619 }, 270 board_20619 },
271 271
272/* TODO: remove all associated board_20771 code, as it completely
273 * duplicates board_2037x code, unless reason for separation can be
274 * divined.
275 */
276#if 0
272 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 277 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
273 board_20771 }, 278 board_20771 },
279#endif
280
274 { } /* terminate list */ 281 { } /* terminate list */
275}; 282};
276 283
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ab7df0dcfe8..b332caddd5b3 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -346,7 +346,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
346 if (level > 3) { 346 if (level > 3) {
347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
348 " done = 0x%p, queuecommand 0x%p\n", 348 " done = 0x%p, queuecommand 0x%p\n",
349 cmd->buffer, cmd->bufflen, 349 cmd->request_buffer, cmd->request_bufflen,
350 cmd->done, 350 cmd->done,
351 sdev->host->hostt->queuecommand); 351 sdev->host->hostt->queuecommand);
352 352
@@ -661,11 +661,6 @@ void __scsi_done(struct scsi_cmnd *cmd)
661 */ 661 */
662int scsi_retry_command(struct scsi_cmnd *cmd) 662int scsi_retry_command(struct scsi_cmnd *cmd)
663{ 663{
664 /*
665 * Restore the SCSI command state.
666 */
667 scsi_setup_cmd_retry(cmd);
668
669 /* 664 /*
670 * Zero the sense information from the last time we tried 665 * Zero the sense information from the last time we tried
671 * this command. 666 * this command.
@@ -711,10 +706,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
711 "Notifying upper driver of completion " 706 "Notifying upper driver of completion "
712 "(result %x)\n", cmd->result)); 707 "(result %x)\n", cmd->result));
713 708
714 /*
715 * We can get here with use_sg=0, causing a panic in the upper level
716 */
717 cmd->use_sg = cmd->old_use_sg;
718 cmd->done(cmd); 709 cmd->done(cmd);
719} 710}
720EXPORT_SYMBOL(scsi_finish_command); 711EXPORT_SYMBOL(scsi_finish_command);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9c63b00773c4..a80303c6b3fd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -286,7 +286,7 @@ static int inquiry_evpd_83(unsigned char * arr, int target_dev_id,
286 int dev_id_num, const char * dev_id_str, 286 int dev_id_num, const char * dev_id_str,
287 int dev_id_str_len); 287 int dev_id_str_len);
288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id); 288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);
289static void do_create_driverfs_files(void); 289static int do_create_driverfs_files(void);
290static void do_remove_driverfs_files(void); 290static void do_remove_driverfs_files(void);
291 291
292static int sdebug_add_adapter(void); 292static int sdebug_add_adapter(void);
@@ -2487,19 +2487,22 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,
2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
2488 sdebug_add_host_store); 2488 sdebug_add_host_store);
2489 2489
2490static void do_create_driverfs_files(void) 2490static int do_create_driverfs_files(void)
2491{ 2491{
2492 driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); 2492 int ret;
2493 driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); 2493
2494 driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2494 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
2495 driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2495 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
2496 driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2496 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
2497 driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2497 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
2498 driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2498 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
2499 driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 2499 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2500 driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2500 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2501 driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 2501 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2502 driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2502 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
2503 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
2504 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2505 return ret;
2503} 2506}
2504 2507
2505static void do_remove_driverfs_files(void) 2508static void do_remove_driverfs_files(void)
@@ -2522,6 +2525,7 @@ static int __init scsi_debug_init(void)
2522 unsigned int sz; 2525 unsigned int sz;
2523 int host_to_add; 2526 int host_to_add;
2524 int k; 2527 int k;
2528 int ret;
2525 2529
2526 if (scsi_debug_dev_size_mb < 1) 2530 if (scsi_debug_dev_size_mb < 1)
2527 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2531 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
@@ -2560,12 +2564,32 @@ static int __init scsi_debug_init(void)
2560 if (scsi_debug_num_parts > 0) 2564 if (scsi_debug_num_parts > 0)
2561 sdebug_build_parts(fake_storep); 2565 sdebug_build_parts(fake_storep);
2562 2566
2563 init_all_queued(); 2567 ret = device_register(&pseudo_primary);
2568 if (ret < 0) {
2569 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
2570 ret);
2571 goto free_vm;
2572 }
2573 ret = bus_register(&pseudo_lld_bus);
2574 if (ret < 0) {
2575 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
2576 ret);
2577 goto dev_unreg;
2578 }
2579 ret = driver_register(&sdebug_driverfs_driver);
2580 if (ret < 0) {
2581 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
2582 ret);
2583 goto bus_unreg;
2584 }
2585 ret = do_create_driverfs_files();
2586 if (ret < 0) {
2587 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
2588 ret);
2589 goto del_files;
2590 }
2564 2591
2565 device_register(&pseudo_primary); 2592 init_all_queued();
2566 bus_register(&pseudo_lld_bus);
2567 driver_register(&sdebug_driverfs_driver);
2568 do_create_driverfs_files();
2569 2593
2570 sdebug_driver_template.proc_name = (char *)sdebug_proc_name; 2594 sdebug_driver_template.proc_name = (char *)sdebug_proc_name;
2571 2595
@@ -2585,6 +2609,18 @@ static int __init scsi_debug_init(void)
2585 scsi_debug_add_host); 2609 scsi_debug_add_host);
2586 } 2610 }
2587 return 0; 2611 return 0;
2612
2613del_files:
2614 do_remove_driverfs_files();
2615 driver_unregister(&sdebug_driverfs_driver);
2616bus_unreg:
2617 bus_unregister(&pseudo_lld_bus);
2618dev_unreg:
2619 device_unregister(&pseudo_primary);
2620free_vm:
2621 vfree(fake_storep);
2622
2623 return ret;
2588} 2624}
2589 2625
2590static void __exit scsi_debug_exit(void) 2626static void __exit scsi_debug_exit(void)
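
The scsi_debug_init() rework above checks every registration call and unwinds already-completed steps in reverse order through goto labels. The standalone sketch below shows the same error-ladder pattern with dummy register/unregister functions standing in for device_register()/bus_register()/driver_register(); the third step is forced to fail so the cleanup path runs.

#include <stdio.h>

/* Dummy registration steps; each returns 0 on success, negative on error. */
static int register_device(void)  { puts("device registered");       return 0; }
static int register_bus(void)     { puts("bus registered");          return 0; }
static int register_driver(void)  { puts("driver register failed");  return -1; }

static void unregister_bus(void)    { puts("bus unregistered");    }
static void unregister_device(void) { puts("device unregistered"); }

/* Error ladder: every failure jumps to the label that undoes exactly the
 * steps that already succeeded, in reverse order. */
static int demo_init(void)
{
    int ret;

    ret = register_device();
    if (ret < 0)
        goto out;

    ret = register_bus();
    if (ret < 0)
        goto dev_unreg;

    ret = register_driver();
    if (ret < 0)
        goto bus_unreg;

    return 0;

bus_unreg:
    unregister_bus();
dev_unreg:
    unregister_device();
out:
    return ret;
}

int main(void)
{
    printf("init returned %d\n", demo_init());
    return 0;
}

Making do_create_driverfs_files() return an int feeds into the same ladder: the init path can now bail out and tear down the driver registration instead of silently ignoring sysfs failures.
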
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6683d596234a..6a5b731bd5ba 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,19 +460,67 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
460 * Return value: 460 * Return value:
461 * SUCCESS or FAILED or NEEDS_RETRY 461 * SUCCESS or FAILED or NEEDS_RETRY
462 **/ 462 **/
463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) 463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense)
464{ 464{
465 struct scsi_device *sdev = scmd->device; 465 struct scsi_device *sdev = scmd->device;
466 struct Scsi_Host *shost = sdev->host; 466 struct Scsi_Host *shost = sdev->host;
467 int old_result = scmd->result;
467 DECLARE_COMPLETION(done); 468 DECLARE_COMPLETION(done);
468 unsigned long timeleft; 469 unsigned long timeleft;
469 unsigned long flags; 470 unsigned long flags;
471 unsigned char old_cmnd[MAX_COMMAND_SIZE];
472 enum dma_data_direction old_data_direction;
473 unsigned short old_use_sg;
474 unsigned char old_cmd_len;
475 unsigned old_bufflen;
476 void *old_buffer;
470 int rtn; 477 int rtn;
471 478
479 /*
480 * We need saved copies of a number of fields - this is because
481 * error handling may need to overwrite these with different values
482 * to run different commands, and once error handling is complete,
483 * we will need to restore these values prior to running the actual
484 * command.
485 */
486 old_buffer = scmd->request_buffer;
487 old_bufflen = scmd->request_bufflen;
488 memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd));
489 old_data_direction = scmd->sc_data_direction;
490 old_cmd_len = scmd->cmd_len;
491 old_use_sg = scmd->use_sg;
492
493 if (copy_sense) {
494 int gfp_mask = GFP_ATOMIC;
495
496 if (shost->hostt->unchecked_isa_dma)
497 gfp_mask |= __GFP_DMA;
498
499 scmd->sc_data_direction = DMA_FROM_DEVICE;
500 scmd->request_bufflen = 252;
501 scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
502 if (!scmd->request_buffer)
503 return FAILED;
504 } else {
505 scmd->request_buffer = NULL;
506 scmd->request_bufflen = 0;
507 scmd->sc_data_direction = DMA_NONE;
508 }
509
510 scmd->underflow = 0;
511 scmd->use_sg = 0;
512 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
513
472 if (sdev->scsi_level <= SCSI_2) 514 if (sdev->scsi_level <= SCSI_2)
473 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | 515 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
474 (sdev->lun << 5 & 0xe0); 516 (sdev->lun << 5 & 0xe0);
475 517
518 /*
519 * Zero the sense buffer. The scsi spec mandates that any
520 * untransferred sense data should be interpreted as being zero.
521 */
522 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
523
476 shost->eh_action = &done; 524 shost->eh_action = &done;
477 525
478 spin_lock_irqsave(shost->host_lock, flags); 526 spin_lock_irqsave(shost->host_lock, flags);
@@ -522,6 +570,29 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
522 rtn = FAILED; 570 rtn = FAILED;
523 } 571 }
524 572
573
574 /*
575 * Last chance to have valid sense data.
576 */
577 if (copy_sense) {
578 if (!SCSI_SENSE_VALID(scmd)) {
579 memcpy(scmd->sense_buffer, scmd->request_buffer,
580 sizeof(scmd->sense_buffer));
581 }
582 kfree(scmd->request_buffer);
583 }
584
585
586 /*
587 * Restore original data
588 */
589 scmd->request_buffer = old_buffer;
590 scmd->request_bufflen = old_bufflen;
591 memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd));
592 scmd->sc_data_direction = old_data_direction;
593 scmd->cmd_len = old_cmd_len;
594 scmd->use_sg = old_use_sg;
595 scmd->result = old_result;
525 return rtn; 596 return rtn;
526} 597}
527 598
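
The reworked scsi_send_eh_cmnd() above snapshots the command fields it is about to clobber, optionally substitutes a freshly allocated sense buffer, and restores everything before returning. Here is a compact sketch of that snapshot/override/run/restore discipline with a toy command structure; calloc/free stand in for the kernel allocator and the opcode value is only illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_cmd {
    unsigned char  cmnd[16];
    void          *request_buffer;
    unsigned int   request_bufflen;
    int            result;
};

/* Pretend to issue the substituted command; just report what it sees. */
static int run_once(struct toy_cmd *cmd)
{
    printf("running opcode 0x%02x, bufflen %u\n",
           cmd->cmnd[0], cmd->request_bufflen);
    return 0;
}

/* Snapshot the fields we are about to overwrite, run a substitute command,
 * then restore the originals so the caller's command is untouched. */
static int send_eh_cmnd(struct toy_cmd *cmd, int copy_sense)
{
    unsigned char old_cmnd[sizeof(cmd->cmnd)];
    void *old_buffer = cmd->request_buffer;
    unsigned int old_bufflen = cmd->request_bufflen;
    int old_result = cmd->result;
    int rtn;

    memcpy(old_cmnd, cmd->cmnd, sizeof(cmd->cmnd));

    if (copy_sense) {
        cmd->request_bufflen = 252;          /* room for sense data */
        cmd->request_buffer = calloc(1, cmd->request_bufflen);
        if (!cmd->request_buffer)
            return -1;
    } else {
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
    }
    memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
    cmd->cmnd[0] = 0x03;                     /* e.g. REQUEST SENSE */

    rtn = run_once(cmd);

    if (copy_sense)
        free(cmd->request_buffer);

    /* Put the original command back exactly as we found it. */
    memcpy(cmd->cmnd, old_cmnd, sizeof(cmd->cmnd));
    cmd->request_buffer = old_buffer;
    cmd->request_bufflen = old_bufflen;
    cmd->result = old_result;
    return rtn;
}

int main(void)
{
    char data[64] = "payload";
    struct toy_cmd cmd = { { 0x2a }, data, sizeof(data), 0 };

    send_eh_cmnd(&cmd, 1);
    printf("restored opcode 0x%02x, bufflen %u\n",
           cmd.cmnd[0], cmd.request_bufflen);
    return 0;
}

Centralising the save/restore inside the send routine is what lets the callers below (request sense, TUR, start unit) drop their duplicated bookkeeping.
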
@@ -537,56 +608,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
537static int scsi_request_sense(struct scsi_cmnd *scmd) 608static int scsi_request_sense(struct scsi_cmnd *scmd)
538{ 609{
539 static unsigned char generic_sense[6] = 610 static unsigned char generic_sense[6] =
540 {REQUEST_SENSE, 0, 0, 0, 252, 0}; 611 {REQUEST_SENSE, 0, 0, 0, 252, 0};
541 unsigned char *scsi_result;
542 int saved_result;
543 int rtn;
544 612
545 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); 613 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
546 614 return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1);
547 scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0));
548
549
550 if (unlikely(!scsi_result)) {
551 printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
552 __FUNCTION__);
553 return FAILED;
554 }
555
556 /*
557 * zero the sense buffer. some host adapters automatically always
558 * request sense, so it is not a good idea that
559 * scmd->request_buffer and scmd->sense_buffer point to the same
560 * address (db). 0 is not a valid sense code.
561 */
562 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
563 memset(scsi_result, 0, 252);
564
565 saved_result = scmd->result;
566 scmd->request_buffer = scsi_result;
567 scmd->request_bufflen = 252;
568 scmd->use_sg = 0;
569 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
570 scmd->sc_data_direction = DMA_FROM_DEVICE;
571 scmd->underflow = 0;
572
573 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
574
575 /* last chance to have valid sense data */
576 if(!SCSI_SENSE_VALID(scmd)) {
577 memcpy(scmd->sense_buffer, scmd->request_buffer,
578 sizeof(scmd->sense_buffer));
579 }
580
581 kfree(scsi_result);
582
583 /*
584 * when we eventually call scsi_finish, we really wish to complete
585 * the original request, so let's restore the original data. (db)
586 */
587 scsi_setup_cmd_retry(scmd);
588 scmd->result = saved_result;
589 return rtn;
590} 615}
591 616
592/** 617/**
@@ -605,12 +630,6 @@ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
605{ 630{
606 scmd->device->host->host_failed--; 631 scmd->device->host->host_failed--;
607 scmd->eh_eflags = 0; 632 scmd->eh_eflags = 0;
608
609 /*
610 * set this back so that the upper level can correctly free up
611 * things.
612 */
613 scsi_setup_cmd_retry(scmd);
614 list_move_tail(&scmd->eh_entry, done_q); 633 list_move_tail(&scmd->eh_entry, done_q);
615} 634}
616EXPORT_SYMBOL(scsi_eh_finish_cmd); 635EXPORT_SYMBOL(scsi_eh_finish_cmd);
@@ -715,47 +734,26 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
715{ 734{
716 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; 735 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
717 int retry_cnt = 1, rtn; 736 int retry_cnt = 1, rtn;
718 int saved_result;
719 737
720retry_tur: 738retry_tur:
721 memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); 739 memcpy(scmd->cmnd, tur_command, sizeof(tur_command));
722 740
723 /*
724 * zero the sense buffer. the scsi spec mandates that any
725 * untransferred sense data should be interpreted as being zero.
726 */
727 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
728
729 saved_result = scmd->result;
730 scmd->request_buffer = NULL;
731 scmd->request_bufflen = 0;
732 scmd->use_sg = 0;
733 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
734 scmd->underflow = 0;
735 scmd->sc_data_direction = DMA_NONE;
736 741
737 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); 742 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0);
738 743
739 /*
740 * when we eventually call scsi_finish, we really wish to complete
741 * the original request, so let's restore the original data. (db)
742 */
743 scsi_setup_cmd_retry(scmd);
744 scmd->result = saved_result;
745
746 /*
747 * hey, we are done. let's look to see what happened.
748 */
749 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 744 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
750 __FUNCTION__, scmd, rtn)); 745 __FUNCTION__, scmd, rtn));
751 if (rtn == SUCCESS) 746
752 return 0; 747 switch (rtn) {
753 else if (rtn == NEEDS_RETRY) { 748 case NEEDS_RETRY:
754 if (retry_cnt--) 749 if (retry_cnt--)
755 goto retry_tur; 750 goto retry_tur;
751 /*FALLTHRU*/
752 case SUCCESS:
756 return 0; 753 return 0;
754 default:
755 return 1;
757 } 756 }
758 return 1;
759} 757}
760 758
761/** 759/**
@@ -837,44 +835,16 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
837static int scsi_eh_try_stu(struct scsi_cmnd *scmd) 835static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
838{ 836{
839 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 837 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
840 int rtn;
841 int saved_result;
842 838
843 if (!scmd->device->allow_restart) 839 if (scmd->device->allow_restart) {
844 return 1; 840 int rtn;
845
846 memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
847
848 /*
849 * zero the sense buffer. the scsi spec mandates that any
850 * untransferred sense data should be interpreted as being zero.
851 */
852 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
853
854 saved_result = scmd->result;
855 scmd->request_buffer = NULL;
856 scmd->request_bufflen = 0;
857 scmd->use_sg = 0;
858 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
859 scmd->underflow = 0;
860 scmd->sc_data_direction = DMA_NONE;
861 841
862 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT); 842 memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
863 843 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0);
864 /* 844 if (rtn == SUCCESS)
865 * when we eventually call scsi_finish, we really wish to complete 845 return 0;
866 * the original request, so let's restore the original data. (db) 846 }
867 */
868 scsi_setup_cmd_retry(scmd);
869 scmd->result = saved_result;
870 847
871 /*
872 * hey, we are done. let's look to see what happened.
873 */
874 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
875 __FUNCTION__, scmd, rtn));
876 if (rtn == SUCCESS)
877 return 0;
878 return 1; 848 return 1;
879} 849}
880 850
@@ -1684,8 +1654,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1684 1654
1685 scmd->scsi_done = scsi_reset_provider_done_command; 1655 scmd->scsi_done = scsi_reset_provider_done_command;
1686 scmd->done = NULL; 1656 scmd->done = NULL;
1687 scmd->buffer = NULL;
1688 scmd->bufflen = 0;
1689 scmd->request_buffer = NULL; 1657 scmd->request_buffer = NULL;
1690 scmd->request_bufflen = 0; 1658 scmd->request_bufflen = 0;
1691 1659
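
One more pattern worth noting from this file: scsi_eh_tur() now maps the result through a switch with a deliberate fall-through, so NEEDS_RETRY retries within its budget and then drops into the SUCCESS case. A tiny sketch of that bounded-retry shape is below; the enum values and issue_tur() behaviour are invented for the example.

#include <stdio.h>

enum eh_status { EH_SUCCESS, EH_NEEDS_RETRY, EH_FAILED };

/* Stand-in for issuing TEST UNIT READY: ask for one retry, then succeed. */
static enum eh_status issue_tur(void)
{
    static int calls;
    return (calls++ == 0) ? EH_NEEDS_RETRY : EH_SUCCESS;
}

/* Returns 0 if the device answered, 1 otherwise.  NEEDS_RETRY falls
 * through to the SUCCESS case once the retry budget is spent. */
static int eh_tur(void)
{
    int retry_cnt = 1;
    enum eh_status rtn;

retry:
    rtn = issue_tur();

    switch (rtn) {
    case EH_NEEDS_RETRY:
        if (retry_cnt--)
            goto retry;
        /* fall through */
    case EH_SUCCESS:
        return 0;
    default:
        return 1;
    }
}

int main(void)
{
    printf("tur result: %d\n", eh_tur());
    return 0;
}

The switch replaces the old if/else chain and an unreachable trailing return, which is exactly the shape the hunk at scsi_eh_tur() installs.
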
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a89c4115cfba..32293f451669 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
110 sshdr.asc, sshdr.ascq); 110 sshdr.asc, sshdr.ascq);
111 break; 111 break;
112 case NOT_READY: /* This happens if there is no disc in drive */ 112 case NOT_READY: /* This happens if there is no disc in drive */
113 if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { 113 if (sdev->removable)
114 printk(KERN_INFO "Device not ready. Make sure"
115 " there is a disc in the drive.\n");
116 break; 114 break;
117 }
118 case UNIT_ATTENTION: 115 case UNIT_ATTENTION:
119 if (sdev->removable) { 116 if (sdev->removable) {
120 sdev->changed = 1; 117 sdev->changed = 1;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 08af9aae7df3..077c1c691210 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -436,60 +436,16 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
436 * 436 *
437 * Arguments: cmd - command that is ready to be queued. 437 * Arguments: cmd - command that is ready to be queued.
438 * 438 *
439 * Returns: Nothing
440 *
441 * Notes: This function has the job of initializing a number of 439 * Notes: This function has the job of initializing a number of
442 * fields related to error handling. Typically this will 440 * fields related to error handling. Typically this will
443 * be called once for each command, as required. 441 * be called once for each command, as required.
444 */ 442 */
445static int scsi_init_cmd_errh(struct scsi_cmnd *cmd) 443static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
446{ 444{
447 cmd->serial_number = 0; 445 cmd->serial_number = 0;
448
449 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); 446 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
450
451 if (cmd->cmd_len == 0) 447 if (cmd->cmd_len == 0)
452 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 448 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
453
454 /*
455 * We need saved copies of a number of fields - this is because
456 * error handling may need to overwrite these with different values
457 * to run different commands, and once error handling is complete,
458 * we will need to restore these values prior to running the actual
459 * command.
460 */
461 cmd->old_use_sg = cmd->use_sg;
462 cmd->old_cmd_len = cmd->cmd_len;
463 cmd->sc_old_data_direction = cmd->sc_data_direction;
464 cmd->old_underflow = cmd->underflow;
465 memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
466 cmd->buffer = cmd->request_buffer;
467 cmd->bufflen = cmd->request_bufflen;
468
469 return 1;
470}
471
472/*
473 * Function: scsi_setup_cmd_retry()
474 *
475 * Purpose: Restore the command state for a retry
476 *
477 * Arguments: cmd - command to be restored
478 *
479 * Returns: Nothing
480 *
481 * Notes: Immediately prior to retrying a command, we need
482 * to restore certain fields that we saved above.
483 */
484void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
485{
486 memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
487 cmd->request_buffer = cmd->buffer;
488 cmd->request_bufflen = cmd->bufflen;
489 cmd->use_sg = cmd->old_use_sg;
490 cmd->cmd_len = cmd->old_cmd_len;
491 cmd->sc_data_direction = cmd->sc_old_data_direction;
492 cmd->underflow = cmd->old_underflow;
493} 449}
494 450
495void scsi_device_unbusy(struct scsi_device *sdev) 451void scsi_device_unbusy(struct scsi_device *sdev)
@@ -807,22 +763,13 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
807 */ 763 */
808static void scsi_release_buffers(struct scsi_cmnd *cmd) 764static void scsi_release_buffers(struct scsi_cmnd *cmd)
809{ 765{
810 struct request *req = cmd->request;
811
812 /*
813 * Free up any indirection buffers we allocated for DMA purposes.
814 */
815 if (cmd->use_sg) 766 if (cmd->use_sg)
816 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 767 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
817 else if (cmd->request_buffer != req->buffer)
818 kfree(cmd->request_buffer);
819 768
820 /* 769 /*
821 * Zero these out. They now point to freed memory, and it is 770 * Zero these out. They now point to freed memory, and it is
822 * dangerous to hang onto the pointers. 771 * dangerous to hang onto the pointers.
823 */ 772 */
824 cmd->buffer = NULL;
825 cmd->bufflen = 0;
826 cmd->request_buffer = NULL; 773 cmd->request_buffer = NULL;
827 cmd->request_bufflen = 0; 774 cmd->request_bufflen = 0;
828} 775}
@@ -858,7 +805,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
858void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 805void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859{ 806{
860 int result = cmd->result; 807 int result = cmd->result;
861 int this_count = cmd->bufflen; 808 int this_count = cmd->request_bufflen;
862 request_queue_t *q = cmd->device->request_queue; 809 request_queue_t *q = cmd->device->request_queue;
863 struct request *req = cmd->request; 810 struct request *req = cmd->request;
864 int clear_errors = 1; 811 int clear_errors = 1;
@@ -866,28 +813,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
866 int sense_valid = 0; 813 int sense_valid = 0;
867 int sense_deferred = 0; 814 int sense_deferred = 0;
868 815
869 /* 816 scsi_release_buffers(cmd);
870 * Free up any indirection buffers we allocated for DMA purposes.
871 * For the case of a READ, we need to copy the data out of the
872 * bounce buffer and into the real buffer.
873 */
874 if (cmd->use_sg)
875 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
876 else if (cmd->buffer != req->buffer) {
877 if (rq_data_dir(req) == READ) {
878 unsigned long flags;
879 char *to = bio_kmap_irq(req->bio, &flags);
880 memcpy(to, cmd->buffer, cmd->bufflen);
881 bio_kunmap_irq(to, &flags);
882 }
883 kfree(cmd->buffer);
884 }
885 817
886 if (result) { 818 if (result) {
887 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 819 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
888 if (sense_valid) 820 if (sense_valid)
889 sense_deferred = scsi_sense_is_deferred(&sshdr); 821 sense_deferred = scsi_sense_is_deferred(&sshdr);
890 } 822 }
823
891 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 824 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
892 req->errors = result; 825 req->errors = result;
893 if (result) { 826 if (result) {
@@ -908,15 +841,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
908 } 841 }
909 842
910 /* 843 /*
911 * Zero these out. They now point to freed memory, and it is
912 * dangerous to hang onto the pointers.
913 */
914 cmd->buffer = NULL;
915 cmd->bufflen = 0;
916 cmd->request_buffer = NULL;
917 cmd->request_bufflen = 0;
918
919 /*
920 * Next deal with any sectors which we were able to correctly 844 * Next deal with any sectors which we were able to correctly
921 * handle. 845 * handle.
922 */ 846 */
@@ -1012,7 +936,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1012 if (!(req->flags & REQ_QUIET)) { 936 if (!(req->flags & REQ_QUIET)) {
1013 scmd_printk(KERN_INFO, cmd, 937 scmd_printk(KERN_INFO, cmd,
1014 "Volume overflow, CDB: "); 938 "Volume overflow, CDB: ");
1015 __scsi_print_command(cmd->data_cmnd); 939 __scsi_print_command(cmd->cmnd);
1016 scsi_print_sense("", cmd); 940 scsi_print_sense("", cmd);
1017 } 941 }
1018 /* See SSC3rXX or current. */ 942 /* See SSC3rXX or current. */
@@ -1143,7 +1067,7 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1143 * successfully. Since this is a REQ_BLOCK_PC command the 1067 * successfully. Since this is a REQ_BLOCK_PC command the
1144 * caller should check the request's errors value 1068 * caller should check the request's errors value
1145 */ 1069 */
1146 scsi_io_completion(cmd, cmd->bufflen); 1070 scsi_io_completion(cmd, cmd->request_bufflen);
1147} 1071}
1148 1072
1149static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) 1073static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e2fbe9a9d5a9..ae24c85aaeea 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -57,7 +57,6 @@ extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
57 57
58/* scsi_lib.c */ 58/* scsi_lib.c */
59extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 59extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
60extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd);
61extern void scsi_device_unbusy(struct scsi_device *sdev); 60extern void scsi_device_unbusy(struct scsi_device *sdev);
62extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 61extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
63extern void scsi_next_command(struct scsi_cmnd *cmd); 62extern void scsi_next_command(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index dd075627e605..5a625c3fddae 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -41,6 +41,7 @@ struct sas_host_attrs {
41 struct mutex lock; 41 struct mutex lock;
42 u32 next_target_id; 42 u32 next_target_id;
43 u32 next_expander_id; 43 u32 next_expander_id;
44 int next_port_id;
44}; 45};
45#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) 46#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)
46 47
@@ -146,6 +147,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
146 mutex_init(&sas_host->lock); 147 mutex_init(&sas_host->lock);
147 sas_host->next_target_id = 0; 148 sas_host->next_target_id = 0;
148 sas_host->next_expander_id = 0; 149 sas_host->next_expander_id = 0;
150 sas_host->next_port_id = 0;
149 return 0; 151 return 0;
150} 152}
151 153
@@ -327,7 +329,7 @@ sas_phy_protocol_attr(identify.target_port_protocols,
327sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", 329sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
328 unsigned long long); 330 unsigned long long);
329sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 331sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
330//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8); 332//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
331sas_phy_linkspeed_attr(negotiated_linkrate); 333sas_phy_linkspeed_attr(negotiated_linkrate);
332sas_phy_linkspeed_attr(minimum_linkrate_hw); 334sas_phy_linkspeed_attr(minimum_linkrate_hw);
333sas_phy_linkspeed_attr(minimum_linkrate); 335sas_phy_linkspeed_attr(minimum_linkrate);
@@ -590,6 +592,38 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
590} 592}
591EXPORT_SYMBOL(sas_port_alloc); 593EXPORT_SYMBOL(sas_port_alloc);
592 594
595/** sas_port_alloc_num - allocate and initialize a SAS port structure
596 *
597 * @parent: parent device
598 *
599 * Allocates a SAS port structure and a number to go with it. This
600 * interface is really for adapters where the port number has no
 601 * meaning, so the sas class should manage them. It will be added to
602 * the device tree below the device specified by @parent which must be
603 * either a Scsi_Host or a sas_expander_device.
604 *
605 * Returns %NULL on error
606 */
607struct sas_port *sas_port_alloc_num(struct device *parent)
608{
609 int index;
610 struct Scsi_Host *shost = dev_to_shost(parent);
611 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
612
613 /* FIXME: use idr for this eventually */
614 mutex_lock(&sas_host->lock);
615 if (scsi_is_sas_expander_device(parent)) {
616 struct sas_rphy *rphy = dev_to_rphy(parent);
617 struct sas_expander_device *exp = rphy_to_expander_device(rphy);
618
619 index = exp->next_port_id++;
620 } else
621 index = sas_host->next_port_id++;
622 mutex_unlock(&sas_host->lock);
623 return sas_port_alloc(parent, index);
624}
625EXPORT_SYMBOL(sas_port_alloc_num);
626
593/** 627/**
594 * sas_port_add - add a SAS port to the device hierarchy 628 * sas_port_add - add a SAS port to the device hierarchy
595 * 629 *
@@ -658,6 +692,13 @@ void sas_port_delete(struct sas_port *port)
658 } 692 }
659 mutex_unlock(&port->phy_list_mutex); 693 mutex_unlock(&port->phy_list_mutex);
660 694
695 if (port->is_backlink) {
696 struct device *parent = port->dev.parent;
697
698 sysfs_remove_link(&port->dev.kobj, parent->bus_id);
699 port->is_backlink = 0;
700 }
701
661 transport_remove_device(dev); 702 transport_remove_device(dev);
662 device_del(dev); 703 device_del(dev);
663 transport_destroy_device(dev); 704 transport_destroy_device(dev);
@@ -733,6 +774,19 @@ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy)
733} 774}
734EXPORT_SYMBOL(sas_port_delete_phy); 775EXPORT_SYMBOL(sas_port_delete_phy);
735 776
777void sas_port_mark_backlink(struct sas_port *port)
778{
779 struct device *parent = port->dev.parent->parent->parent;
780
781 if (port->is_backlink)
782 return;
783 port->is_backlink = 1;
784 sysfs_create_link(&port->dev.kobj, &parent->kobj,
785 parent->bus_id);
786
787}
788EXPORT_SYMBOL(sas_port_mark_backlink);
789
736/* 790/*
737 * SAS remote PHY attributes. 791 * SAS remote PHY attributes.
738 */ 792 */
@@ -1140,7 +1194,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
1140 1194
1141 if (identify->device_type == SAS_END_DEVICE && 1195 if (identify->device_type == SAS_END_DEVICE &&
1142 rphy->scsi_target_id != -1) { 1196 rphy->scsi_target_id != -1) {
1143 scsi_scan_target(&rphy->dev, parent->port_identifier, 1197 scsi_scan_target(&rphy->dev, 0,
1144 rphy->scsi_target_id, ~0, 0); 1198 rphy->scsi_target_id, ~0, 0);
1145 } 1199 }
1146 1200
@@ -1242,15 +1296,13 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1242 1296
1243 mutex_lock(&sas_host->lock); 1297 mutex_lock(&sas_host->lock);
1244 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 1298 list_for_each_entry(rphy, &sas_host->rphy_list, list) {
1245 struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
1246
1247 if (rphy->identify.device_type != SAS_END_DEVICE || 1299 if (rphy->identify.device_type != SAS_END_DEVICE ||
1248 rphy->scsi_target_id == -1) 1300 rphy->scsi_target_id == -1)
1249 continue; 1301 continue;
1250 1302
1251 if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && 1303 if ((channel == SCAN_WILD_CARD || channel == 0) &&
1252 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { 1304 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
1253 scsi_scan_target(&rphy->dev, parent->port_identifier, 1305 scsi_scan_target(&rphy->dev, 0,
1254 rphy->scsi_target_id, lun, 1); 1306 rphy->scsi_target_id, lun, 1);
1255 } 1307 }
1256 } 1308 }
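
sas_port_alloc_num() above hands out port numbers from a per-host (or per-expander) counter taken under the host mutex, with a FIXME noting that an idr should eventually replace it. The sketch below shows only that counter-under-lock idea in plain pthread C; struct toy_host and alloc_port_num() are invented names for illustration.

#include <pthread.h>
#include <stdio.h>

/* Per-host bookkeeping: one lock, one monotonically increasing port id.
 * A real allocator that must reuse freed ids would want an idr/bitmap. */
struct toy_host {
    pthread_mutex_t lock;
    int             next_port_id;
};

static int alloc_port_num(struct toy_host *host)
{
    int id;

    pthread_mutex_lock(&host->lock);
    id = host->next_port_id++;
    pthread_mutex_unlock(&host->lock);
    return id;
}

int main(void)
{
    struct toy_host host = { PTHREAD_MUTEX_INITIALIZER, 0 };
    int a = alloc_port_num(&host);
    int b = alloc_port_num(&host);
    int c = alloc_port_num(&host);

    printf("ports: %d %d %d\n", a, b, c);
    return 0;
}

Because the ids are never recycled, the scheme is only suitable where the port number carries no meaning outside sysfs naming, which is the case this interface is documented for.
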
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3225d31449e1..98bd3aab9739 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -502,8 +502,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
502 SCpnt->cmnd[4] = (unsigned char) this_count; 502 SCpnt->cmnd[4] = (unsigned char) this_count;
503 SCpnt->cmnd[5] = 0; 503 SCpnt->cmnd[5] = 0;
504 } 504 }
505 SCpnt->request_bufflen = SCpnt->bufflen = 505 SCpnt->request_bufflen = this_count * sdp->sector_size;
506 this_count * sdp->sector_size;
507 506
508 /* 507 /*
509 * We shouldn't disconnect in the middle of a sector, so with a dumb 508 * We shouldn't disconnect in the middle of a sector, so with a dumb
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index 3f312a84c6a7..2679ea8bff1a 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -1002,7 +1002,7 @@ connect_loop:
1002 } 1002 }
1003#endif 1003#endif
1004 1004
1005 buffer = (struct scatterlist *) SCint->buffer; 1005 buffer = (struct scatterlist *) SCint->request_buffer;
1006 len = buffer->length; 1006 len = buffer->length;
1007 data = page_address(buffer->page) + buffer->offset; 1007 data = page_address(buffer->page) + buffer->offset;
1008 } else { 1008 } else {
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index fd94408577e5..fae6e95a6298 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -360,7 +360,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
360 "mismatch count %d, bytes %d\n", 360 "mismatch count %d, bytes %d\n",
361 size, SCpnt->request_bufflen); 361 size, SCpnt->request_bufflen);
362 if (SCpnt->request_bufflen > size) 362 if (SCpnt->request_bufflen > size)
363 SCpnt->request_bufflen = SCpnt->bufflen = size; 363 SCpnt->request_bufflen = size;
364 } 364 }
365 } 365 }
366 366
@@ -387,8 +387,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
387 387
388 if (this_count > 0xffff) { 388 if (this_count > 0xffff) {
389 this_count = 0xffff; 389 this_count = 0xffff;
390 SCpnt->request_bufflen = SCpnt->bufflen = 390 SCpnt->request_bufflen = this_count * s_size;
391 this_count * s_size;
392 } 391 }
393 392
394 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 393 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 756ceb93ddc8..7f669b600677 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -368,7 +368,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], 368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); 369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
370 if (cmdstatp->have_sense) 370 if (cmdstatp->have_sense)
371 __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 371 __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
372 } ) /* end DEB */ 372 } ) /* end DEB */
373 if (!debugging) { /* Abnormal conditions for tape */ 373 if (!debugging) { /* Abnormal conditions for tape */
374 if (!cmdstatp->have_sense) 374 if (!cmdstatp->have_sense)
@@ -384,9 +384,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
384 scode != VOLUME_OVERFLOW && 384 scode != VOLUME_OVERFLOW &&
385 SRpnt->cmd[0] != MODE_SENSE && 385 SRpnt->cmd[0] != MODE_SENSE &&
386 SRpnt->cmd[0] != TEST_UNIT_READY) { 386 SRpnt->cmd[0] != TEST_UNIT_READY) {
387 printk(KERN_WARNING "%s: Error with sense data: ", name); 387
388 __scsi_print_sense("st", SRpnt->sense, 388 __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
389 SCSI_SENSE_BUFFERSIZE);
390 } 389 }
391 } 390 }
392 391
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 2ebe0d663899..2f8073b73bf3 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -517,7 +517,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
517 */ 517 */
518 518
519 if (cmd->use_sg) { 519 if (cmd->use_sg) {
520 cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 520 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
521 cmd->SCp.buffers_residual = cmd->use_sg - 1; 521 cmd->SCp.buffers_residual = cmd->use_sg - 1;
522 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); 522 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
523 cmd->SCp.this_residual = cmd->SCp.buffer->length; 523 cmd->SCp.this_residual = cmd->SCp.buffer->length;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 1f328cae5c05..6b60536ac92b 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -347,7 +347,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
347static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 347static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
348{ 348{
349 int sz = sp->use_sg - 1; 349 int sz = sp->use_sg - 1;
350 struct scatterlist *sg = (struct scatterlist *)sp->buffer; 350 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
351 351
352 while(sz >= 0) { 352 while(sz >= 0) {
353 dvma_unmap((char *)sg[sz].dma_address); 353 dvma_unmap((char *)sg[sz].dma_address);
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 680f38ab60d8..2083454db511 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -373,7 +373,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
373 */ 373 */
374 374
375 if (cmd->use_sg) { 375 if (cmd->use_sg) {
376 cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 376 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
377 cmd->SCp.buffers_residual = cmd->use_sg - 1; 377 cmd->SCp.buffers_residual = cmd->use_sg - 1;
378 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + 378 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
379 cmd->SCp.buffer->offset; 379 cmd->SCp.buffer->offset;
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 0dbd4df44c05..dc673e1b6fd9 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -1047,12 +1047,13 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
1047 up = &sunsab_ports[inst * 2]; 1047 up = &sunsab_ports[inst * 2];
1048 1048
1049 err = sunsab_init_one(&up[0], op, 1049 err = sunsab_init_one(&up[0], op,
1050 sizeof(union sab82532_async_regs), 1050 0,
1051 (inst * 2) + 0); 1051 (inst * 2) + 0);
1052 if (err) 1052 if (err)
1053 return err; 1053 return err;
1054 1054
1055 err = sunsab_init_one(&up[0], op, 0, 1055 err = sunsab_init_one(&up[1], op,
1056 sizeof(union sab82532_async_regs),
1056 (inst * 2) + 1); 1057 (inst * 2) + 1);
1057 if (err) { 1058 if (err) {
1058 of_iounmap(up[0].port.membase, 1059 of_iounmap(up[0].port.membase,
@@ -1117,7 +1118,7 @@ static int __init sunsab_init(void)
1117 int err; 1118 int err;
1118 1119
1119 num_channels = 0; 1120 num_channels = 0;
1120 for_each_node_by_name(dp, "su") 1121 for_each_node_by_name(dp, "se")
1121 num_channels += 2; 1122 num_channels += 2;
1122 for_each_node_by_name(dp, "serial") { 1123 for_each_node_by_name(dp, "serial") {
1123 if (of_device_is_compatible(dp, "sab82532")) 1124 if (of_device_is_compatible(dp, "sab82532"))
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 93bdaa3169fc..d3a5aeee73a3 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1200,6 +1200,11 @@ static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up)
1200 if (up->port.type == PORT_UNKNOWN) 1200 if (up->port.type == PORT_UNKNOWN)
1201 return -ENODEV; 1201 return -ENODEV;
1202 1202
1203 printk("%s: %s port at %lx, irq %u\n",
1204 to_of_device(up->port.dev)->node->full_name,
1205 (up->su_type == SU_PORT_KBD) ? "Keyboard" : "Mouse",
1206 up->port.mapbase, up->port.irq);
1207
1203#ifdef CONFIG_SERIO 1208#ifdef CONFIG_SERIO
1204 serio = &up->serio; 1209 serio = &up->serio;
1205 serio->port_data = up; 1210 serio->port_data = up;
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index a1456d9352cb..47bc3d57e019 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -68,9 +68,6 @@ static int num_sunzilog;
68#define NUM_SUNZILOG num_sunzilog 68#define NUM_SUNZILOG num_sunzilog
69#define NUM_CHANNELS (NUM_SUNZILOG * 2) 69#define NUM_CHANNELS (NUM_SUNZILOG * 2)
70 70
71#define KEYBOARD_LINE 0x2
72#define MOUSE_LINE 0x3
73
74#define ZS_CLOCK 4915200 /* Zilog input clock rate. */ 71#define ZS_CLOCK 4915200 /* Zilog input clock rate. */
75#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */ 72#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */
76 73
@@ -1225,12 +1222,10 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
1225{ 1222{
1226 int baud, brg; 1223 int baud, brg;
1227 1224
1228 if (channel == KEYBOARD_LINE) { 1225 if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
1229 up->flags |= SUNZILOG_FLAG_CONS_KEYB;
1230 up->cflag = B1200 | CS8 | CLOCAL | CREAD; 1226 up->cflag = B1200 | CS8 | CLOCAL | CREAD;
1231 baud = 1200; 1227 baud = 1200;
1232 } else { 1228 } else {
1233 up->flags |= SUNZILOG_FLAG_CONS_MOUSE;
1234 up->cflag = B4800 | CS8 | CLOCAL | CREAD; 1229 up->cflag = B4800 | CS8 | CLOCAL | CREAD;
1235 baud = 4800; 1230 baud = 4800;
1236 } 1231 }
@@ -1243,14 +1238,14 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
1243} 1238}
1244 1239
1245#ifdef CONFIG_SERIO 1240#ifdef CONFIG_SERIO
1246static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int channel) 1241static void __init sunzilog_register_serio(struct uart_sunzilog_port *up)
1247{ 1242{
1248 struct serio *serio = &up->serio; 1243 struct serio *serio = &up->serio;
1249 1244
1250 serio->port_data = up; 1245 serio->port_data = up;
1251 1246
1252 serio->id.type = SERIO_RS232; 1247 serio->id.type = SERIO_RS232;
1253 if (channel == KEYBOARD_LINE) { 1248 if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
1254 serio->id.proto = SERIO_SUNKBD; 1249 serio->id.proto = SERIO_SUNKBD;
1255 strlcpy(serio->name, "zskbd", sizeof(serio->name)); 1250 strlcpy(serio->name, "zskbd", sizeof(serio->name));
1256 } else { 1251 } else {
@@ -1259,7 +1254,8 @@ static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int ch
1259 strlcpy(serio->name, "zsms", sizeof(serio->name)); 1254 strlcpy(serio->name, "zsms", sizeof(serio->name));
1260 } 1255 }
1261 strlcpy(serio->phys, 1256 strlcpy(serio->phys,
1262 (channel == KEYBOARD_LINE ? "zs/serio0" : "zs/serio1"), 1257 ((up->flags & SUNZILOG_FLAG_CONS_KEYB) ?
1258 "zs/serio0" : "zs/serio1"),
1263 sizeof(serio->phys)); 1259 sizeof(serio->phys));
1264 1260
1265 serio->write = sunzilog_serio_write; 1261 serio->write = sunzilog_serio_write;
@@ -1286,8 +1282,8 @@ static void __init sunzilog_init_hw(struct uart_sunzilog_port *up)
1286 (void) read_zsreg(channel, R0); 1282 (void) read_zsreg(channel, R0);
1287 } 1283 }
1288 1284
1289 if (up->port.line == KEYBOARD_LINE || 1285 if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
1290 up->port.line == MOUSE_LINE) { 1286 SUNZILOG_FLAG_CONS_MOUSE)) {
1291 sunzilog_init_kbdms(up, up->port.line); 1287 sunzilog_init_kbdms(up, up->port.line);
1292 up->curregs[R9] |= (NV | MIE); 1288 up->curregs[R9] |= (NV | MIE);
1293 write_zsreg(channel, R9, up->curregs[R9]); 1289 write_zsreg(channel, R9, up->curregs[R9]);
@@ -1313,37 +1309,26 @@ static void __init sunzilog_init_hw(struct uart_sunzilog_port *up)
1313 spin_unlock_irqrestore(&up->port.lock, flags); 1309 spin_unlock_irqrestore(&up->port.lock, flags);
1314 1310
1315#ifdef CONFIG_SERIO 1311#ifdef CONFIG_SERIO
1316 if (up->port.line == KEYBOARD_LINE || up->port.line == MOUSE_LINE) 1312 if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
1317 sunzilog_register_serio(up, up->port.line); 1313 SUNZILOG_FLAG_CONS_MOUSE))
1314 sunzilog_register_serio(up);
1318#endif 1315#endif
1319} 1316}
1320 1317
1321static int __devinit zs_get_instance(struct device_node *dp)
1322{
1323 int ret;
1324
1325 ret = of_getintprop_default(dp, "slave", -1);
1326 if (ret != -1)
1327 return ret;
1328
1329 if (of_find_property(dp, "keyboard", NULL))
1330 ret = 1;
1331 else
1332 ret = 0;
1333
1334 return ret;
1335}
1336
1337static int zilog_irq = -1; 1318static int zilog_irq = -1;
1338 1319
1339static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *match) 1320static int __devinit zs_probe(struct of_device *op, const struct of_device_id *match)
1340{ 1321{
1341 struct of_device *op = to_of_device(&dev->dev); 1322 static int inst;
1342 struct uart_sunzilog_port *up; 1323 struct uart_sunzilog_port *up;
1343 struct zilog_layout __iomem *rp; 1324 struct zilog_layout __iomem *rp;
1344 int inst = zs_get_instance(dev->node); 1325 int keyboard_mouse;
1345 int err; 1326 int err;
1346 1327
1328 keyboard_mouse = 0;
1329 if (of_find_property(op->node, "keyboard", NULL))
1330 keyboard_mouse = 1;
1331
1347 sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0, 1332 sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0,
1348 sizeof(struct zilog_layout), 1333 sizeof(struct zilog_layout),
1349 "zs"); 1334 "zs");
@@ -1352,16 +1337,8 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
1352 1337
1353 rp = sunzilog_chip_regs[inst]; 1338 rp = sunzilog_chip_regs[inst];
1354 1339
1355 if (zilog_irq == -1) { 1340 if (zilog_irq == -1)
1356 zilog_irq = op->irqs[0]; 1341 zilog_irq = op->irqs[0];
1357 err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
1358 "zs", sunzilog_irq_chain);
1359 if (err) {
1360 of_iounmap(rp, sizeof(struct zilog_layout));
1361
1362 return err;
1363 }
1364 }
1365 1342
1366 up = &sunzilog_port_table[inst * 2]; 1343 up = &sunzilog_port_table[inst * 2];
1367 1344
@@ -1378,7 +1355,7 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
1378 up[0].port.line = (inst * 2) + 0; 1355 up[0].port.line = (inst * 2) + 0;
1379 up[0].port.dev = &op->dev; 1356 up[0].port.dev = &op->dev;
1380 up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A; 1357 up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
1381 if (inst == 1) 1358 if (keyboard_mouse)
1382 up[0].flags |= SUNZILOG_FLAG_CONS_KEYB; 1359 up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
1383 sunzilog_init_hw(&up[0]); 1360 sunzilog_init_hw(&up[0]);
1384 1361
@@ -1395,11 +1372,11 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
1395 up[1].port.line = (inst * 2) + 1; 1372 up[1].port.line = (inst * 2) + 1;
1396 up[1].port.dev = &op->dev; 1373 up[1].port.dev = &op->dev;
1397 up[1].flags |= 0; 1374 up[1].flags |= 0;
1398 if (inst == 1) 1375 if (keyboard_mouse)
1399 up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE; 1376 up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
1400 sunzilog_init_hw(&up[1]); 1377 sunzilog_init_hw(&up[1]);
1401 1378
1402 if (inst != 1) { 1379 if (!keyboard_mouse) {
1403 err = uart_add_one_port(&sunzilog_reg, &up[0].port); 1380 err = uart_add_one_port(&sunzilog_reg, &up[0].port);
1404 if (err) { 1381 if (err) {
1405 of_iounmap(rp, sizeof(struct zilog_layout)); 1382 of_iounmap(rp, sizeof(struct zilog_layout));
@@ -1411,9 +1388,18 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
1411 of_iounmap(rp, sizeof(struct zilog_layout)); 1388 of_iounmap(rp, sizeof(struct zilog_layout));
1412 return err; 1389 return err;
1413 } 1390 }
1391 } else {
1392 printk(KERN_INFO "%s: Keyboard at MMIO %lx (irq = %d) "
1393 "is a zs\n",
1394 op->dev.bus_id, up[0].port.mapbase, op->irqs[0]);
1395 printk(KERN_INFO "%s: Mouse at MMIO %lx (irq = %d) "
1396 "is a zs\n",
1397 op->dev.bus_id, up[1].port.mapbase, op->irqs[0]);
1414 } 1398 }
1415 1399
1416 dev_set_drvdata(&dev->dev, &up[0]); 1400 dev_set_drvdata(&op->dev, &up[0]);
1401
1402 inst++;
1417 1403
1418 return 0; 1404 return 0;
1419} 1405}
@@ -1462,36 +1448,65 @@ static struct of_platform_driver zs_driver = {
1462static int __init sunzilog_init(void) 1448static int __init sunzilog_init(void)
1463{ 1449{
1464 struct device_node *dp; 1450 struct device_node *dp;
1465 int err; 1451 int err, uart_count;
1452 int num_keybms;
1466 1453
1467 NUM_SUNZILOG = 0; 1454 NUM_SUNZILOG = 0;
1468 for_each_node_by_name(dp, "zs") 1455 num_keybms = 0;
1456 for_each_node_by_name(dp, "zs") {
1469 NUM_SUNZILOG++; 1457 NUM_SUNZILOG++;
1458 if (of_find_property(dp, "keyboard", NULL))
1459 num_keybms++;
1460 }
1470 1461
1462 uart_count = 0;
1471 if (NUM_SUNZILOG) { 1463 if (NUM_SUNZILOG) {
1472 int uart_count; 1464 int uart_count;
1473 1465
1474 err = sunzilog_alloc_tables(); 1466 err = sunzilog_alloc_tables();
1475 if (err) 1467 if (err)
1476 return err; 1468 goto out;
1477 1469
1478 /* Subtract 1 for keyboard, 1 for mouse. */ 1470 uart_count = (NUM_SUNZILOG * 2) - (2 * num_keybms);
1479 uart_count = (NUM_SUNZILOG * 2) - 2;
1480 1471
1481 sunzilog_reg.nr = uart_count; 1472 sunzilog_reg.nr = uart_count;
1482 sunzilog_reg.minor = sunserial_current_minor; 1473 sunzilog_reg.minor = sunserial_current_minor;
1483 err = uart_register_driver(&sunzilog_reg); 1474 err = uart_register_driver(&sunzilog_reg);
1484 if (err) { 1475 if (err)
1485 sunzilog_free_tables(); 1476 goto out_free_tables;
1486 return err; 1477
1487 }
1488 sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64; 1478 sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64;
1489 sunzilog_reg.cons = SUNZILOG_CONSOLE(); 1479 sunzilog_reg.cons = SUNZILOG_CONSOLE();
1490 1480
1491 sunserial_current_minor += uart_count; 1481 sunserial_current_minor += uart_count;
1492 } 1482 }
1493 1483
1494 return of_register_driver(&zs_driver, &of_bus_type); 1484 err = of_register_driver(&zs_driver, &of_bus_type);
1485 if (err)
1486 goto out_unregister_uart;
1487
1488 if (zilog_irq != -1) {
1489 err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
1490 "zs", sunzilog_irq_chain);
1491 if (err)
1492 goto out_unregister_driver;
1493 }
1494
1495out:
1496 return err;
1497
1498out_unregister_driver:
1499 of_unregister_driver(&zs_driver);
1500
1501out_unregister_uart:
1502 if (NUM_SUNZILOG) {
1503 uart_unregister_driver(&sunzilog_reg);
1504 sunzilog_reg.cons = NULL;
1505 }
1506
1507out_free_tables:
1508 sunzilog_free_tables();
1509 goto out;
1495} 1510}
1496 1511
1497static void __exit sunzilog_exit(void) 1512static void __exit sunzilog_exit(void)
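
The sunzilog changes above replace the hard-coded KEYBOARD_LINE/MOUSE_LINE numbers with per-port flags derived from a "keyboard" device-node property, and defer the shared IRQ request to module init with a goto cleanup ladder. The sketch below illustrates only the flag-driven channel setup; has_keyboard_property() is a hypothetical stand-in for the of_find_property(node, "keyboard", NULL) test in the probe hunk.

#include <stdio.h>

#define FLAG_CONS_KEYB  0x1
#define FLAG_CONS_MOUSE 0x2

struct toy_port {
    unsigned int flags;
    int          baud;
};

/* Configure a channel from its flags rather than from a magic line number:
 * keyboard consoles run at 1200 baud, mouse consoles at 4800. */
static void init_kbdms(struct toy_port *up)
{
    if (up->flags & FLAG_CONS_KEYB)
        up->baud = 1200;
    else if (up->flags & FLAG_CONS_MOUSE)
        up->baud = 4800;
}

/* Hypothetical probe-time check standing in for the device-tree property. */
static int has_keyboard_property(int node) { return node == 1; }

int main(void)
{
    struct toy_port chan_a = { 0, 0 }, chan_b = { 0, 0 };
    int node = 1;

    if (has_keyboard_property(node)) {
        chan_a.flags |= FLAG_CONS_KEYB;   /* channel A drives the keyboard */
        chan_b.flags |= FLAG_CONS_MOUSE;  /* channel B drives the mouse */
    }
    init_kbdms(&chan_a);
    init_kbdms(&chan_b);

    printf("A: %d baud, B: %d baud\n", chan_a.baud, chan_b.baud);
    return 0;
}

Driving the decision from the device node rather than from "instance == 1" is what lets the driver cope with machines whose keyboard/mouse zs chip is not the second one probed.
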
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 3badb48d662b..6533b0f39231 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1518,6 +1518,26 @@ config FB_PXA_PARAMETERS
1518 1518
1519 <file:Documentation/fb/pxafb.txt> describes the available parameters. 1519 <file:Documentation/fb/pxafb.txt> describes the available parameters.
1520 1520
1521config FB_MBX
1522 tristate "2700G LCD framebuffer support"
1523 depends on FB && ARCH_PXA
1524 select FB_CFB_FILLRECT
1525 select FB_CFB_COPYAREA
1526 select FB_CFB_IMAGEBLIT
1527 ---help---
1528 Framebuffer driver for the Intel 2700G (Marathon) Graphics
1529 Accelerator
1530
1531config FB_MBX_DEBUG
1532 bool "Enable debugging info via debugfs"
1533 depends on FB_MBX && DEBUG_FS
1534 default n
1535 ---help---
1536 Enable this if you want debugging information using the debug
1537 filesystem (debugfs)
1538
1539 If unsure, say N.
1540
1521config FB_W100 1541config FB_W100
1522 tristate "W100 frame buffer support" 1542 tristate "W100 frame buffer support"
1523 depends on FB && PXA_SHARPSL 1543 depends on FB && PXA_SHARPSL
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 6283d015f8f5..95563c9c6b9c 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_FB_SIS) += sis/
38obj-$(CONFIG_FB_KYRO) += kyro/ 38obj-$(CONFIG_FB_KYRO) += kyro/
39obj-$(CONFIG_FB_SAVAGE) += savage/ 39obj-$(CONFIG_FB_SAVAGE) += savage/
40obj-$(CONFIG_FB_GEODE) += geode/ 40obj-$(CONFIG_FB_GEODE) += geode/
41obj-$(CONFIG_FB_MBX) += mbx/
41obj-$(CONFIG_FB_I810) += vgastate.o 42obj-$(CONFIG_FB_I810) += vgastate.o
42obj-$(CONFIG_FB_NEOMAGIC) += neofb.o vgastate.o 43obj-$(CONFIG_FB_NEOMAGIC) += neofb.o vgastate.o
43obj-$(CONFIG_FB_VIRGE) += virgefb.o 44obj-$(CONFIG_FB_VIRGE) += virgefb.o
diff --git a/drivers/video/mbx/Makefile b/drivers/video/mbx/Makefile
new file mode 100644
index 000000000000..16c1165cf9c7
--- /dev/null
+++ b/drivers/video/mbx/Makefile
@@ -0,0 +1,4 @@
1# Makefile for the 2700G controller driver.
2
3obj-$(CONFIG_FB_MBX) += mbxfb.o
4obj-$(CONFIG_FB_MBX_DEBUG) += mbxfbdebugfs.o
diff --git a/drivers/video/mbx/mbxdebugfs.c b/drivers/video/mbx/mbxdebugfs.c
new file mode 100644
index 000000000000..84aab3ad024e
--- /dev/null
+++ b/drivers/video/mbx/mbxdebugfs.c
@@ -0,0 +1,188 @@
1#include <linux/debugfs.h>
2
3#define BIG_BUFFER_SIZE (1024)
4
5static char big_buffer[BIG_BUFFER_SIZE];
6
7struct mbxfb_debugfs_data {
8 struct dentry *dir;
9 struct dentry *sysconf;
10 struct dentry *clock;
11 struct dentry *display;
12 struct dentry *gsctl;
13};
14
15static int open_file_generic(struct inode *inode, struct file *file)
16{
17 file->private_data = inode->u.generic_ip;
18 return 0;
19}
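/*
 * debugfs_create_file() stores its "data" argument (the struct fb_info
 * pointer passed in mbxfb_debugfs_init() below) in inode->u.generic_ip,
 * which the open hook above copies into file->private_data.  The read
 * handlers that follow go straight to the memory-mapped registers, so
 * they do not currently use that pointer.
 */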
20
21static ssize_t write_file_dummy(struct file *file, const char __user *buf,
22 size_t count, loff_t *ppos)
23{
24 return count;
25}
26
27static ssize_t sysconf_read_file(struct file *file, char __user *userbuf,
28 size_t count, loff_t *ppos)
29{
30 char * s = big_buffer;
31
32 s += sprintf(s, "SYSCFG = %08lx\n", SYSCFG);
33 s += sprintf(s, "PFBASE = %08lx\n", PFBASE);
34 s += sprintf(s, "PFCEIL = %08lx\n", PFCEIL);
35 s += sprintf(s, "POLLFLAG = %08lx\n", POLLFLAG);
36 s += sprintf(s, "SYSRST = %08lx\n", SYSRST);
37
38 return simple_read_from_buffer(userbuf, count, ppos,
39 big_buffer, s-big_buffer);
40}
41
42
43static ssize_t gsctl_read_file(struct file *file, char __user *userbuf,
44 size_t count, loff_t *ppos)
45{
46 char * s = big_buffer;
47
48 s += sprintf(s, "GSCTRL = %08lx\n", GSCTRL);
49 s += sprintf(s, "VSCTRL = %08lx\n", VSCTRL);
50 s += sprintf(s, "GBBASE = %08lx\n", GBBASE);
51 s += sprintf(s, "VBBASE = %08lx\n", VBBASE);
52 s += sprintf(s, "GDRCTRL = %08lx\n", GDRCTRL);
53 s += sprintf(s, "VCMSK = %08lx\n", VCMSK);
54 s += sprintf(s, "GSCADR = %08lx\n", GSCADR);
55 s += sprintf(s, "VSCADR = %08lx\n", VSCADR);
56 s += sprintf(s, "VUBASE = %08lx\n", VUBASE);
57 s += sprintf(s, "VVBASE = %08lx\n", VVBASE);
58 s += sprintf(s, "GSADR = %08lx\n", GSADR);
59 s += sprintf(s, "VSADR = %08lx\n", VSADR);
60 s += sprintf(s, "HCCTRL = %08lx\n", HCCTRL);
61 s += sprintf(s, "HCSIZE = %08lx\n", HCSIZE);
62 s += sprintf(s, "HCPOS = %08lx\n", HCPOS);
63 s += sprintf(s, "HCBADR = %08lx\n", HCBADR);
64 s += sprintf(s, "HCCKMSK = %08lx\n", HCCKMSK);
65 s += sprintf(s, "GPLUT = %08lx\n", GPLUT);
66
67 return simple_read_from_buffer(userbuf, count, ppos,
68 big_buffer, s-big_buffer);
69}
70
71static ssize_t display_read_file(struct file *file, char __user *userbuf,
72 size_t count, loff_t *ppos)
73{
74 char * s = big_buffer;
75
76 s += sprintf(s, "DSCTRL = %08lx\n", DSCTRL);
77 s += sprintf(s, "DHT01 = %08lx\n", DHT01);
78 s += sprintf(s, "DHT02 = %08lx\n", DHT02);
79 s += sprintf(s, "DHT03 = %08lx\n", DHT03);
80 s += sprintf(s, "DVT01 = %08lx\n", DVT01);
81 s += sprintf(s, "DVT02 = %08lx\n", DVT02);
82 s += sprintf(s, "DVT03 = %08lx\n", DVT03);
83 s += sprintf(s, "DBCOL = %08lx\n", DBCOL);
84 s += sprintf(s, "BGCOLOR = %08lx\n", BGCOLOR);
85 s += sprintf(s, "DINTRS = %08lx\n", DINTRS);
86 s += sprintf(s, "DINTRE = %08lx\n", DINTRE);
87 s += sprintf(s, "DINTRCNT = %08lx\n", DINTRCNT);
88 s += sprintf(s, "DSIG = %08lx\n", DSIG);
89 s += sprintf(s, "DMCTRL = %08lx\n", DMCTRL);
90 s += sprintf(s, "CLIPCTRL = %08lx\n", CLIPCTRL);
91 s += sprintf(s, "SPOCTRL = %08lx\n", SPOCTRL);
92 s += sprintf(s, "SVCTRL = %08lx\n", SVCTRL);
93 s += sprintf(s, "DLSTS = %08lx\n", DLSTS);
94 s += sprintf(s, "DLLCTRL = %08lx\n", DLLCTRL);
95 s += sprintf(s, "DVLNUM = %08lx\n", DVLNUM);
96 s += sprintf(s, "DUCTRL = %08lx\n", DUCTRL);
97 s += sprintf(s, "DVECTRL = %08lx\n", DVECTRL);
98 s += sprintf(s, "DHDET = %08lx\n", DHDET);
99 s += sprintf(s, "DVDET = %08lx\n", DVDET);
100 s += sprintf(s, "DODMSK = %08lx\n", DODMSK);
101 s += sprintf(s, "CSC01 = %08lx\n", CSC01);
102 s += sprintf(s, "CSC02 = %08lx\n", CSC02);
103 s += sprintf(s, "CSC03 = %08lx\n", CSC03);
104 s += sprintf(s, "CSC04 = %08lx\n", CSC04);
105 s += sprintf(s, "CSC05 = %08lx\n", CSC05);
106
107 return simple_read_from_buffer(userbuf, count, ppos,
108 big_buffer, s-big_buffer);
109}
110
111static ssize_t clock_read_file(struct file *file, char __user *userbuf,
112 size_t count, loff_t *ppos)
113{
114 char * s = big_buffer;
115
116 s += sprintf(s, "SYSCLKSRC = %08lx\n", SYSCLKSRC);
117 s += sprintf(s, "PIXCLKSRC = %08lx\n", PIXCLKSRC);
118 s += sprintf(s, "CLKSLEEP = %08lx\n", CLKSLEEP);
119 s += sprintf(s, "COREPLL = %08lx\n", COREPLL);
120 s += sprintf(s, "DISPPLL = %08lx\n", DISPPLL);
121 s += sprintf(s, "PLLSTAT = %08lx\n", PLLSTAT);
122 s += sprintf(s, "VOVRCLK = %08lx\n", VOVRCLK);
123 s += sprintf(s, "PIXCLK = %08lx\n", PIXCLK);
124 s += sprintf(s, "MEMCLK = %08lx\n", MEMCLK);
125 s += sprintf(s, "M24CLK = %08lx\n", M24CLK);
126 s += sprintf(s, "MBXCLK = %08lx\n", MBXCLK);
127 s += sprintf(s, "SDCLK = %08lx\n", SDCLK);
128 s += sprintf(s, "PIXCLKDIV = %08lx\n", PIXCLKDIV);
129
130 return simple_read_from_buffer(userbuf, count, ppos,
131 big_buffer, s-big_buffer);
132}
133
134static struct file_operations sysconf_fops = {
135 .read = sysconf_read_file,
136 .write = write_file_dummy,
137 .open = open_file_generic,
138};
139
140static struct file_operations clock_fops = {
141 .read = clock_read_file,
142 .write = write_file_dummy,
143 .open = open_file_generic,
144};
145
146static struct file_operations display_fops = {
147 .read = display_read_file,
148 .write = write_file_dummy,
149 .open = open_file_generic,
150};
151
152static struct file_operations gsctl_fops = {
153 .read = gsctl_read_file,
154 .write = write_file_dummy,
155 .open = open_file_generic,
156};
157
158
159static void __devinit mbxfb_debugfs_init(struct fb_info *fbi)
160{
161 struct mbxfb_info *mfbi = fbi->par;
162 struct mbxfb_debugfs_data *dbg;
163
164 dbg = kzalloc(sizeof(struct mbxfb_debugfs_data), GFP_KERNEL);
165 mfbi->debugfs_data = dbg;
166
167 dbg->dir = debugfs_create_dir("mbxfb", NULL);
168 dbg->sysconf = debugfs_create_file("sysconf", 0444, dbg->dir,
169 fbi, &sysconf_fops);
170 dbg->clock = debugfs_create_file("clock", 0444, dbg->dir,
171 fbi, &clock_fops);
172 dbg->display = debugfs_create_file("display", 0444, dbg->dir,
173 fbi, &display_fops);
174 dbg->gsctl = debugfs_create_file("gsctl", 0444, dbg->dir,
175 fbi, &gsctl_fops);
176}
177
178static void __devexit mbxfb_debugfs_remove(struct fb_info *fbi)
179{
180 struct mbxfb_info *mfbi = fbi->par;
181 struct mbxfb_debugfs_data *dbg = mfbi->debugfs_data;
182
183 debugfs_remove(dbg->gsctl);
184 debugfs_remove(dbg->display);
185 debugfs_remove(dbg->clock);
186 debugfs_remove(dbg->sysconf);
187 debugfs_remove(dbg->dir);
188}
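With debugfs mounted at its conventional location, the four files created above appear as /sys/kernel/debug/mbxfb/sysconf, .../clock, .../display and .../gsctl; reading any of them dumps the corresponding register group in the "NAME = value" format built by the handlers.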
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
new file mode 100644
index 000000000000..6849ab75d403
--- /dev/null
+++ b/drivers/video/mbx/mbxfb.c
@@ -0,0 +1,683 @@
1/*
2 * linux/drivers/video/mbx/mbxfb.c
3 *
4 * Copyright (C) 2006 Compulab, Ltd.
5 * Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on pxafb.c
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of this archive for
11 * more details.
12 *
13 * Intel 2700G (Marathon) Graphics Accelerator Frame Buffer Driver
14 *
15 */
16
17#include <linux/delay.h>
18#include <linux/fb.h>
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22
23#include <asm/io.h>
24
25#include <video/mbxfb.h>
26
27#include "regs.h"
28#include "reg_bits.h"
29
30static unsigned long virt_base_2700;
31
32#define MIN_XRES 16
33#define MIN_YRES 16
34#define MAX_XRES 2048
35#define MAX_YRES 2048
36
37#define MAX_PALETTES 16
38
39/* FIXME: take care of different chip revisions with different sizes
40 of ODFB */
41#define MEMORY_OFFSET 0x60000
42
43struct mbxfb_info {
44 struct device *dev;
45
46 struct resource *fb_res;
47 struct resource *fb_req;
48
49 struct resource *reg_res;
50 struct resource *reg_req;
51
52 void __iomem *fb_virt_addr;
53 unsigned long fb_phys_addr;
54
55 void __iomem *reg_virt_addr;
56 unsigned long reg_phys_addr;
57
58 int (*platform_probe) (struct fb_info * fb);
59 int (*platform_remove) (struct fb_info * fb);
60
61 u32 pseudo_palette[MAX_PALETTES];
62#ifdef CONFIG_FB_MBX_DEBUG
63 void *debugfs_data;
64#endif
65
66};
67
68static struct fb_var_screeninfo mbxfb_default __devinitdata = {
69 .xres = 640,
70 .yres = 480,
71 .xres_virtual = 640,
72 .yres_virtual = 480,
73 .bits_per_pixel = 16,
74 .red = {11, 5, 0},
75 .green = {5, 6, 0},
76 .blue = {0, 5, 0},
77 .activate = FB_ACTIVATE_TEST,
78 .height = -1,
79 .width = -1,
80 .pixclock = 40000,
81 .left_margin = 48,
82 .right_margin = 16,
83 .upper_margin = 33,
84 .lower_margin = 10,
85 .hsync_len = 96,
86 .vsync_len = 2,
87 .vmode = FB_VMODE_NONINTERLACED,
88 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
89};
90
91static struct fb_fix_screeninfo mbxfb_fix __devinitdata = {
92 .id = "MBX",
93 .type = FB_TYPE_PACKED_PIXELS,
94 .visual = FB_VISUAL_TRUECOLOR,
95 .xpanstep = 0,
96 .ypanstep = 0,
97 .ywrapstep = 0,
98 .accel = FB_ACCEL_NONE,
99};
100
101struct pixclock_div {
102 u8 m;
103 u8 n;
104 u8 p;
105};
106
107static unsigned int mbxfb_get_pixclock(unsigned int pixclock_ps,
108 struct pixclock_div *div)
109{
110 u8 m, n, p;
111 unsigned int err = 0;
112 unsigned int min_err = ~0x0;
113 unsigned int clk;
114 unsigned int best_clk = 0;
115 unsigned int ref_clk = 13000; /* FIXME: take from platform data */
116 unsigned int pixclock;
117
118 /* convert pixclock to KHz */
119 pixclock = PICOS2KHZ(pixclock_ps);
120
121 for (m = 1; m < 64; m++) {
122 for (n = 1; n < 8; n++) {
123 for (p = 0; p < 8; p++) {
124 clk = (ref_clk * m) / (n * (1 << p));
125 err = (clk > pixclock) ? (clk - pixclock) :
126 (pixclock - clk);
127 if (err < min_err) {
128 min_err = err;
129 best_clk = clk;
130 div->m = m;
131 div->n = n;
132 div->p = p;
133 }
134 }
135 }
136 }
137 return KHZ2PICOS(best_clk);
138}
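/*
 * Worked example: for the default mode's pixclock of 40000 ps, PICOS2KHZ()
 * yields a 25000 kHz target; with the 13000 kHz reference above the search
 * settles on m = 27, n = 7, p = 1, i.e. 13000 * 27 / (7 * 2) = 25071 kHz
 * (about 0.3% high), and the function returns KHZ2PICOS(25071) = 39886 ps.
 */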
139
140static int mbxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
141 u_int trans, struct fb_info *info)
142{
143 u32 val, ret = 1;
144
145 if (regno < MAX_PALETTES) {
146 u32 *pal = info->pseudo_palette;
147
148 val = (red & 0xf800) | ((green & 0xfc00) >> 5) |
149 ((blue & 0xf800) >> 11);
150 pal[regno] = val;
151 ret = 0;
152 }
153
154 return ret;
155}
156
157static int mbxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
158{
159 struct pixclock_div div;
160
161 var->pixclock = mbxfb_get_pixclock(var->pixclock, &div);
162
163 if (var->xres < MIN_XRES)
164 var->xres = MIN_XRES;
165 if (var->yres < MIN_YRES)
166 var->yres = MIN_YRES;
167 if (var->xres > MAX_XRES)
168 return -EINVAL;
169 if (var->yres > MAX_YRES)
170 return -EINVAL;
171 var->xres_virtual = max(var->xres_virtual, var->xres);
172 var->yres_virtual = max(var->yres_virtual, var->yres);
173
174 switch (var->bits_per_pixel) {
175 /* 8 bits-per-pixel is not supported yet */
176 case 8:
177 return -EINVAL;
178 case 16:
179 var->green.length = (var->green.length == 5) ? 5 : 6;
180 var->red.length = 5;
181 var->blue.length = 5;
182 var->transp.length = 6 - var->green.length;
183 var->blue.offset = 0;
184 var->green.offset = 5;
185 var->red.offset = 5 + var->green.length;
186 var->transp.offset = (5 + var->red.offset) & 15;
187 break;
188 case 24: /* RGB 888 */
189 case 32: /* RGBA 8888 */
190 var->red.offset = 16;
191 var->red.length = 8;
192 var->green.offset = 8;
193 var->green.length = 8;
194 var->blue.offset = 0;
195 var->blue.length = 8;
196 var->transp.length = var->bits_per_pixel - 24;
197 var->transp.offset = (var->transp.length) ? 24 : 0;
198 break;
199 }
200 var->red.msb_right = 0;
201 var->green.msb_right = 0;
202 var->blue.msb_right = 0;
203 var->transp.msb_right = 0;
204
205 return 0;
206}
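/*
 * Resulting 16 bpp layouts from the code above: green.length == 6 gives
 * RGB565 (red 15:11, green 10:5, blue 4:0, no alpha), green.length == 5
 * gives ARGB1555 (alpha bit 15, red 14:10, green 9:5, blue 4:0), matching
 * the GSCTRL_GPIXFMT_* values later chosen in mbxfb_set_par().
 */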
207
208static int mbxfb_set_par(struct fb_info *info)
209{
210 struct fb_var_screeninfo *var = &info->var;
211 struct pixclock_div div;
212 ushort hbps, ht, hfps, has;
213 ushort vbps, vt, vfps, vas;
214 u32 gsctrl = readl(GSCTRL);
215 u32 gsadr = readl(GSADR);
216
217 info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
218
219 /* setup color mode */
220 gsctrl &= ~(FMsk(GSCTRL_GPIXFMT));
221 /* FIXME: add *WORKING* support for 8-bits per color */
222 if (info->var.bits_per_pixel == 8) {
223 return -EINVAL;
224 } else {
225 fb_dealloc_cmap(&info->cmap);
226 gsctrl &= ~GSCTRL_LUT_EN;
227
228 info->fix.visual = FB_VISUAL_TRUECOLOR;
229 switch (info->var.bits_per_pixel) {
230 case 16:
231 if (info->var.green.length == 5)
232 gsctrl |= GSCTRL_GPIXFMT_ARGB1555;
233 else
234 gsctrl |= GSCTRL_GPIXFMT_RGB565;
235 break;
236 case 24:
237 gsctrl |= GSCTRL_GPIXFMT_RGB888;
238 break;
239 case 32:
240 gsctrl |= GSCTRL_GPIXFMT_ARGB8888;
241 break;
242 }
243 }
244
245 /* setup resolution */
246 gsctrl &= ~(FMsk(GSCTRL_GSWIDTH) | FMsk(GSCTRL_GSHEIGHT));
247 gsctrl |= Gsctrl_Width(info->var.xres - 1) |
248 Gsctrl_Height(info->var.yres - 1);
249 writel(gsctrl, GSCTRL);
250 udelay(1000);
251
252 gsadr &= ~(FMsk(GSADR_SRCSTRIDE));
253 gsadr |= Gsadr_Srcstride(info->var.xres * info->var.bits_per_pixel /
254 (8 * 16) - 1);
255 writel(gsadr, GSADR);
256 udelay(1000);
257
258 /* setup timings */
259 var->pixclock = mbxfb_get_pixclock(info->var.pixclock, &div);
260
261 writel((Disp_Pll_M(div.m) | Disp_Pll_N(div.n) |
262 Disp_Pll_P(div.p) | DISP_PLL_EN), DISPPLL);
263
264 hbps = var->hsync_len;
265 has = hbps + var->left_margin;
266 hfps = has + var->xres;
267 ht = hfps + var->right_margin;
268
269 vbps = var->vsync_len;
270 vas = vbps + var->upper_margin;
271 vfps = vas + var->yres;
272 vt = vfps + var->lower_margin;
273
274 writel((Dht01_Hbps(hbps) | Dht01_Ht(ht)), DHT01);
275 writel((Dht02_Hlbs(has) | Dht02_Has(has)), DHT02);
276 writel((Dht03_Hfps(hfps) | Dht03_Hrbs(hfps)), DHT03);
277 writel((Dhdet_Hdes(has) | Dhdet_Hdef(hfps)), DHDET);
278
279 writel((Dvt01_Vbps(vbps) | Dvt01_Vt(vt)), DVT01);
280 writel((Dvt02_Vtbs(vas) | Dvt02_Vas(vas)), DVT02);
281 writel((Dvt03_Vfps(vfps) | Dvt03_Vbbs(vfps)), DVT03);
282 writel((Dvdet_Vdes(vas) | Dvdet_Vdef(vfps)), DVDET);
283 writel((Dvectrl_Vevent(vfps) | Dvectrl_Vfetch(vbps)), DVECTRL);
284
285 writel((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
286
287 return 0;
288}
289
290static int mbxfb_blank(int blank, struct fb_info *info)
291{
292 switch (blank) {
293 case FB_BLANK_POWERDOWN:
294 case FB_BLANK_VSYNC_SUSPEND:
295 case FB_BLANK_HSYNC_SUSPEND:
296 case FB_BLANK_NORMAL:
297 writel((readl(DSCTRL) & ~DSCTRL_SYNCGEN_EN), DSCTRL);
298 udelay(1000);
299 writel((readl(PIXCLK) & ~PIXCLK_EN), PIXCLK);
300 udelay(1000);
301 writel((readl(VOVRCLK) & ~VOVRCLK_EN), VOVRCLK);
302 udelay(1000);
303 break;
304 case FB_BLANK_UNBLANK:
305 writel((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
306 udelay(1000);
307 writel((readl(PIXCLK) | PIXCLK_EN), PIXCLK);
308 udelay(1000);
309 break;
310 }
311 return 0;
312}
313
314static struct fb_ops mbxfb_ops = {
315 .owner = THIS_MODULE,
316 .fb_check_var = mbxfb_check_var,
317 .fb_set_par = mbxfb_set_par,
318 .fb_setcolreg = mbxfb_setcolreg,
319 .fb_fillrect = cfb_fillrect,
320 .fb_copyarea = cfb_copyarea,
321 .fb_imageblit = cfb_imageblit,
322 .fb_blank = mbxfb_blank,
323};
324
325/*
326 Enable external SDRAM controller. Assume that all clocks are active
327 by now.
328*/
329static void __devinit setup_memc(struct fb_info *fbi)
330{
331 struct mbxfb_info *mfbi = fbi->par;
332 unsigned long tmp;
333 int i;
334
 335 /* FIXME: use platform specific parameters */
336 /* setup SDRAM controller */
337 writel((LMCFG_LMC_DS | LMCFG_LMC_TS | LMCFG_LMD_TS |
338 LMCFG_LMA_TS),
339 LMCFG);
340 udelay(1000);
341
342 writel(LMPWR_MC_PWR_ACT, LMPWR);
343 udelay(1000);
344
345 /* setup SDRAM timings */
346 writel((Lmtim_Tras(7) | Lmtim_Trp(3) | Lmtim_Trcd(3) |
347 Lmtim_Trc(9) | Lmtim_Tdpl(2)),
348 LMTIM);
349 udelay(1000);
350 /* setup SDRAM refresh rate */
351 writel(0xc2b, LMREFRESH);
352 udelay(1000);
353 /* setup SDRAM type parameters */
354 writel((LMTYPE_CASLAT_3 | LMTYPE_BKSZ_2 | LMTYPE_ROWSZ_11 |
355 LMTYPE_COLSZ_8),
356 LMTYPE);
357 udelay(1000);
358 /* enable memory controller */
359 writel(LMPWR_MC_PWR_ACT, LMPWR);
360 udelay(1000);
361
362 /* perform dummy reads */
363 for ( i = 0; i < 16; i++ ) {
364 tmp = readl(fbi->screen_base);
365 }
366}
367
368static void enable_clocks(struct fb_info *fbi)
369{
370 /* enable clocks */
371 writel(SYSCLKSRC_PLL_2, SYSCLKSRC);
372 udelay(1000);
373 writel(PIXCLKSRC_PLL_1, PIXCLKSRC);
374 udelay(1000);
375 writel(0x00000000, CLKSLEEP);
376 udelay(1000);
377 writel((Core_Pll_M(0x17) | Core_Pll_N(0x3) | Core_Pll_P(0x0) |
378 CORE_PLL_EN),
379 COREPLL);
380 udelay(1000);
381 writel((Disp_Pll_M(0x1b) | Disp_Pll_N(0x7) | Disp_Pll_P(0x1) |
382 DISP_PLL_EN),
383 DISPPLL);
384
385 writel(0x00000000, VOVRCLK);
386 udelay(1000);
387 writel(PIXCLK_EN, PIXCLK);
388 udelay(1000);
389 writel(MEMCLK_EN, MEMCLK);
390 udelay(1000);
391 writel(0x00000006, M24CLK);
392 udelay(1000);
393 writel(0x00000006, MBXCLK);
394 udelay(1000);
395 writel(SDCLK_EN, SDCLK);
396 udelay(1000);
397 writel(0x00000001, PIXCLKDIV);
398 udelay(1000);
399}
400
401static void __devinit setup_graphics(struct fb_info *fbi)
402{
403 unsigned long gsctrl;
404
405 gsctrl = GSCTRL_GAMMA_EN | Gsctrl_Width(fbi->var.xres - 1) |
406 Gsctrl_Height(fbi->var.yres - 1);
407 switch (fbi->var.bits_per_pixel) {
408 case 16:
409 if (fbi->var.green.length == 5)
410 gsctrl |= GSCTRL_GPIXFMT_ARGB1555;
411 else
412 gsctrl |= GSCTRL_GPIXFMT_RGB565;
413 break;
414 case 24:
415 gsctrl |= GSCTRL_GPIXFMT_RGB888;
416 break;
417 case 32:
418 gsctrl |= GSCTRL_GPIXFMT_ARGB8888;
419 break;
420 }
421
422 writel(gsctrl, GSCTRL);
423 udelay(1000);
424 writel(0x00000000, GBBASE);
425 udelay(1000);
426 writel(0x00ffffff, GDRCTRL);
427 udelay(1000);
428 writel((GSCADR_STR_EN | Gscadr_Gbase_Adr(0x6000)), GSCADR);
429 udelay(1000);
430 writel(0x00000000, GPLUT);
431 udelay(1000);
432}
433
434static void __devinit setup_display(struct fb_info *fbi)
435{
436 unsigned long dsctrl = 0;
437
438 dsctrl = DSCTRL_BLNK_POL;
439 if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
440 dsctrl |= DSCTRL_HS_POL;
441 if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
442 dsctrl |= DSCTRL_VS_POL;
443 writel(dsctrl, DSCTRL);
444 udelay(1000);
445 writel(0xd0303010, DMCTRL);
446 udelay(1000);
447 writel((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
448}
449
450static void __devinit enable_controller(struct fb_info *fbi)
451{
452 writel(SYSRST_RST, SYSRST);
453 udelay(1000);
454
455
456 enable_clocks(fbi);
457 setup_memc(fbi);
458 setup_graphics(fbi);
459 setup_display(fbi);
460}
461
462#ifdef CONFIG_PM
463/*
464 * Power management hooks. Note that we won't be called from IRQ context,
465 * unlike the blank functions above, so we may sleep.
466 */
467static int mbxfb_suspend(struct platform_device *dev, pm_message_t state)
468{
469 /* make frame buffer memory enter self-refresh mode */
470 writel(LMPWR_MC_PWR_SRM, LMPWR);
471 while (LMPWRSTAT != LMPWRSTAT_MC_PWR_SRM)
472 ; /* empty statement */
473
 474 /* reset the device, since its initial state is 'mostly sleeping' */
475 writel(SYSRST_RST, SYSRST);
476 return 0;
477}
478
479static int mbxfb_resume(struct platform_device *dev)
480{
481 struct fb_info *fbi = platform_get_drvdata(dev);
482
483 enable_clocks(fbi);
484/* setup_graphics(fbi); */
485/* setup_display(fbi); */
486
487 writel((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
488 return 0;
489}
490#else
491#define mbxfb_suspend NULL
492#define mbxfb_resume NULL
493#endif
494
495/* debugfs entries */
496#ifndef CONFIG_FB_MBX_DEBUG
497#define mbxfb_debugfs_init(x) do {} while(0)
498#define mbxfb_debugfs_remove(x) do {} while(0)
499#endif
500
501#define res_size(_r) (((_r)->end - (_r)->start) + 1)
502
503static int __devinit mbxfb_probe(struct platform_device *dev)
504{
505 int ret;
506 struct fb_info *fbi;
507 struct mbxfb_info *mfbi;
508 struct mbxfb_platform_data *pdata;
509
 510 dev_dbg(&dev->dev, "mbxfb_probe\n");
511
512 fbi = framebuffer_alloc(sizeof(struct mbxfb_info), &dev->dev);
513 if (fbi == NULL) {
514 dev_err(&dev->dev, "framebuffer_alloc failed\n");
515 return -ENOMEM;
516 }
517
518 mfbi = fbi->par;
519 fbi->pseudo_palette = mfbi->pseudo_palette;
520 pdata = dev->dev.platform_data;
521 if (pdata->probe)
522 mfbi->platform_probe = pdata->probe;
523 if (pdata->remove)
524 mfbi->platform_remove = pdata->remove;
525
526 mfbi->fb_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
527 mfbi->reg_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
528
529 if (!mfbi->fb_res || !mfbi->reg_res) {
530 dev_err(&dev->dev, "no resources found\n");
531 ret = -ENODEV;
532 goto err1;
533 }
534
535 mfbi->fb_req = request_mem_region(mfbi->fb_res->start,
536 res_size(mfbi->fb_res), dev->name);
537 if (mfbi->fb_req == NULL) {
538 dev_err(&dev->dev, "failed to claim framebuffer memory\n");
539 ret = -EINVAL;
540 goto err1;
541 }
542 mfbi->fb_phys_addr = mfbi->fb_res->start;
543
544 mfbi->reg_req = request_mem_region(mfbi->reg_res->start,
545 res_size(mfbi->reg_res), dev->name);
546 if (mfbi->reg_req == NULL) {
547 dev_err(&dev->dev, "failed to claim Marathon registers\n");
548 ret = -EINVAL;
549 goto err2;
550 }
551 mfbi->reg_phys_addr = mfbi->reg_res->start;
552
553 mfbi->reg_virt_addr = ioremap_nocache(mfbi->reg_phys_addr,
554 res_size(mfbi->reg_req));
555 if (!mfbi->reg_virt_addr) {
556 dev_err(&dev->dev, "failed to ioremap Marathon registers\n");
557 ret = -EINVAL;
558 goto err3;
559 }
560 virt_base_2700 = (unsigned long)mfbi->reg_virt_addr;
561
562 mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
563 res_size(mfbi->fb_req));
 564 if (!mfbi->fb_virt_addr) {
565 dev_err(&dev->dev, "failed to ioremap frame buffer\n");
566 ret = -EINVAL;
567 goto err4;
568 }
569
570 /* FIXME: get from platform */
571 fbi->screen_base = (char __iomem *)(mfbi->fb_virt_addr + 0x60000);
572 fbi->screen_size = 8 * 1024 * 1024; /* 8 Megs */
573 fbi->fbops = &mbxfb_ops;
574
575 fbi->var = mbxfb_default;
576 fbi->fix = mbxfb_fix;
577 fbi->fix.smem_start = mfbi->fb_phys_addr + 0x60000;
578 fbi->fix.smem_len = 8 * 1024 * 1024;
579 fbi->fix.line_length = 640 * 2;
580
581 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
582 if (ret < 0) {
583 dev_err(&dev->dev, "fb_alloc_cmap failed\n");
584 ret = -EINVAL;
585 goto err5;
586 }
587
588 platform_set_drvdata(dev, fbi);
589
590 printk(KERN_INFO "fb%d: mbx frame buffer device\n", fbi->node);
591
592 if (mfbi->platform_probe)
593 mfbi->platform_probe(fbi);
594
595 enable_controller(fbi);
596
597 mbxfb_debugfs_init(fbi);
598
599 ret = register_framebuffer(fbi);
600 if (ret < 0) {
601 dev_err(&dev->dev, "register_framebuffer failed\n");
602 ret = -EINVAL;
603 goto err6;
604 }
605
606 return 0;
607
608err6:
609 fb_dealloc_cmap(&fbi->cmap);
610err5:
611 iounmap(mfbi->fb_virt_addr);
612err4:
613 iounmap(mfbi->reg_virt_addr);
614err3:
615 release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res));
616err2:
617 release_mem_region(mfbi->fb_res->start, res_size(mfbi->fb_res));
618err1:
619 framebuffer_release(fbi);
620
621 return ret;
622}
623
624static int __devexit mbxfb_remove(struct platform_device *dev)
625{
626 struct fb_info *fbi = platform_get_drvdata(dev);
627
628 writel(SYSRST_RST, SYSRST);
629 udelay(1000);
630
631 mbxfb_debugfs_remove(fbi);
632
633 if (fbi) {
634 struct mbxfb_info *mfbi = fbi->par;
635
636 unregister_framebuffer(fbi);
637 if (mfbi) {
638 if (mfbi->platform_remove)
639 mfbi->platform_remove(fbi);
640
641 if (mfbi->fb_virt_addr)
642 iounmap(mfbi->fb_virt_addr);
643 if (mfbi->reg_virt_addr)
644 iounmap(mfbi->reg_virt_addr);
645 if (mfbi->reg_req)
646 release_mem_region(mfbi->reg_req->start,
647 res_size(mfbi->reg_req));
648 if (mfbi->fb_req)
649 release_mem_region(mfbi->fb_req->start,
650 res_size(mfbi->fb_req));
651 }
652 framebuffer_release(fbi);
653 }
654
655 return 0;
656}
657
658static struct platform_driver mbxfb_driver = {
659 .probe = mbxfb_probe,
660 .remove = mbxfb_remove,
661 .suspend = mbxfb_suspend,
662 .resume = mbxfb_resume,
663 .driver = {
664 .name = "mbx-fb",
665 },
666};
667
668int __devinit mbxfb_init(void)
669{
670 return platform_driver_register(&mbxfb_driver);
671}
672
673static void __devexit mbxfb_exit(void)
674{
675 platform_driver_unregister(&mbxfb_driver);
676}
677
678module_init(mbxfb_init);
679module_exit(mbxfb_exit);
680
681MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device");
682MODULE_AUTHOR("Mike Rapoport, Compulab");
683MODULE_LICENSE("GPL");
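The driver binds by name to a platform device that board code is expected to supply with two memory resources (index 0: the graphics memory window, index 1: the register window) and platform data carrying the probe/remove hooks; note that mbxfb_probe() dereferences platform_data unconditionally, so it must be provided. A hypothetical registration could look like the sketch below; the base addresses, sizes and hook bodies are placeholders, not values from this patch.

#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <video/mbxfb.h>

static int board_mbxfb_attach(struct fb_info *fb)
{
	/* board-specific panel power-up would go here */
	return 0;
}

static int board_mbxfb_detach(struct fb_info *fb)
{
	return 0;
}

static struct resource board_mbxfb_resources[] = {
	[0] = {	/* graphics memory window: resource 0 in mbxfb_probe() */
		.start	= 0x14000000,
		.end	= 0x14000000 + 0x01000000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {	/* 2700G register window: resource 1 in mbxfb_probe() */
		.start	= 0x13000000,
		.end	= 0x13000000 + 0x00010000 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct mbxfb_platform_data board_mbxfb_pdata = {
	.probe	= board_mbxfb_attach,
	.remove	= board_mbxfb_detach,
};

static struct platform_device board_mbxfb_device = {
	.name		= "mbx-fb",	/* must match mbxfb_driver.driver.name */
	.id		= -1,
	.resource	= board_mbxfb_resources,
	.num_resources	= ARRAY_SIZE(board_mbxfb_resources),
	.dev		= {
		.platform_data	= &board_mbxfb_pdata,
	},
};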
diff --git a/drivers/video/mbx/reg_bits.h b/drivers/video/mbx/reg_bits.h
new file mode 100644
index 000000000000..c226a8e45312
--- /dev/null
+++ b/drivers/video/mbx/reg_bits.h
@@ -0,0 +1,418 @@
1#ifndef __REG_BITS_2700G_
2#define __REG_BITS_2700G_
3
4/* use defines from asm-arm/arch-pxa/bitfields.h for bit fields access */
5#define UData(Data) ((unsigned long) (Data))
6#define Fld(Size, Shft) (((Size) << 16) + (Shft))
7#define FSize(Field) ((Field) >> 16)
8#define FShft(Field) ((Field) & 0x0000FFFF)
9#define FMsk(Field) (((UData (1) << FSize (Field)) - 1) << FShft (Field))
10#define FAlnMsk(Field) ((UData (1) << FSize (Field)) - 1)
11#define F1stBit(Field) (UData (1) << FShft (Field))
12
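/*
 * Example of the encoding: GSCTRL_GPIXFMT below is Fld(4,27), a 4-bit field
 * at bit 27; FShft() recovers 27, FMsk() evaluates to 0xf << 27 =
 * 0x78000000, and GSCTRL_GPIXFMT_RGB565 is 0x7 << 27.  Drivers clear a
 * field with ~FMsk() before or-ing in one of these values.
 */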
13#define SYSRST_RST (1 << 0)
14
15/* SYSCLKSRC - SYSCLK Source Control Register */
16#define SYSCLKSRC_SEL Fld(2,0)
17#define SYSCLKSRC_REF ((0x0) << FShft(SYSCLKSRC_SEL))
18#define SYSCLKSRC_PLL_1 ((0x1) << FShft(SYSCLKSRC_SEL))
19#define SYSCLKSRC_PLL_2 ((0x2) << FShft(SYSCLKSRC_SEL))
20
21/* PIXCLKSRC - PIXCLK Source Control Register */
22#define PIXCLKSRC_SEL Fld(2,0)
23#define PIXCLKSRC_REF ((0x0) << FShft(PIXCLKSRC_SEL))
24#define PIXCLKSRC_PLL_1 ((0x1) << FShft(PIXCLKSRC_SEL))
25#define PIXCLKSRC_PLL_2 ((0x2) << FShft(PIXCLKSRC_SEL))
26
27/* Clock Disable Register */
28#define CLKSLEEP_SLP (1 << 0)
29
30/* Core PLL Control Register */
31#define CORE_PLL_M Fld(6,7)
32#define Core_Pll_M(x) ((x) << FShft(CORE_PLL_M))
33#define CORE_PLL_N Fld(3,4)
34#define Core_Pll_N(x) ((x) << FShft(CORE_PLL_N))
35#define CORE_PLL_P Fld(3,1)
36#define Core_Pll_P(x) ((x) << FShft(CORE_PLL_P))
37#define CORE_PLL_EN (1 << 0)
38
39/* Display PLL Control Register */
40#define DISP_PLL_M Fld(6,7)
41#define Disp_Pll_M(x) ((x) << FShft(DISP_PLL_M))
42#define DISP_PLL_N Fld(3,4)
43#define Disp_Pll_N(x) ((x) << FShft(DISP_PLL_N))
44#define DISP_PLL_P Fld(3,1)
45#define Disp_Pll_P(x) ((x) << FShft(DISP_PLL_P))
46#define DISP_PLL_EN (1 << 0)
47
48/* PLL status register */
49#define PLLSTAT_CORE_PLL_LOST_L (1 << 3)
50#define PLLSTAT_CORE_PLL_LSTS (1 << 2)
51#define PLLSTAT_DISP_PLL_LOST_L (1 << 1)
52#define PLLSTAT_DISP_PLL_LSTS (1 << 0)
53
54/* Video and scale clock control register */
55#define VOVRCLK_EN (1 << 0)
56
57/* Pixel clock control register */
58#define PIXCLK_EN (1 << 0)
59
60/* Memory clock control register */
61#define MEMCLK_EN (1 << 0)
62
63/* MBX clock control register */
64#define MBXCLK_DIV Fld(2,2)
65#define MBXCLK_DIV_1 ((0x0) << FShft(MBXCLK_DIV))
66#define MBXCLK_DIV_2 ((0x1) << FShft(MBXCLK_DIV))
67#define MBXCLK_DIV_3 ((0x2) << FShft(MBXCLK_DIV))
68#define MBXCLK_DIV_4 ((0x3) << FShft(MBXCLK_DIV))
69#define MBXCLK_EN Fld(2,0)
70#define MBXCLK_EN_NONE ((0x0) << FShft(MBXCLK_EN))
71#define MBXCLK_EN_2D ((0x1) << FShft(MBXCLK_EN))
72#define MBXCLK_EN_BOTH ((0x2) << FShft(MBXCLK_EN))
73
74/* M24 clock control register */
75#define M24CLK_DIV Fld(2,1)
76#define M24CLK_DIV_1 ((0x0) << FShft(M24CLK_DIV))
77#define M24CLK_DIV_2 ((0x1) << FShft(M24CLK_DIV))
78#define M24CLK_DIV_3 ((0x2) << FShft(M24CLK_DIV))
79#define M24CLK_DIV_4 ((0x3) << FShft(M24CLK_DIV))
80#define M24CLK_EN (1 << 0)
81
82/* SDRAM clock control register */
83#define SDCLK_EN (1 << 0)
84
85/* PixClk Divisor Register */
86#define PIXCLKDIV_PD Fld(9,0)
87#define Pixclkdiv_Pd(x) ((x) << FShft(PIXCLKDIV_PD))
88
89/* LCD Config control register */
90#define LCDCFG_IN_FMT Fld(3,28)
91#define Lcdcfg_In_Fmt(x) ((x) << FShft(LCDCFG_IN_FMT))
92#define LCDCFG_LCD1DEN_POL (1 << 27)
93#define LCDCFG_LCD1FCLK_POL (1 << 26)
94#define LCDCFG_LCD1LCLK_POL (1 << 25)
95#define LCDCFG_LCD1D_POL (1 << 24)
96#define LCDCFG_LCD2DEN_POL (1 << 23)
97#define LCDCFG_LCD2FCLK_POL (1 << 22)
98#define LCDCFG_LCD2LCLK_POL (1 << 21)
99#define LCDCFG_LCD2D_POL (1 << 20)
100#define LCDCFG_LCD1_TS (1 << 19)
101#define LCDCFG_LCD1D_DS (1 << 18)
102#define LCDCFG_LCD1C_DS (1 << 17)
103#define LCDCFG_LCD1_IS_IN (1 << 16)
104#define LCDCFG_LCD2_TS (1 << 3)
105#define LCDCFG_LCD2D_DS (1 << 2)
106#define LCDCFG_LCD2C_DS (1 << 1)
107#define LCDCFG_LCD2_IS_IN (1 << 0)
108
109/* On-Die Frame Buffer Power Control Register */
110#define ODFBPWR_SLOW (1 << 2)
111#define ODFBPWR_MODE Fld(2,0)
112#define ODFBPWR_MODE_ACT ((0x0) << FShft(ODFBPWR_MODE))
113#define ODFBPWR_MODE_ACT_LP ((0x1) << FShft(ODFBPWR_MODE))
114#define ODFBPWR_MODE_SLEEP ((0x2) << FShft(ODFBPWR_MODE))
115#define ODFBPWR_MODE_SHUTD ((0x3) << FShft(ODFBPWR_MODE))
116
117/* On-Die Frame Buffer Power State Status Register */
118#define ODFBSTAT_ACT (1 << 2)
119#define ODFBSTAT_SLP (1 << 1)
120#define ODFBSTAT_SDN (1 << 0)
121
122/* LMRST - Local Memory (SDRAM) Reset */
123#define LMRST_MC_RST (1 << 0)
124
125/* LMCFG - Local Memory (SDRAM) Configuration Register */
126#define LMCFG_LMC_DS (1 << 5)
127#define LMCFG_LMD_DS (1 << 4)
128#define LMCFG_LMA_DS (1 << 3)
129#define LMCFG_LMC_TS (1 << 2)
130#define LMCFG_LMD_TS (1 << 1)
131#define LMCFG_LMA_TS (1 << 0)
132
133/* LMPWR - Local Memory (SDRAM) Power Control Register */
134#define LMPWR_MC_PWR_CNT Fld(2,0)
135#define LMPWR_MC_PWR_ACT ((0x0) << FShft(LMPWR_MC_PWR_CNT)) /* Active */
136#define LMPWR_MC_PWR_SRM ((0x1) << FShft(LMPWR_MC_PWR_CNT)) /* Self-refresh */
137#define LMPWR_MC_PWR_DPD ((0x3) << FShft(LMPWR_MC_PWR_CNT)) /* deep power down */
138
139/* LMPWRSTAT - Local Memory (SDRAM) Power Status Register */
140#define LMPWRSTAT_MC_PWR_CNT Fld(2,0)
141#define LMPWRSTAT_MC_PWR_ACT ((0x0) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* Active */
142#define LMPWRSTAT_MC_PWR_SRM ((0x1) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* Self-refresh */
143#define LMPWRSTAT_MC_PWR_DPD ((0x3) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* deep power down */
144
145/* LMTYPE - Local Memory (SDRAM) Type Register */
146#define LMTYPE_CASLAT Fld(3,10)
147#define LMTYPE_CASLAT_1 ((0x1) << FShft(LMTYPE_CASLAT))
148#define LMTYPE_CASLAT_2 ((0x2) << FShft(LMTYPE_CASLAT))
149#define LMTYPE_CASLAT_3 ((0x3) << FShft(LMTYPE_CASLAT))
150#define LMTYPE_BKSZ Fld(2,8)
151#define LMTYPE_BKSZ_1 ((0x1) << FShft(LMTYPE_BKSZ))
152#define LMTYPE_BKSZ_2 ((0x2) << FShft(LMTYPE_BKSZ))
153#define LMTYPE_ROWSZ Fld(4,4)
154#define LMTYPE_ROWSZ_11 ((0xb) << FShft(LMTYPE_ROWSZ))
155#define LMTYPE_ROWSZ_12 ((0xc) << FShft(LMTYPE_ROWSZ))
156#define LMTYPE_ROWSZ_13 ((0xd) << FShft(LMTYPE_ROWSZ))
157#define LMTYPE_COLSZ Fld(4,0)
158#define LMTYPE_COLSZ_7 ((0x7) << FShft(LMTYPE_COLSZ))
159#define LMTYPE_COLSZ_8 ((0x8) << FShft(LMTYPE_COLSZ))
160#define LMTYPE_COLSZ_9 ((0x9) << FShft(LMTYPE_COLSZ))
161#define LMTYPE_COLSZ_10 ((0xa) << FShft(LMTYPE_COLSZ))
162#define LMTYPE_COLSZ_11 ((0xb) << FShft(LMTYPE_COLSZ))
163#define LMTYPE_COLSZ_12 ((0xc) << FShft(LMTYPE_COLSZ))
164
165/* LMTIM - Local Memory (SDRAM) Timing Register */
166#define LMTIM_TRAS Fld(4,16)
167#define Lmtim_Tras(x) ((x) << FShft(LMTIM_TRAS))
168#define LMTIM_TRP Fld(4,12)
169#define Lmtim_Trp(x) ((x) << FShft(LMTIM_TRP))
170#define LMTIM_TRCD Fld(4,8)
171#define Lmtim_Trcd(x) ((x) << FShft(LMTIM_TRCD))
172#define LMTIM_TRC Fld(4,4)
173#define Lmtim_Trc(x) ((x) << FShft(LMTIM_TRC))
174#define LMTIM_TDPL Fld(4,0)
175#define Lmtim_Tdpl(x) ((x) << FShft(LMTIM_TDPL))
176
177/* LMREFRESH - Local Memory (SDRAM) tREF Control Register */
178#define LMREFRESH_TREF Fld(2,0)
179#define Lmrefresh_Tref(x) ((x) << FShft(LMREFRESH_TREF))
180
181/* GSCTRL - Graphics surface control register */
182#define GSCTRL_LUT_EN (1 << 31)
183#define GSCTRL_GPIXFMT Fld(4,27)
184#define GSCTRL_GPIXFMT_INDEXED ((0x0) << FShft(GSCTRL_GPIXFMT))
185#define GSCTRL_GPIXFMT_ARGB4444 ((0x4) << FShft(GSCTRL_GPIXFMT))
186#define GSCTRL_GPIXFMT_ARGB1555 ((0x5) << FShft(GSCTRL_GPIXFMT))
187#define GSCTRL_GPIXFMT_RGB888 ((0x6) << FShft(GSCTRL_GPIXFMT))
188#define GSCTRL_GPIXFMT_RGB565 ((0x7) << FShft(GSCTRL_GPIXFMT))
189#define GSCTRL_GPIXFMT_ARGB8888 ((0x8) << FShft(GSCTRL_GPIXFMT))
190#define GSCTRL_GAMMA_EN (1 << 26)
191
192#define GSCTRL_GSWIDTH Fld(11,11)
193#define Gsctrl_Width(Pixel) /* Display Width [1..2048 pix.] */ \
194 (((Pixel) - 1) << FShft(GSCTRL_GSWIDTH))
195
196#define GSCTRL_GSHEIGHT Fld(11,0)
197#define Gsctrl_Height(Pixel) /* Display Height [1..2048 pix.] */ \
198 (((Pixel) - 1) << FShft(GSCTRL_GSHEIGHT))
199
 200/* GBBASE fields */
201#define GBBASE_GLALPHA Fld(8,24)
202#define Gbbase_Glalpha(x) ((x) << FShft(GBBASE_GLALPHA))
203
204#define GBBASE_COLKEY Fld(24,0)
205#define Gbbase_Colkey(x) ((x) << FShft(GBBASE_COLKEY))
206
207/* GDRCTRL fields */
208#define GDRCTRL_PIXDBL (1 << 31)
209#define GDRCTRL_PIXHLV (1 << 30)
210#define GDRCTRL_LNDBL (1 << 29)
211#define GDRCTRL_LNHLV (1 << 28)
212#define GDRCTRL_COLKEYM Fld(24,0)
213#define Gdrctrl_Colkeym(x) ((x) << FShft(GDRCTRL_COLKEYM))
214
215/* GSCADR graphics stream control address register fields */
216#define GSCADR_STR_EN (1 << 31)
217#define GSCADR_COLKEY_EN (1 << 30)
218#define GSCADR_COLKEYSCR (1 << 29)
219#define GSCADR_BLEND_M Fld(2,27)
220#define GSCADR_BLEND_NONE ((0x0) << FShft(GSCADR_BLEND_M))
221#define GSCADR_BLEND_INV ((0x1) << FShft(GSCADR_BLEND_M))
222#define GSCADR_BLEND_GLOB ((0x2) << FShft(GSCADR_BLEND_M))
223#define GSCADR_BLEND_PIX ((0x3) << FShft(GSCADR_BLEND_M))
224#define GSCADR_BLEND_POS Fld(2,24)
225#define GSCADR_BLEND_GFX ((0x0) << FShft(GSCADR_BLEND_POS))
226#define GSCADR_BLEND_VID ((0x1) << FShft(GSCADR_BLEND_POS))
227#define GSCADR_BLEND_CUR ((0x2) << FShft(GSCADR_BLEND_POS))
228#define GSCADR_GBASE_ADR Fld(23,0)
229#define Gscadr_Gbase_Adr(x) ((x) << FShft(GSCADR_GBASE_ADR))
230
231/* GSADR graphics stride address register fields */
232#define GSADR_SRCSTRIDE Fld(10,22)
233#define Gsadr_Srcstride(x) ((x) << FShft(GSADR_SRCSTRIDE))
234#define GSADR_XSTART Fld(11,11)
235#define Gsadr_Xstart(x) ((x) << FShft(GSADR_XSTART))
236#define GSADR_YSTART Fld(11,0)
237#define Gsadr_Ystart(y) ((y) << FShft(GSADR_YSTART))
238
239/* GPLUT graphics palette register fields */
240#define GPLUT_LUTADR Fld(8,24)
241#define Gplut_Lutadr(x) ((x) << FShft(GPLUT_LUTADR))
242#define GPLUT_LUTDATA Fld(24,0)
243#define Gplut_Lutdata(x) ((x) << FShft(GPLUT_LUTDATA))
244
245/* HCCTRL - Hardware Cursor Register fields */
246#define HCCTRL_CUR_EN (1 << 31)
247#define HCCTRL_COLKEY_EN (1 << 29)
248#define HCCTRL_COLKEYSRC (1 << 28)
249#define HCCTRL_BLEND_M Fld(2,26)
250#define HCCTRL_BLEND_NONE ((0x0) << FShft(HCCTRL_BLEND_M))
251#define HCCTRL_BLEND_INV ((0x1) << FShft(HCCTRL_BLEND_M))
252#define HCCTRL_BLEND_GLOB ((0x2) << FShft(HCCTRL_BLEND_M))
253#define HCCTRL_BLEND_PIX ((0x3) << FShft(HCCTRL_BLEND_M))
254#define HCCTRL_CPIXFMT Fld(3,23)
255#define HCCTRL_CPIXFMT_RGB332 ((0x3) << FShft(HCCTRL_CPIXFMT))
256#define HCCTRL_CPIXFMT_ARGB4444 ((0x4) << FShft(HCCTRL_CPIXFMT))
257#define HCCTRL_CPIXFMT_ARGB1555 ((0x5) << FShft(HCCTRL_CPIXFMT))
258#define HCCTRL_CBASE_ADR Fld(23,0)
259#define Hcctrl_Cbase_Adr(x) ((x) << FShft(HCCTRL_CBASE_ADR))
260
261/* HCSIZE Hardware Cursor Size Register fields */
262#define HCSIZE_BLEND_POS Fld(2,29)
263#define HCSIZE_BLEND_GFX ((0x0) << FShft(HCSIZE_BLEND_POS))
264#define HCSIZE_BLEND_VID ((0x1) << FShft(HCSIZE_BLEND_POS))
265#define HCSIZE_BLEND_CUR ((0x2) << FShft(HCSIZE_BLEND_POS))
266#define HCSIZE_CWIDTH Fld(3,16)
267#define Hcsize_Cwidth(x) ((x) << FShft(HCSIZE_CWIDTH))
268#define HCSIZE_CHEIGHT Fld(3,0)
269#define Hcsize_Cheight(x) ((x) << FShft(HCSIZE_CHEIGHT))
270
271/* HCPOS Hardware Cursor Position Register fields */
272#define HCPOS_SWITCHSRC (1 << 30)
273#define HCPOS_CURBLINK Fld(6,24)
274#define Hcpos_Curblink(x) ((x) << FShft(HCPOS_CURBLINK))
275#define HCPOS_XSTART Fld(12,12)
276#define Hcpos_Xstart(x) ((x) << FShft(HCPOS_XSTART))
277#define HCPOS_YSTART Fld(12,0)
278#define Hcpos_Ystart(y) ((y) << FShft(HCPOS_YSTART))
279
280/* HCBADR Hardware Cursor Blend Address Register */
281#define HCBADR_GLALPHA Fld(8,24)
282#define Hcbadr_Glalpha(x) ((x) << FShft(HCBADR_GLALPHA))
283#define HCBADR_COLKEY Fld(24,0)
284#define Hcbadr_Colkey(x) ((x) << FShft(HCBADR_COLKEY))
285
286/* HCCKMSK - Hardware Cursor Color Key Mask Register */
287#define HCCKMSK_COLKEY_M Fld(24,0)
288#define Hcckmsk_Colkey_M(x) ((x) << FShft(HCCKMSK_COLKEY_M))
289
290/* DSCTRL - Display sync control register */
291#define DSCTRL_SYNCGEN_EN (1 << 31)
292#define DSCTRL_DPL_RST (1 << 29)
293#define DSCTRL_PWRDN_M (1 << 28)
294#define DSCTRL_UPDSYNCCNT (1 << 26)
295#define DSCTRL_UPDINTCNT (1 << 25)
296#define DSCTRL_UPDCNT (1 << 24)
297#define DSCTRL_UPDWAIT Fld(4,16)
298#define Dsctrl_Updwait(x) ((x) << FShft(DSCTRL_UPDWAIT))
299#define DSCTRL_CLKPOL (1 << 11)
300#define DSCTRL_CSYNC_EN (1 << 10)
301#define DSCTRL_VS_SLAVE (1 << 7)
302#define DSCTRL_HS_SLAVE (1 << 6)
303#define DSCTRL_BLNK_POL (1 << 5)
304#define DSCTRL_BLNK_DIS (1 << 4)
305#define DSCTRL_VS_POL (1 << 3)
306#define DSCTRL_VS_DIS (1 << 2)
307#define DSCTRL_HS_POL (1 << 1)
308#define DSCTRL_HS_DIS (1 << 0)
309
310/* DHT01 - Display horizontal timing register 01 */
311#define DHT01_HBPS Fld(12,16)
312#define Dht01_Hbps(x) ((x) << FShft(DHT01_HBPS))
313#define DHT01_HT Fld(12,0)
314#define Dht01_Ht(x) ((x) << FShft(DHT01_HT))
315
316/* DHT02 - Display horizontal timing register 02 */
317#define DHT02_HAS Fld(12,16)
318#define Dht02_Has(x) ((x) << FShft(DHT02_HAS))
319#define DHT02_HLBS Fld(12,0)
320#define Dht02_Hlbs(x) ((x) << FShft(DHT02_HLBS))
321
322/* DHT03 - Display horizontal timing register 03 */
323#define DHT03_HFPS Fld(12,16)
324#define Dht03_Hfps(x) ((x) << FShft(DHT03_HFPS))
325#define DHT03_HRBS Fld(12,0)
326#define Dht03_Hrbs(x) ((x) << FShft(DHT03_HRBS))
327
328/* DVT01 - Display vertical timing register 01 */
329#define DVT01_VBPS Fld(12,16)
330#define Dvt01_Vbps(x) ((x) << FShft(DVT01_VBPS))
331#define DVT01_VT Fld(12,0)
332#define Dvt01_Vt(x) ((x) << FShft(DVT01_VT))
333
334/* DVT02 - Display vertical timing register 02 */
335#define DVT02_VAS Fld(12,16)
336#define Dvt02_Vas(x) ((x) << FShft(DVT02_VAS))
337#define DVT02_VTBS Fld(12,0)
338#define Dvt02_Vtbs(x) ((x) << FShft(DVT02_VTBS))
339
340/* DVT03 - Display vertical timing register 03 */
341#define DVT03_VFPS Fld(12,16)
342#define Dvt03_Vfps(x) ((x) << FShft(DVT03_VFPS))
343#define DVT03_VBBS Fld(12,0)
344#define Dvt03_Vbbs(x) ((x) << FShft(DVT03_VBBS))
345
346/* DVECTRL - display vertical event control register */
347#define DVECTRL_VEVENT Fld(12,16)
348#define Dvectrl_Vevent(x) ((x) << FShft(DVECTRL_VEVENT))
349#define DVECTRL_VFETCH Fld(12,0)
350#define Dvectrl_Vfetch(x) ((x) << FShft(DVECTRL_VFETCH))
351
352/* DHDET - display horizontal DE timing register */
353#define DHDET_HDES Fld(12,16)
354#define Dhdet_Hdes(x) ((x) << FShft(DHDET_HDES))
355#define DHDET_HDEF Fld(12,0)
356#define Dhdet_Hdef(x) ((x) << FShft(DHDET_HDEF))
357
358/* DVDET - display vertical DE timing register */
359#define DVDET_VDES Fld(12,16)
360#define Dvdet_Vdes(x) ((x) << FShft(DVDET_VDES))
361#define DVDET_VDEF Fld(12,0)
362#define Dvdet_Vdef(x) ((x) << FShft(DVDET_VDEF))
363
364/* DODMSK - display output data mask register */
365#define DODMSK_MASK_LVL (1 << 31)
366#define DODMSK_BLNK_LVL (1 << 30)
367#define DODMSK_MASK_B Fld(8,16)
368#define Dodmsk_Mask_B(x) ((x) << FShft(DODMSK_MASK_B))
369#define DODMSK_MASK_G Fld(8,8)
370#define Dodmsk_Mask_G(x) ((x) << FShft(DODMSK_MASK_G))
371#define DODMSK_MASK_R Fld(8,0)
372#define Dodmsk_Mask_R(x) ((x) << FShft(DODMSK_MASK_R))
373
374/* DBCOL - display border color control register */
375#define DBCOL_BORDCOL Fld(24,0)
376#define Dbcol_Bordcol(x) ((x) << FShft(DBCOL_BORDCOL))
377
378/* DVLNUM - display vertical line number register */
379#define DVLNUM_VLINE Fld(12,0)
380#define Dvlnum_Vline(x) ((x) << FShft(DVLNUM_VLINE))
381
382/* DMCTRL - Display Memory Control Register */
383#define DMCTRL_MEM_REF Fld(2,30)
384#define DMCTRL_MEM_REF_ACT ((0x0) << FShft(DMCTRL_MEM_REF))
385#define DMCTRL_MEM_REF_HB ((0x1) << FShft(DMCTRL_MEM_REF))
386#define DMCTRL_MEM_REF_VB ((0x2) << FShft(DMCTRL_MEM_REF))
387#define DMCTRL_MEM_REF_BOTH ((0x3) << FShft(DMCTRL_MEM_REF))
388#define DMCTRL_UV_THRHLD Fld(6,24)
389#define Dmctrl_Uv_Thrhld(x) ((x) << FShft(DMCTRL_UV_THRHLD))
390#define DMCTRL_V_THRHLD Fld(7,16)
391#define Dmctrl_V_Thrhld(x) ((x) << FShft(DMCTRL_V_THRHLD))
392#define DMCTRL_D_THRHLD Fld(7,8)
393#define Dmctrl_D_Thrhld(x) ((x) << FShft(DMCTRL_D_THRHLD))
394#define DMCTRL_BURSTLEN Fld(6,0)
395#define Dmctrl_Burstlen(x) ((x) << FShft(DMCTRL_BURSTLEN))
396
397
398/* DLSTS - display load status register */
399#define DLSTS_RLD_ADONE (1 << 23)
400/* #define DLSTS_RLD_ADOUT Fld(23,0) */
401
402/* DLLCTRL - display list load control register */
403#define DLLCTRL_RLD_ADRLN Fld(8,24)
404#define Dllctrl_Rld_Adrln(x) ((x) << FShft(DLLCTRL_RLD_ADRLN))
405
406/* SPOCTRL - Scale Pitch/Order Control Register */
407#define SPOCTRL_H_SC_BP (1 << 31)
408#define SPOCTRL_V_SC_BP (1 << 30)
409#define SPOCTRL_HV_SC_OR (1 << 29)
410#define SPOCTRL_VS_UR_C (1 << 27)
411#define SPOCTRL_VORDER Fld(2,16)
412#define SPOCTRL_VORDER_1TAP ((0x0) << FShft(SPOCTRL_VORDER))
413#define SPOCTRL_VORDER_2TAP ((0x1) << FShft(SPOCTRL_VORDER))
414#define SPOCTRL_VORDER_4TAP ((0x3) << FShft(SPOCTRL_VORDER))
415#define SPOCTRL_VPITCH Fld(16,0)
416#define Spoctrl_Vpitch(x) ((x) << FShft(SPOCTRL_VPITCH))
417
418#endif /* __REG_BITS_2700G_ */
diff --git a/drivers/video/mbx/regs.h b/drivers/video/mbx/regs.h
new file mode 100644
index 000000000000..ad20be07666b
--- /dev/null
+++ b/drivers/video/mbx/regs.h
@@ -0,0 +1,195 @@
1#ifndef __REGS_2700G_
2#define __REGS_2700G_
3
4/* extern unsigned long virt_base_2700; */
5/* #define __REG_2700G(x) (*(volatile unsigned long*)((x)+virt_base_2700)) */
6#define __REG_2700G(x) ((x)+virt_base_2700)
7
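/*
 * With this definition SYSCLKSRC, for instance, expands to
 * (0x0000002c + virt_base_2700), an address inside the ioremap()ed
 * register window that the driver then hands to readl()/writel(); the
 * commented-out variant above is the alternative direct-dereference style.
 */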
8/* System Configuration Registers (0x0000_0000 0x0000_0010) */
9#define SYSCFG __REG_2700G(0x00000000)
10#define PFBASE __REG_2700G(0x00000004)
11#define PFCEIL __REG_2700G(0x00000008)
12#define POLLFLAG __REG_2700G(0x0000000c)
13#define SYSRST __REG_2700G(0x00000010)
14
15/* Interrupt Control Registers (0x0000_0014 0x0000_002F) */
16#define NINTPW __REG_2700G(0x00000014)
17#define MINTENABLE __REG_2700G(0x00000018)
18#define MINTSTAT __REG_2700G(0x0000001c)
19#define SINTENABLE __REG_2700G(0x00000020)
20#define SINTSTAT __REG_2700G(0x00000024)
21#define SINTCLR __REG_2700G(0x00000028)
22
23/* Clock Control Registers (0x0000_002C 0x0000_005F) */
24#define SYSCLKSRC __REG_2700G(0x0000002c)
25#define PIXCLKSRC __REG_2700G(0x00000030)
26#define CLKSLEEP __REG_2700G(0x00000034)
27#define COREPLL __REG_2700G(0x00000038)
28#define DISPPLL __REG_2700G(0x0000003c)
29#define PLLSTAT __REG_2700G(0x00000040)
30#define VOVRCLK __REG_2700G(0x00000044)
31#define PIXCLK __REG_2700G(0x00000048)
32#define MEMCLK __REG_2700G(0x0000004c)
33#define M24CLK __REG_2700G(0x00000054)
34#define MBXCLK __REG_2700G(0x00000054)
35#define SDCLK __REG_2700G(0x00000058)
36#define PIXCLKDIV __REG_2700G(0x0000005c)
37
38/* LCD Port Control Register (0x0000_0060 0x0000_006F) */
39#define LCD_CONFIG __REG_2700G(0x00000060)
40
41/* On-Die Frame Buffer Registers (0x0000_0064 0x0000_006B) */
42#define ODFBPWR __REG_2700G(0x00000064)
43#define ODFBSTAT __REG_2700G(0x00000068)
44
45/* GPIO Registers (0x0000_006C 0x0000_007F) */
46#define GPIOCGF __REG_2700G(0x0000006c)
47#define GPIOHI __REG_2700G(0x00000070)
48#define GPIOLO __REG_2700G(0x00000074)
49#define GPIOSTAT __REG_2700G(0x00000078)
50
51/* Pulse Width Modulator (PWM) Registers (0x0000_0200 0x0000_02FF) */
52#define PWMRST __REG_2700G(0x00000200)
53#define PWMCFG __REG_2700G(0x00000204)
54#define PWM0DIV __REG_2700G(0x00000210)
55#define PWM0DUTY __REG_2700G(0x00000214)
56#define PWM0PER __REG_2700G(0x00000218)
57#define PWM1DIV __REG_2700G(0x00000220)
58#define PWM1DUTY __REG_2700G(0x00000224)
59#define PWM1PER __REG_2700G(0x00000228)
60
61/* Identification (ID) Registers (0x0000_0300 0x0000_0FFF) */
62#define ID __REG_2700G(0x00000FF0)
63
64/* Local Memory (SDRAM) Interface Registers (0x0000_1000 0x0000_1FFF) */
65#define LMRST __REG_2700G(0x00001000)
66#define LMCFG __REG_2700G(0x00001004)
67#define LMPWR __REG_2700G(0x00001008)
68#define LMPWRSTAT __REG_2700G(0x0000100c)
69#define LMCEMR __REG_2700G(0x00001010)
70#define LMTYPE __REG_2700G(0x00001014)
71#define LMTIM __REG_2700G(0x00001018)
72#define LMREFRESH __REG_2700G(0x0000101c)
73#define LMPROTMIN __REG_2700G(0x00001020)
74#define LMPROTMAX __REG_2700G(0x00001024)
75#define LMPROTCFG __REG_2700G(0x00001028)
76#define LMPROTERR __REG_2700G(0x0000102c)
77
78/* Plane Controller Registers (0x0000_2000 0x0000_2FFF) */
79#define GSCTRL __REG_2700G(0x00002000)
80#define VSCTRL __REG_2700G(0x00002004)
81#define GBBASE __REG_2700G(0x00002020)
82#define VBBASE __REG_2700G(0x00002024)
83#define GDRCTRL __REG_2700G(0x00002040)
84#define VCMSK __REG_2700G(0x00002044)
85#define GSCADR __REG_2700G(0x00002060)
86#define VSCADR __REG_2700G(0x00002064)
87#define VUBASE __REG_2700G(0x00002084)
88#define VVBASE __REG_2700G(0x000020a4)
89#define GSADR __REG_2700G(0x000020c0)
90#define VSADR __REG_2700G(0x000020c4)
91#define HCCTRL __REG_2700G(0x00002100)
92#define HCSIZE __REG_2700G(0x00002110)
93#define HCPOS __REG_2700G(0x00002120)
94#define HCBADR __REG_2700G(0x00002130)
95#define HCCKMSK __REG_2700G(0x00002140)
96#define GPLUT __REG_2700G(0x00002150)
97#define DSCTRL __REG_2700G(0x00002154)
98#define DHT01 __REG_2700G(0x00002158)
99#define DHT02 __REG_2700G(0x0000215c)
100#define DHT03 __REG_2700G(0x00002160)
101#define DVT01 __REG_2700G(0x00002164)
102#define DVT02 __REG_2700G(0x00002168)
103#define DVT03 __REG_2700G(0x0000216c)
104#define DBCOL __REG_2700G(0x00002170)
105#define BGCOLOR __REG_2700G(0x00002174)
106#define DINTRS __REG_2700G(0x00002178)
107#define DINTRE __REG_2700G(0x0000217c)
108#define DINTRCNT __REG_2700G(0x00002180)
109#define DSIG __REG_2700G(0x00002184)
110#define DMCTRL __REG_2700G(0x00002188)
111#define CLIPCTRL __REG_2700G(0x0000218c)
112#define SPOCTRL __REG_2700G(0x00002190)
113#define SVCTRL __REG_2700G(0x00002194)
114
115/* 0x0000_2198 */
116/* 0x0000_21A8 VSCOEFF[0:4] Video Scalar Vertical Coefficient [0:4] 4.14.5 */
117#define VSCOEFF0 __REG_2700G(0x00002198)
118#define VSCOEFF1 __REG_2700G(0x0000219c)
119#define VSCOEFF2 __REG_2700G(0x000021a0)
120#define VSCOEFF3 __REG_2700G(0x000021a4)
121#define VSCOEFF4 __REG_2700G(0x000021a8)
122
123#define SHCTRL __REG_2700G(0x000021b0)
124
125/* 0x0000_21B4 */
126/* 0x0000_21D4 HSCOEFF[0:8] Video Scalar Horizontal Coefficient [0:8] 4.14.7 */
127#define HSCOEFF0 __REG_2700G(0x000021b4)
128#define HSCOEFF1 __REG_2700G(0x000021b8)
129#define HSCOEFF2 __REG_2700G(0x000021bc)
 130#define HSCOEFF3 __REG_2700G(0x000021c0)
131#define HSCOEFF4 __REG_2700G(0x000021c4)
132#define HSCOEFF5 __REG_2700G(0x000021c8)
133#define HSCOEFF6 __REG_2700G(0x000021cc)
134#define HSCOEFF7 __REG_2700G(0x000021d0)
135#define HSCOEFF8 __REG_2700G(0x000021d4)
136
137#define SSSIZE __REG_2700G(0x000021D8)
138
139/* 0x0000_2200 */
140/* 0x0000_2240 VIDGAM[0:16] Video Gamma LUT Index [0:16] 4.15.2 */
141#define VIDGAM0 __REG_2700G(0x00002200)
142#define VIDGAM1 __REG_2700G(0x00002204)
143#define VIDGAM2 __REG_2700G(0x00002208)
144#define VIDGAM3 __REG_2700G(0x0000220c)
145#define VIDGAM4 __REG_2700G(0x00002210)
146#define VIDGAM5 __REG_2700G(0x00002214)
147#define VIDGAM6 __REG_2700G(0x00002218)
148#define VIDGAM7 __REG_2700G(0x0000221c)
149#define VIDGAM8 __REG_2700G(0x00002220)
150#define VIDGAM9 __REG_2700G(0x00002224)
151#define VIDGAM10 __REG_2700G(0x00002228)
152#define VIDGAM11 __REG_2700G(0x0000222c)
153#define VIDGAM12 __REG_2700G(0x00002230)
154#define VIDGAM13 __REG_2700G(0x00002234)
155#define VIDGAM14 __REG_2700G(0x00002238)
156#define VIDGAM15 __REG_2700G(0x0000223c)
157#define VIDGAM16 __REG_2700G(0x00002240)
158
159/* 0x0000_2250 */
160/* 0x0000_2290 GFXGAM[0:16] Graphics Gamma LUT Index [0:16] 4.15.3 */
161#define GFXGAM0 __REG_2700G(0x00002250)
162#define GFXGAM1 __REG_2700G(0x00002254)
163#define GFXGAM2 __REG_2700G(0x00002258)
164#define GFXGAM3 __REG_2700G(0x0000225c)
165#define GFXGAM4 __REG_2700G(0x00002260)
166#define GFXGAM5 __REG_2700G(0x00002264)
167#define GFXGAM6 __REG_2700G(0x00002268)
168#define GFXGAM7 __REG_2700G(0x0000226c)
169#define GFXGAM8 __REG_2700G(0x00002270)
170#define GFXGAM9 __REG_2700G(0x00002274)
171#define GFXGAM10 __REG_2700G(0x00002278)
172#define GFXGAM11 __REG_2700G(0x0000227c)
173#define GFXGAM12 __REG_2700G(0x00002280)
174#define GFXGAM13 __REG_2700G(0x00002284)
175#define GFXGAM14 __REG_2700G(0x00002288)
176#define GFXGAM15 __REG_2700G(0x0000228c)
177#define GFXGAM16 __REG_2700G(0x00002290)
178
179#define DLSTS __REG_2700G(0x00002300)
180#define DLLCTRL __REG_2700G(0x00002304)
181#define DVLNUM __REG_2700G(0x00002308)
182#define DUCTRL __REG_2700G(0x0000230c)
183#define DVECTRL __REG_2700G(0x00002310)
184#define DHDET __REG_2700G(0x00002314)
185#define DVDET __REG_2700G(0x00002318)
186#define DODMSK __REG_2700G(0x0000231c)
187#define CSC01 __REG_2700G(0x00002330)
188#define CSC02 __REG_2700G(0x00002334)
189#define CSC03 __REG_2700G(0x00002338)
190#define CSC04 __REG_2700G(0x0000233c)
191#define CSC05 __REG_2700G(0x00002340)
192
193#define FB_MEMORY_START __REG_2700G(0x00060000)
194
195#endif /* __REGS_2700G_ */
diff --git a/fs/char_dev.c b/fs/char_dev.c
index a4cbc6706ef0..3483d3cf8087 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -182,6 +182,28 @@ int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
182 return 0; 182 return 0;
183} 183}
184 184
185/**
186 * register_chrdev() - Register a major number for character devices.
187 * @major: major device number or 0 for dynamic allocation
188 * @name: name of this range of devices
 189 * @fops: file operations associated with these devices
190 *
 191 * If @major == 0 this function will dynamically allocate a major and return
192 * its number.
193 *
194 * If @major > 0 this function will attempt to reserve a device with the given
195 * major number and will return zero on success.
196 *
197 * Returns a -ve errno on failure.
198 *
199 * The name of this device has nothing to do with the name of the device in
200 * /dev. It only helps to keep track of the different owners of devices. If
201 * your module name has only one type of devices it's ok to use e.g. the name
202 * of the module here.
203 *
204 * This function registers a range of 256 minor numbers. The first minor number
205 * is 0.
206 */
185int register_chrdev(unsigned int major, const char *name, 207int register_chrdev(unsigned int major, const char *name,
186 const struct file_operations *fops) 208 const struct file_operations *fops)
187{ 209{
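A minimal user of the interface documented above might look like the following sketch; the "exampledev" name and the empty file_operations are illustrative only and not part of this patch.

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>

static const struct file_operations example_fops;	/* filled in elsewhere */
static int example_major;

static int __init example_init(void)
{
	int ret = register_chrdev(0, "exampledev", &example_fops);

	if (ret < 0)
		return ret;		/* -ve errno on failure */
	example_major = ret;		/* dynamic major, since 0 was requested */
	return 0;
}

static void __exit example_exit(void)
{
	unregister_chrdev(example_major, "exampledev");
}

module_init(example_init);
module_exit(example_exit);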
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 10c46231ce15..efbb586bed4b 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -2944,7 +2944,7 @@ int jfs_sync(void *arg)
2944 * Inode is being freed 2944 * Inode is being freed
2945 */ 2945 */
2946 list_del_init(&jfs_ip->anon_inode_list); 2946 list_del_init(&jfs_ip->anon_inode_list);
2947 } else if (! !mutex_trylock(&jfs_ip->commit_mutex)) { 2947 } else if (mutex_trylock(&jfs_ip->commit_mutex)) {
2948 /* 2948 /*
2949 * inode will be removed from anonymous list 2949 * inode will be removed from anonymous list
2950 * when it is committed 2950 * when it is committed
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 09ea03f62277..295268ad231b 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -165,8 +165,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
165 165
166 out3: 166 out3:
167 txEnd(tid); 167 txEnd(tid);
168 mutex_unlock(&JFS_IP(dip)->commit_mutex);
169 mutex_unlock(&JFS_IP(ip)->commit_mutex); 168 mutex_unlock(&JFS_IP(ip)->commit_mutex);
169 mutex_unlock(&JFS_IP(dip)->commit_mutex);
170 if (rc) { 170 if (rc) {
171 free_ea_wmap(ip); 171 free_ea_wmap(ip);
172 ip->i_nlink = 0; 172 ip->i_nlink = 0;
@@ -300,8 +300,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
300 300
301 out3: 301 out3:
302 txEnd(tid); 302 txEnd(tid);
303 mutex_unlock(&JFS_IP(dip)->commit_mutex);
304 mutex_unlock(&JFS_IP(ip)->commit_mutex); 303 mutex_unlock(&JFS_IP(ip)->commit_mutex);
304 mutex_unlock(&JFS_IP(dip)->commit_mutex);
305 if (rc) { 305 if (rc) {
306 free_ea_wmap(ip); 306 free_ea_wmap(ip);
307 ip->i_nlink = 0; 307 ip->i_nlink = 0;
@@ -384,8 +384,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
384 if (rc == -EIO) 384 if (rc == -EIO)
385 txAbort(tid, 1); 385 txAbort(tid, 1);
386 txEnd(tid); 386 txEnd(tid);
387 mutex_unlock(&JFS_IP(dip)->commit_mutex);
388 mutex_unlock(&JFS_IP(ip)->commit_mutex); 387 mutex_unlock(&JFS_IP(ip)->commit_mutex);
388 mutex_unlock(&JFS_IP(dip)->commit_mutex);
389 389
390 goto out2; 390 goto out2;
391 } 391 }
@@ -422,8 +422,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
422 422
423 txEnd(tid); 423 txEnd(tid);
424 424
425 mutex_unlock(&JFS_IP(dip)->commit_mutex);
426 mutex_unlock(&JFS_IP(ip)->commit_mutex); 425 mutex_unlock(&JFS_IP(ip)->commit_mutex);
426 mutex_unlock(&JFS_IP(dip)->commit_mutex);
427 427
428 /* 428 /*
429 * Truncating the directory index table is not guaranteed. It 429 * Truncating the directory index table is not guaranteed. It
@@ -503,8 +503,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
503 if (rc == -EIO) 503 if (rc == -EIO)
504 txAbort(tid, 1); /* Marks FS Dirty */ 504 txAbort(tid, 1); /* Marks FS Dirty */
505 txEnd(tid); 505 txEnd(tid);
506 mutex_unlock(&JFS_IP(dip)->commit_mutex);
507 mutex_unlock(&JFS_IP(ip)->commit_mutex); 506 mutex_unlock(&JFS_IP(ip)->commit_mutex);
507 mutex_unlock(&JFS_IP(dip)->commit_mutex);
508 IWRITE_UNLOCK(ip); 508 IWRITE_UNLOCK(ip);
509 goto out1; 509 goto out1;
510 } 510 }
@@ -527,8 +527,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
527 if ((new_size = commitZeroLink(tid, ip)) < 0) { 527 if ((new_size = commitZeroLink(tid, ip)) < 0) {
528 txAbort(tid, 1); /* Marks FS Dirty */ 528 txAbort(tid, 1); /* Marks FS Dirty */
529 txEnd(tid); 529 txEnd(tid);
530 mutex_unlock(&JFS_IP(dip)->commit_mutex);
531 mutex_unlock(&JFS_IP(ip)->commit_mutex); 530 mutex_unlock(&JFS_IP(ip)->commit_mutex);
531 mutex_unlock(&JFS_IP(dip)->commit_mutex);
532 IWRITE_UNLOCK(ip); 532 IWRITE_UNLOCK(ip);
533 rc = new_size; 533 rc = new_size;
534 goto out1; 534 goto out1;
@@ -556,9 +556,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
556 556
557 txEnd(tid); 557 txEnd(tid);
558 558
559 mutex_unlock(&JFS_IP(dip)->commit_mutex);
560 mutex_unlock(&JFS_IP(ip)->commit_mutex); 559 mutex_unlock(&JFS_IP(ip)->commit_mutex);
561 560 mutex_unlock(&JFS_IP(dip)->commit_mutex);
562 561
563 while (new_size && (rc == 0)) { 562 while (new_size && (rc == 0)) {
564 tid = txBegin(dip->i_sb, 0); 563 tid = txBegin(dip->i_sb, 0);
@@ -847,8 +846,8 @@ static int jfs_link(struct dentry *old_dentry,
847 out: 846 out:
848 txEnd(tid); 847 txEnd(tid);
849 848
850 mutex_unlock(&JFS_IP(dir)->commit_mutex);
851 mutex_unlock(&JFS_IP(ip)->commit_mutex); 849 mutex_unlock(&JFS_IP(ip)->commit_mutex);
850 mutex_unlock(&JFS_IP(dir)->commit_mutex);
852 851
853 jfs_info("jfs_link: rc:%d", rc); 852 jfs_info("jfs_link: rc:%d", rc);
854 return rc; 853 return rc;
@@ -1037,8 +1036,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
1037 1036
1038 out3: 1037 out3:
1039 txEnd(tid); 1038 txEnd(tid);
1040 mutex_unlock(&JFS_IP(dip)->commit_mutex);
1041 mutex_unlock(&JFS_IP(ip)->commit_mutex); 1039 mutex_unlock(&JFS_IP(ip)->commit_mutex);
1040 mutex_unlock(&JFS_IP(dip)->commit_mutex);
1042 if (rc) { 1041 if (rc) {
1043 free_ea_wmap(ip); 1042 free_ea_wmap(ip);
1044 ip->i_nlink = 0; 1043 ip->i_nlink = 0;
@@ -1160,10 +1159,11 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1160 if (S_ISDIR(new_ip->i_mode)) { 1159 if (S_ISDIR(new_ip->i_mode)) {
1161 new_ip->i_nlink--; 1160 new_ip->i_nlink--;
1162 if (new_ip->i_nlink) { 1161 if (new_ip->i_nlink) {
1163 mutex_unlock(&JFS_IP(new_dir)->commit_mutex); 1162 mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
1164 mutex_unlock(&JFS_IP(old_ip)->commit_mutex);
1165 if (old_dir != new_dir) 1163 if (old_dir != new_dir)
1166 mutex_unlock(&JFS_IP(old_dir)->commit_mutex); 1164 mutex_unlock(&JFS_IP(old_dir)->commit_mutex);
1165 mutex_unlock(&JFS_IP(old_ip)->commit_mutex);
1166 mutex_unlock(&JFS_IP(new_dir)->commit_mutex);
1167 if (!S_ISDIR(old_ip->i_mode) && new_ip) 1167 if (!S_ISDIR(old_ip->i_mode) && new_ip)
1168 IWRITE_UNLOCK(new_ip); 1168 IWRITE_UNLOCK(new_ip);
1169 jfs_error(new_ip->i_sb, 1169 jfs_error(new_ip->i_sb,
@@ -1281,13 +1281,12 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1281 1281
1282 out4: 1282 out4:
1283 txEnd(tid); 1283 txEnd(tid);
1284
1285 mutex_unlock(&JFS_IP(new_dir)->commit_mutex);
1286 mutex_unlock(&JFS_IP(old_ip)->commit_mutex);
1287 if (old_dir != new_dir)
1288 mutex_unlock(&JFS_IP(old_dir)->commit_mutex);
1289 if (new_ip) 1284 if (new_ip)
1290 mutex_unlock(&JFS_IP(new_ip)->commit_mutex); 1285 mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
1286 if (old_dir != new_dir)
1287 mutex_unlock(&JFS_IP(old_dir)->commit_mutex);
1288 mutex_unlock(&JFS_IP(old_ip)->commit_mutex);
1289 mutex_unlock(&JFS_IP(new_dir)->commit_mutex);
1291 1290
1292 while (new_size && (rc == 0)) { 1291 while (new_size && (rc == 0)) {
1293 tid = txBegin(new_ip->i_sb, 0); 1292 tid = txBegin(new_ip->i_sb, 0);
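
The jfs hunks above all make the same adjustment: the two commit_mutex unlocks are reordered so that the inode's mutex is dropped before the directory's (and, in jfs_rename, new_dir's is dropped last). Assuming the directory's commit_mutex is the one taken first, as elsewhere in jfs, this is the usual release-in-reverse-acquisition-order discipline. A minimal userspace sketch of that pattern, with POSIX mutexes standing in for the kernel's mutex_lock()/mutex_unlock() and purely illustrative lock names:

    /* Illustrative only, not jfs code: locks are released in the reverse
     * of the order they were acquired, matching the reordered unlocks in
     * the hunks above. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t dir_mutex   = PTHREAD_MUTEX_INITIALIZER; /* cf. dip */
    static pthread_mutex_t inode_mutex = PTHREAD_MUTEX_INITIALIZER; /* cf. ip  */

    static void update_pair(void)
    {
            pthread_mutex_lock(&dir_mutex);       /* acquired first  */
            pthread_mutex_lock(&inode_mutex);     /* acquired second */

            /* ... modify both objects while holding both locks ... */

            pthread_mutex_unlock(&inode_mutex);   /* released first  */
            pthread_mutex_unlock(&dir_mutex);     /* released last   */
    }

    int main(void)
    {
            update_pair();
            puts("done");
            return 0;
    }
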
diff --git a/fs/namei.c b/fs/namei.c
index c9750d755aff..e01070d7bf58 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1712,8 +1712,14 @@ do_link:
1712 if (error) 1712 if (error)
1713 goto exit_dput; 1713 goto exit_dput;
1714 error = __do_follow_link(&path, nd); 1714 error = __do_follow_link(&path, nd);
1715 if (error) 1715 if (error) {
1716 /* Does someone understand code flow here? Or it is only
1717 * me so stupid? Anathema to whoever designed this non-sense
1718 * with "intent.open".
1719 */
1720 release_open_intent(nd);
1716 return error; 1721 return error;
1722 }
1717 nd->flags &= ~LOOKUP_PARENT; 1723 nd->flags &= ~LOOKUP_PARENT;
1718 if (nd->last_type == LAST_BIND) 1724 if (nd->last_type == LAST_BIND)
1719 goto ok; 1725 goto ok;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7495d3e20775..0b615d62a159 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -74,6 +74,7 @@
74#include <linux/times.h> 74#include <linux/times.h>
75#include <linux/cpuset.h> 75#include <linux/cpuset.h>
76#include <linux/rcupdate.h> 76#include <linux/rcupdate.h>
77#include <linux/delayacct.h>
77 78
78#include <asm/uaccess.h> 79#include <asm/uaccess.h>
79#include <asm/pgtable.h> 80#include <asm/pgtable.h>
@@ -411,7 +412,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
411 412
412 res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ 413 res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
413%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ 414%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
414%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", 415%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu %llu\n",
415 task->pid, 416 task->pid,
416 tcomm, 417 tcomm,
417 state, 418 state,
@@ -455,7 +456,8 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
455 task->exit_signal, 456 task->exit_signal,
456 task_cpu(task), 457 task_cpu(task),
457 task->rt_priority, 458 task->rt_priority,
458 task->policy); 459 task->policy,
460 (unsigned long long)delayacct_blkio_ticks(task));
459 if(mm) 461 if(mm)
460 mmput(mm); 462 mmput(mm);
461 return res; 463 return res;
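
The do_task_stat() hunk appends one more %llu column, delayacct_blkio_ticks(task), to the end of the /proc/<pid>/stat line. A small userspace sketch that prints the last whitespace-separated field of that line; with this patch applied it is the task's accumulated block-I/O delay. Field positions differ between kernel versions, so the parsing here is illustrative rather than a stable interface.

    /* Print the final field of /proc/self/stat (the new blkio-delay value
     * on kernels carrying this patch). Error handling kept minimal. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[4096];
            FILE *f = fopen("/proc/self/stat", "r");

            if (!f || !fgets(buf, sizeof(buf), f)) {
                    perror("/proc/self/stat");
                    return 1;
            }
            fclose(f);

            char *last = strrchr(buf, ' ');       /* last space-separated field */
            printf("last stat field: %s", last ? last + 1 : buf);
            return 0;
    }
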
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 243a94af0427..fe8d55fb17cc 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -551,6 +551,27 @@ static int proc_fd_access_allowed(struct inode *inode)
551 return allowed; 551 return allowed;
552} 552}
553 553
554static int proc_setattr(struct dentry *dentry, struct iattr *attr)
555{
556 int error;
557 struct inode *inode = dentry->d_inode;
558
559 if (attr->ia_valid & ATTR_MODE)
560 return -EPERM;
561
562 error = inode_change_ok(inode, attr);
563 if (!error) {
564 error = security_inode_setattr(dentry, attr);
565 if (!error)
566 error = inode_setattr(inode, attr);
567 }
568 return error;
569}
570
571static struct inode_operations proc_def_inode_operations = {
572 .setattr = proc_setattr,
573};
574
554extern struct seq_operations mounts_op; 575extern struct seq_operations mounts_op;
555struct proc_mounts { 576struct proc_mounts {
556 struct seq_file m; 577 struct seq_file m;
@@ -1111,7 +1132,8 @@ out:
1111 1132
1112static struct inode_operations proc_pid_link_inode_operations = { 1133static struct inode_operations proc_pid_link_inode_operations = {
1113 .readlink = proc_pid_readlink, 1134 .readlink = proc_pid_readlink,
1114 .follow_link = proc_pid_follow_link 1135 .follow_link = proc_pid_follow_link,
1136 .setattr = proc_setattr,
1115}; 1137};
1116 1138
1117static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir) 1139static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
@@ -1285,6 +1307,7 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
1285 ei = PROC_I(inode); 1307 ei = PROC_I(inode);
1286 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 1308 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1287 inode->i_ino = fake_ino(task->pid, ino); 1309 inode->i_ino = fake_ino(task->pid, ino);
1310 inode->i_op = &proc_def_inode_operations;
1288 1311
1289 /* 1312 /*
1290 * grab the reference to task. 1313 * grab the reference to task.
@@ -1339,6 +1362,7 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1339 inode->i_uid = 0; 1362 inode->i_uid = 0;
1340 inode->i_gid = 0; 1363 inode->i_gid = 0;
1341 } 1364 }
1365 inode->i_mode &= ~(S_ISUID | S_ISGID);
1342 security_task_to_inode(task, inode); 1366 security_task_to_inode(task, inode);
1343 put_task_struct(task); 1367 put_task_struct(task);
1344 return 1; 1368 return 1;
@@ -1389,6 +1413,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1389 inode->i_uid = 0; 1413 inode->i_uid = 0;
1390 inode->i_gid = 0; 1414 inode->i_gid = 0;
1391 } 1415 }
1416 inode->i_mode &= ~(S_ISUID | S_ISGID);
1392 security_task_to_inode(task, inode); 1417 security_task_to_inode(task, inode);
1393 put_task_struct(task); 1418 put_task_struct(task);
1394 return 1; 1419 return 1;
@@ -1527,11 +1552,13 @@ static struct file_operations proc_task_operations = {
1527 */ 1552 */
1528static struct inode_operations proc_fd_inode_operations = { 1553static struct inode_operations proc_fd_inode_operations = {
1529 .lookup = proc_lookupfd, 1554 .lookup = proc_lookupfd,
1555 .setattr = proc_setattr,
1530}; 1556};
1531 1557
1532static struct inode_operations proc_task_inode_operations = { 1558static struct inode_operations proc_task_inode_operations = {
1533 .lookup = proc_task_lookup, 1559 .lookup = proc_task_lookup,
1534 .getattr = proc_task_getattr, 1560 .getattr = proc_task_getattr,
1561 .setattr = proc_setattr,
1535}; 1562};
1536 1563
1537#ifdef CONFIG_SECURITY 1564#ifdef CONFIG_SECURITY
@@ -1845,11 +1872,13 @@ static struct file_operations proc_tid_base_operations = {
1845static struct inode_operations proc_tgid_base_inode_operations = { 1872static struct inode_operations proc_tgid_base_inode_operations = {
1846 .lookup = proc_tgid_base_lookup, 1873 .lookup = proc_tgid_base_lookup,
1847 .getattr = pid_getattr, 1874 .getattr = pid_getattr,
1875 .setattr = proc_setattr,
1848}; 1876};
1849 1877
1850static struct inode_operations proc_tid_base_inode_operations = { 1878static struct inode_operations proc_tid_base_inode_operations = {
1851 .lookup = proc_tid_base_lookup, 1879 .lookup = proc_tid_base_lookup,
1852 .getattr = pid_getattr, 1880 .getattr = pid_getattr,
1881 .setattr = proc_setattr,
1853}; 1882};
1854 1883
1855#ifdef CONFIG_SECURITY 1884#ifdef CONFIG_SECURITY
@@ -1892,11 +1921,13 @@ static struct dentry *proc_tid_attr_lookup(struct inode *dir,
1892static struct inode_operations proc_tgid_attr_inode_operations = { 1921static struct inode_operations proc_tgid_attr_inode_operations = {
1893 .lookup = proc_tgid_attr_lookup, 1922 .lookup = proc_tgid_attr_lookup,
1894 .getattr = pid_getattr, 1923 .getattr = pid_getattr,
1924 .setattr = proc_setattr,
1895}; 1925};
1896 1926
1897static struct inode_operations proc_tid_attr_inode_operations = { 1927static struct inode_operations proc_tid_attr_inode_operations = {
1898 .lookup = proc_tid_attr_lookup, 1928 .lookup = proc_tid_attr_lookup,
1899 .getattr = pid_getattr, 1929 .getattr = pid_getattr,
1930 .setattr = proc_setattr,
1900}; 1931};
1901#endif 1932#endif
1902 1933
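
The base.c changes give every per-process proc inode a default setattr handler that refuses mode changes (ATTR_MODE returns -EPERM) while otherwise going through the normal inode_change_ok()/security_inode_setattr()/inode_setattr() sequence, and the revalidate paths now strip any setuid/setgid bits. The user-visible effect can be checked with a trivial program; this assumes the behaviour introduced by this patch and may differ on other kernels:

    /* Expect chmod on a per-process proc entry to be refused with EPERM. */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            if (chmod("/proc/self/stat", 0777) == -1 && errno == EPERM)
                    puts("chmod refused: proc rejects ATTR_MODE");
            else
                    puts("chmod was not refused on this kernel");
            return 0;
    }
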
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 6dcef089e18e..49dfb2ab783e 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -192,7 +192,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
192{ 192{
193 struct inode * root_inode; 193 struct inode * root_inode;
194 194
195 s->s_flags |= MS_NODIRATIME; 195 s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
196 s->s_blocksize = 1024; 196 s->s_blocksize = 1024;
197 s->s_blocksize_bits = 10; 197 s->s_blocksize_bits = 10;
198 s->s_magic = PROC_SUPER_MAGIC; 198 s->s_magic = PROC_SUPER_MAGIC;
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 5d8a8cfebc70..c533ec1bcaec 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -492,9 +492,17 @@ static void add_file(struct super_block *sb, char *name,
492 492
493int reiserfs_proc_info_init(struct super_block *sb) 493int reiserfs_proc_info_init(struct super_block *sb)
494{ 494{
495 char b[BDEVNAME_SIZE];
496 char *s;
497
498 /* Some block devices use /'s */
499 strlcpy(b, reiserfs_bdevname(sb), BDEVNAME_SIZE);
500 s = strchr(b, '/');
501 if (s)
502 *s = '!';
503
495 spin_lock_init(&__PINFO(sb).lock); 504 spin_lock_init(&__PINFO(sb).lock);
496 REISERFS_SB(sb)->procdir = 505 REISERFS_SB(sb)->procdir = proc_mkdir(b, proc_info_root);
497 proc_mkdir(reiserfs_bdevname(sb), proc_info_root);
498 if (REISERFS_SB(sb)->procdir) { 506 if (REISERFS_SB(sb)->procdir) {
499 REISERFS_SB(sb)->procdir->owner = THIS_MODULE; 507 REISERFS_SB(sb)->procdir->owner = THIS_MODULE;
500 REISERFS_SB(sb)->procdir->data = sb; 508 REISERFS_SB(sb)->procdir->data = sb;
@@ -508,13 +516,22 @@ int reiserfs_proc_info_init(struct super_block *sb)
508 return 0; 516 return 0;
509 } 517 }
510 reiserfs_warning(sb, "reiserfs: cannot create /proc/%s/%s", 518 reiserfs_warning(sb, "reiserfs: cannot create /proc/%s/%s",
511 proc_info_root_name, reiserfs_bdevname(sb)); 519 proc_info_root_name, b);
512 return 1; 520 return 1;
513} 521}
514 522
515int reiserfs_proc_info_done(struct super_block *sb) 523int reiserfs_proc_info_done(struct super_block *sb)
516{ 524{
517 struct proc_dir_entry *de = REISERFS_SB(sb)->procdir; 525 struct proc_dir_entry *de = REISERFS_SB(sb)->procdir;
526 char b[BDEVNAME_SIZE];
527 char *s;
528
529 /* Some block devices use /'s */
530 strlcpy(b, reiserfs_bdevname(sb), BDEVNAME_SIZE);
531 s = strchr(b, '/');
532 if (s)
533 *s = '!';
534
518 if (de) { 535 if (de) {
519 remove_proc_entry("journal", de); 536 remove_proc_entry("journal", de);
520 remove_proc_entry("oidmap", de); 537 remove_proc_entry("oidmap", de);
@@ -528,7 +545,7 @@ int reiserfs_proc_info_done(struct super_block *sb)
528 __PINFO(sb).exiting = 1; 545 __PINFO(sb).exiting = 1;
529 spin_unlock(&__PINFO(sb).lock); 546 spin_unlock(&__PINFO(sb).lock);
530 if (proc_info_root) { 547 if (proc_info_root) {
531 remove_proc_entry(reiserfs_bdevname(sb), proc_info_root); 548 remove_proc_entry(b, proc_info_root);
532 REISERFS_SB(sb)->procdir = NULL; 549 REISERFS_SB(sb)->procdir = NULL;
533 } 550 }
534 return 0; 551 return 0;
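
Both reiserfs hunks add the same sanitisation: a block device name such as "cciss/c0d0" contains a '/', which cannot appear in a single proc directory entry, so the first '/' is rewritten to '!' before proc_mkdir()/remove_proc_entry() are called. A stand-alone sketch of that transformation; strncpy plus explicit termination stands in for the kernel's strlcpy, and only the first '/' is replaced, exactly as in the patch:

    #include <stdio.h>
    #include <string.h>

    #define BDEVNAME_SIZE 32   /* the bound used by the patch (32 in kernel headers) */

    static void sanitize_bdevname(const char *bdev, char out[BDEVNAME_SIZE])
    {
            char *s;

            strncpy(out, bdev, BDEVNAME_SIZE - 1);
            out[BDEVNAME_SIZE - 1] = '\0';
            s = strchr(out, '/');              /* only the first '/' is replaced */
            if (s)
                    *s = '!';
    }

    int main(void)
    {
            char b[BDEVNAME_SIZE];

            sanitize_bdevname("cciss/c0d0", b);
            printf("%s\n", b);                 /* prints "cciss!c0d0" */
            return 0;
    }
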
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index ceda3a2859d2..7858703ed84c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -246,8 +246,8 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
246#define BUF_BUSY XBF_DONT_BLOCK 246#define BUF_BUSY XBF_DONT_BLOCK
247 247
248#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) 248#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
249#define XFS_BUF_ZEROFLAGS(bp) \ 249#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
250 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) 250 ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
251 251
252#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) 252#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
253#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) 253#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9bdef9d51900..4754f342a5d3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -314,6 +314,13 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
314 return; 314 return;
315 } 315 }
316 316
317 if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
318 xfs_fs_cmn_err(CE_NOTE, mp,
319 "Disabling barriers, underlying device is readonly");
320 mp->m_flags &= ~XFS_MOUNT_BARRIER;
321 return;
322 }
323
317 error = xfs_barrier_test(mp); 324 error = xfs_barrier_test(mp);
318 if (error) { 325 if (error) {
319 xfs_fs_cmn_err(CE_NOTE, mp, 326 xfs_fs_cmn_err(CE_NOTE, mp,
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index e95e99f7168f..f137856c3261 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -217,17 +217,24 @@ xfs_qm_statvfs(
217 return 0; 217 return 0;
218 dp = &dqp->q_core; 218 dp = &dqp->q_core;
219 219
220 limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit; 220 limit = dp->d_blk_softlimit ?
221 be64_to_cpu(dp->d_blk_softlimit) :
222 be64_to_cpu(dp->d_blk_hardlimit);
221 if (limit && statp->f_blocks > limit) { 223 if (limit && statp->f_blocks > limit) {
222 statp->f_blocks = limit; 224 statp->f_blocks = limit;
223 statp->f_bfree = (statp->f_blocks > dp->d_bcount) ? 225 statp->f_bfree =
224 (statp->f_blocks - dp->d_bcount) : 0; 226 (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
227 (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
225 } 228 }
226 limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit; 229
230 limit = dp->d_ino_softlimit ?
231 be64_to_cpu(dp->d_ino_softlimit) :
232 be64_to_cpu(dp->d_ino_hardlimit);
227 if (limit && statp->f_files > limit) { 233 if (limit && statp->f_files > limit) {
228 statp->f_files = limit; 234 statp->f_files = limit;
229 statp->f_ffree = (statp->f_files > dp->d_icount) ? 235 statp->f_ffree =
230 (statp->f_ffree - dp->d_icount) : 0; 236 (statp->f_files > be64_to_cpu(dp->d_icount)) ?
237 (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0;
231 } 238 }
232 239
233 xfs_qm_dqput(dqp); 240 xfs_qm_dqput(dqp);
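
The xfs_qm_statvfs() hunk is an endianness fix: the on-disk dquot limits and counts are big-endian 64-bit fields, so they must be converted with be64_to_cpu() before being compared with the native statfs counters (the bare non-zero test on the raw field is endian-safe and stays as is). A userspace sketch of the same conversion, with glibc's be64toh()/htobe64() standing in for the kernel helpers:

    #include <endian.h>    /* glibc stand-ins for be64_to_cpu()/cpu_to_be64() */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t disk_soft = htobe64(1000);   /* big-endian, as stored on disk */
            uint64_t disk_hard = htobe64(2000);
            uint64_t f_blocks  = 1500;            /* native-endian statfs counter  */

            uint64_t limit = disk_soft ? be64toh(disk_soft) : be64toh(disk_hard);
            if (limit && f_blocks > limit)
                    f_blocks = limit;             /* clamp, as the patched code does */

            printf("clamped f_blocks = %llu\n", (unsigned long long)f_blocks);
            return 0;
    }
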
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 86c1bf0bba9e..1f8ecff8553a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -334,10 +334,9 @@ xfs_itobp(
334#if !defined(__KERNEL__) 334#if !defined(__KERNEL__)
335 ni = 0; 335 ni = 0;
336#elif defined(DEBUG) 336#elif defined(DEBUG)
337 ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 337 ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
338 (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog);
339#else /* usual case */ 338#else /* usual case */
340 ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1; 339 ni = 1;
341#endif 340#endif
342 341
343 for (i = 0; i < ni; i++) { 342 for (i = 0; i < ni; i++) {
@@ -348,11 +347,15 @@ xfs_itobp(
348 (i << mp->m_sb.sb_inodelog)); 347 (i << mp->m_sb.sb_inodelog));
349 di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && 348 di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
350 XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); 349 XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
351 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, 350 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
352 XFS_RANDOM_ITOBP_INOTOBP))) { 351 XFS_ERRTAG_ITOBP_INOTOBP,
352 XFS_RANDOM_ITOBP_INOTOBP))) {
353 if (imap_flags & XFS_IMAP_BULKSTAT) {
354 xfs_trans_brelse(tp, bp);
355 return XFS_ERROR(EINVAL);
356 }
353#ifdef DEBUG 357#ifdef DEBUG
354 if (!(imap_flags & XFS_IMAP_BULKSTAT)) 358 cmn_err(CE_ALERT,
355 cmn_err(CE_ALERT,
356 "Device %s - bad inode magic/vsn " 359 "Device %s - bad inode magic/vsn "
357 "daddr %lld #%d (magic=%x)", 360 "daddr %lld #%d (magic=%x)",
358 XFS_BUFTARG_NAME(mp->m_ddev_targp), 361 XFS_BUFTARG_NAME(mp->m_ddev_targp),
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e730328636c3..21ac1a67e3e0 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1413,7 +1413,7 @@ xlog_sync(xlog_t *log,
1413 ops = iclog->ic_header.h_num_logops; 1413 ops = iclog->ic_header.h_num_logops;
1414 INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); 1414 INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
1415 1415
1416 bp = iclog->ic_bp; 1416 bp = iclog->ic_bp;
1417 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); 1417 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
1418 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); 1418 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
1419 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); 1419 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
@@ -1430,15 +1430,14 @@ xlog_sync(xlog_t *log,
1430 } 1430 }
1431 XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); 1431 XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count);
1432 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ 1432 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */
1433 XFS_BUF_ZEROFLAGS(bp);
1433 XFS_BUF_BUSY(bp); 1434 XFS_BUF_BUSY(bp);
1434 XFS_BUF_ASYNC(bp); 1435 XFS_BUF_ASYNC(bp);
1435 /* 1436 /*
1436 * Do an ordered write for the log block. 1437 * Do an ordered write for the log block.
1437 * 1438 * Its unnecessary to flush the first split block in the log wrap case.
1438 * It may not be needed to flush the first split block in the log wrap
1439 * case, but do it anyways to be safe -AK
1440 */ 1439 */
1441 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1440 if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER))
1442 XFS_BUF_ORDERED(bp); 1441 XFS_BUF_ORDERED(bp);
1443 1442
1444 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1443 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
@@ -1460,7 +1459,7 @@ xlog_sync(xlog_t *log,
1460 return error; 1459 return error;
1461 } 1460 }
1462 if (split) { 1461 if (split) {
1463 bp = iclog->ic_log->l_xbuf; 1462 bp = iclog->ic_log->l_xbuf;
1464 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == 1463 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) ==
1465 (unsigned long)1); 1464 (unsigned long)1);
1466 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); 1465 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
@@ -1468,6 +1467,7 @@ xlog_sync(xlog_t *log,
1468 XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ 1467 XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
1469 (__psint_t)count), split); 1468 (__psint_t)count), split);
1470 XFS_BUF_SET_FSPRIVATE(bp, iclog); 1469 XFS_BUF_SET_FSPRIVATE(bp, iclog);
1470 XFS_BUF_ZEROFLAGS(bp);
1471 XFS_BUF_BUSY(bp); 1471 XFS_BUF_BUSY(bp);
1472 XFS_BUF_ASYNC(bp); 1472 XFS_BUF_ASYNC(bp);
1473 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1473 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 6c96391f3f1a..b427d220a169 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -515,7 +515,7 @@ xfs_mount(
515 if (error) 515 if (error)
516 goto error2; 516 goto error2;
517 517
518 if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) 518 if (mp->m_flags & XFS_MOUNT_BARRIER)
519 xfs_mountfs_check_barriers(mp); 519 xfs_mountfs_check_barriers(mp);
520 520
521 error = XFS_IOINIT(vfsp, args, flags); 521 error = XFS_IOINIT(vfsp, args, flags);
diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h
index 681ff581afa5..384dc08d6f53 100644
--- a/include/asm-alpha/barrier.h
+++ b/include/asm-alpha/barrier.h
@@ -30,7 +30,4 @@ __asm__ __volatile__("mb": : :"memory")
30#define set_mb(var, value) \ 30#define set_mb(var, value) \
31do { var = value; mb(); } while (0) 31do { var = value; mb(); } while (0)
32 32
33#define set_wmb(var, value) \
34do { var = value; wmb(); } while (0)
35
36#endif /* __BARRIER_H */ 33#endif /* __BARRIER_H */
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 6001febfe63b..0947cbf9b69a 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -176,7 +176,6 @@ extern unsigned int user_debug;
176#define wmb() mb() 176#define wmb() mb()
177#define read_barrier_depends() do { } while(0) 177#define read_barrier_depends() do { } while(0)
178#define set_mb(var, value) do { var = value; mb(); } while (0) 178#define set_mb(var, value) do { var = value; mb(); } while (0)
179#define set_wmb(var, value) do { var = value; wmb(); } while (0)
180#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); 179#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
181 180
182/* 181/*
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index d1f69d706198..00ae32aa1dba 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -90,7 +90,6 @@ extern unsigned int user_debug;
90 90
91#define read_barrier_depends() do { } while(0) 91#define read_barrier_depends() do { } while(0)
92#define set_mb(var, value) do { var = value; mb(); } while (0) 92#define set_mb(var, value) do { var = value; mb(); } while (0)
93#define set_wmb(var, value) do { var = value; wmb(); } while (0)
94 93
95/* 94/*
96 * We assume knowledge of how 95 * We assume knowledge of how
diff --git a/include/asm-cris/system.h b/include/asm-cris/system.h
index b1c593b6dbff..b869f6161aaa 100644
--- a/include/asm-cris/system.h
+++ b/include/asm-cris/system.h
@@ -17,7 +17,6 @@ extern struct task_struct *resume(struct task_struct *prev, struct task_struct *
17#define wmb() mb() 17#define wmb() mb()
18#define read_barrier_depends() do { } while(0) 18#define read_barrier_depends() do { } while(0)
19#define set_mb(var, value) do { var = value; mb(); } while (0) 19#define set_mb(var, value) do { var = value; mb(); } while (0)
20#define set_wmb(var, value) do { var = value; wmb(); } while (0)
21 20
22#ifdef CONFIG_SMP 21#ifdef CONFIG_SMP
23#define smp_mb() mb() 22#define smp_mb() mb()
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 351863dfd06e..1166899317d7 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -179,7 +179,6 @@ do { \
179#define rmb() asm volatile ("membar" : : :"memory") 179#define rmb() asm volatile ("membar" : : :"memory")
180#define wmb() asm volatile ("membar" : : :"memory") 180#define wmb() asm volatile ("membar" : : :"memory")
181#define set_mb(var, value) do { var = value; mb(); } while (0) 181#define set_mb(var, value) do { var = value; mb(); } while (0)
182#define set_wmb(var, value) do { var = value; wmb(); } while (0)
183 182
184#define smp_mb() mb() 183#define smp_mb() mb()
185#define smp_rmb() rmb() 184#define smp_rmb() rmb()
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index d8d0bcecd23f..6b16dda18115 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,11 +1,8 @@
1unifdef-y += a.out.h auxvec.h byteorder.h errno.h fcntl.h ioctl.h \ 1unifdef-y += a.out.h auxvec.h byteorder.h errno.h fcntl.h ioctl.h \
2 ioctls.h ipcbuf.h irq.h mman.h msgbuf.h param.h poll.h \ 2 ioctls.h ipcbuf.h mman.h msgbuf.h param.h poll.h \
3 posix_types.h ptrace.h resource.h sembuf.h shmbuf.h shmparam.h \ 3 posix_types.h ptrace.h resource.h sembuf.h shmbuf.h shmparam.h \
4 sigcontext.h siginfo.h signal.h socket.h sockios.h stat.h \ 4 sigcontext.h siginfo.h signal.h socket.h sockios.h stat.h \
5 statfs.h termbits.h termios.h timex.h types.h unistd.h user.h 5 statfs.h termbits.h termios.h timex.h types.h unistd.h user.h
6 6
7# These really shouldn't be exported
8unifdef-y += atomic.h io.h
9
10# These probably shouldn't be exported 7# These probably shouldn't be exported
11unifdef-y += elf.h page.h 8unifdef-y += elf.h page.h
diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h
index 134e0929fce5..5084a9d42922 100644
--- a/include/asm-h8300/system.h
+++ b/include/asm-h8300/system.h
@@ -84,7 +84,6 @@ asmlinkage void resume(void);
84#define wmb() asm volatile ("" : : :"memory") 84#define wmb() asm volatile ("" : : :"memory")
85#define set_rmb(var, value) do { xchg(&var, value); } while (0) 85#define set_rmb(var, value) do { xchg(&var, value); } while (0)
86#define set_mb(var, value) set_rmb(var, value) 86#define set_mb(var, value) set_rmb(var, value)
87#define set_wmb(var, value) do { var = value; wmb(); } while (0)
88 87
89#ifdef CONFIG_SMP 88#ifdef CONFIG_SMP
90#define smp_mb() mb() 89#define smp_mb() mb()
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 2db168ef949f..49928eb33f8b 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -453,8 +453,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
453#define set_mb(var, value) do { var = value; barrier(); } while (0) 453#define set_mb(var, value) do { var = value; barrier(); } while (0)
454#endif 454#endif
455 455
456#define set_wmb(var, value) do { var = value; wmb(); } while (0)
457
458#include <linux/irqflags.h> 456#include <linux/irqflags.h>
459 457
460/* 458/*
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 65db43ce4de6..fc9677bc87ee 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -98,12 +98,11 @@ extern struct ia64_boot_param {
98#endif 98#endif
99 99
100/* 100/*
101 * XXX check on these---I suspect what Linus really wants here is 101 * XXX check on this ---I suspect what Linus really wants here is
102 * acquire vs release semantics but we can't discuss this stuff with 102 * acquire vs release semantics but we can't discuss this stuff with
103 * Linus just yet. Grrr... 103 * Linus just yet. Grrr...
104 */ 104 */
105#define set_mb(var, value) do { (var) = (value); mb(); } while (0) 105#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
106#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
107 106
108#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */ 107#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
109 108
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 311cebf44eff..9e618afec6ed 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -336,7 +336,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
336#endif 336#endif
337 337
338#define set_mb(var, value) do { xchg(&var, value); } while (0) 338#define set_mb(var, value) do { xchg(&var, value); } while (0)
339#define set_wmb(var, value) do { var = value; wmb(); } while (0)
340 339
341#define arch_align_stack(x) (x) 340#define arch_align_stack(x) (x)
342 341
diff --git a/include/asm-m68k/oplib.h b/include/asm-m68k/oplib.h
index c3594f473ef7..06caa2d08451 100644
--- a/include/asm-m68k/oplib.h
+++ b/include/asm-m68k/oplib.h
@@ -244,11 +244,6 @@ extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
244/* Does the passed node have the given "name"? YES=1 NO=0 */ 244/* Does the passed node have the given "name"? YES=1 NO=0 */
245extern int prom_nodematch(int thisnode, char *name); 245extern int prom_nodematch(int thisnode, char *name);
246 246
247/* Puts in buffer a prom name in the form name@x,y or name (x for which_io
248 * and y for first regs phys address
249 */
250extern int prom_getname(int node, char *buf, int buflen);
251
252/* Search all siblings starting at the passed node for "name" matching 247/* Search all siblings starting at the passed node for "name" matching
253 * the given string. Returns the node on success, zero on failure. 248 * the given string. Returns the node on success, zero on failure.
254 */ 249 */
diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h
index d6dd8052cd6f..131a0cb0f491 100644
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -80,7 +80,6 @@ static inline int irqs_disabled(void)
80#define wmb() barrier() 80#define wmb() barrier()
81#define read_barrier_depends() do { } while(0) 81#define read_barrier_depends() do { } while(0)
82#define set_mb(var, value) do { xchg(&var, value); } while (0) 82#define set_mb(var, value) do { xchg(&var, value); } while (0)
83#define set_wmb(var, value) do { var = value; wmb(); } while (0)
84 83
85#define smp_mb() barrier() 84#define smp_mb() barrier()
86#define smp_rmb() barrier() 85#define smp_rmb() barrier()
diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h
index 2bbe2db00a22..2a814498672d 100644
--- a/include/asm-m68knommu/system.h
+++ b/include/asm-m68knommu/system.h
@@ -106,7 +106,6 @@ asmlinkage void resume(void);
106#define wmb() asm volatile ("" : : :"memory") 106#define wmb() asm volatile ("" : : :"memory")
107#define set_rmb(var, value) do { xchg(&var, value); } while (0) 107#define set_rmb(var, value) do { xchg(&var, value); } while (0)
108#define set_mb(var, value) set_rmb(var, value) 108#define set_mb(var, value) set_rmb(var, value)
109#define set_wmb(var, value) do { var = value; wmb(); } while (0)
110 109
111#ifdef CONFIG_SMP 110#ifdef CONFIG_SMP
112#define smp_mb() mb() 111#define smp_mb() mb()
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 13c98dde82dc..dcb4701d5728 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -143,9 +143,6 @@
143#define set_mb(var, value) \ 143#define set_mb(var, value) \
144do { var = value; mb(); } while (0) 144do { var = value; mb(); } while (0)
145 145
146#define set_wmb(var, value) \
147do { var = value; wmb(); } while (0)
148
149/* 146/*
150 * switch_to(n) should switch tasks to task nr n, first 147 * switch_to(n) should switch tasks to task nr n, first
151 * checking that n isn't the current task, in which case it does nothing. 148 * checking that n isn't the current task, in which case it does nothing.
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 5fe2d2329ab5..74f037a39e6f 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -143,8 +143,6 @@ static inline void set_eiem(unsigned long val)
143#define read_barrier_depends() do { } while(0) 143#define read_barrier_depends() do { } while(0)
144 144
145#define set_mb(var, value) do { var = value; mb(); } while (0) 145#define set_mb(var, value) do { var = value; mb(); } while (0)
146#define set_wmb(var, value) do { var = value; wmb(); } while (0)
147
148 146
149#ifndef CONFIG_PA20 147#ifndef CONFIG_PA20
150/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data, 148/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index c6569516ba35..7307aa775671 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -39,7 +39,6 @@
39#define read_barrier_depends() do { } while(0) 39#define read_barrier_depends() do { } while(0)
40 40
41#define set_mb(var, value) do { var = value; mb(); } while (0) 41#define set_mb(var, value) do { var = value; mb(); } while (0)
42#define set_wmb(var, value) do { var = value; wmb(); } while (0)
43 42
44#ifdef __KERNEL__ 43#ifdef __KERNEL__
45#ifdef CONFIG_SMP 44#ifdef CONFIG_SMP
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index fb49c0c49ea1..738943584c01 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -33,7 +33,6 @@
33#define read_barrier_depends() do { } while(0) 33#define read_barrier_depends() do { } while(0)
34 34
35#define set_mb(var, value) do { var = value; mb(); } while (0) 35#define set_mb(var, value) do { var = value; mb(); } while (0)
36#define set_wmb(var, value) do { var = value; wmb(); } while (0)
37 36
38#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
39#define smp_mb() mb() 38#define smp_mb() mb()
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 9ab186ffde23..16040048cd1b 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -128,8 +128,13 @@ extern void account_system_vtime(struct task_struct *);
128 128
129#define nop() __asm__ __volatile__ ("nop") 129#define nop() __asm__ __volatile__ ("nop")
130 130
131#define xchg(ptr,x) \ 131#define xchg(ptr,x) \
132 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr)))) 132({ \
133 __typeof__(*(ptr)) __ret; \
134 __ret = (__typeof__(*(ptr))) \
135 __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
136 __ret; \
137})
133 138
134static inline unsigned long __xchg(unsigned long x, void * ptr, int size) 139static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
135{ 140{
@@ -299,7 +304,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
299 304
300 305
301#define set_mb(var, value) do { var = value; mb(); } while (0) 306#define set_mb(var, value) do { var = value; mb(); } while (0)
302#define set_wmb(var, value) do { var = value; wmb(); } while (0)
303 307
304#ifdef __s390x__ 308#ifdef __s390x__
305 309
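
The first hunk of the asm-s390/system.h diff above rewrites xchg() as a GNU C statement expression so the macro yields a value whose type matches *ptr instead of a bare cast around the expression. The same shape in a stand-alone form, with the generic __sync_lock_test_and_set() builtin standing in for the architecture-specific __xchg(); this sketches the macro pattern, not the s390 implementation:

    #include <stdio.h>

    #define my_xchg(ptr, x)                                        \
    ({                                                             \
            __typeof__(*(ptr)) __ret;                              \
            __ret = (__typeof__(*(ptr)))                           \
                    __sync_lock_test_and_set((ptr), (x));          \
            __ret;                                                 \
    })

    int main(void)
    {
            int v = 1;
            int old = my_xchg(&v, 5);

            printf("old=%d new=%d\n", old, v);    /* old=1 new=5 */
            return 0;
    }
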
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 4848057dafe4..5d0332a4c2bd 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -19,7 +19,7 @@ static inline cycles_t get_cycles(void)
19{ 19{
20 cycles_t cycles; 20 cycles_t cycles;
21 21
22 __asm__("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc"); 22 __asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");
23 return cycles >> 2; 23 return cycles >> 2;
24} 24}
25 25
@@ -27,7 +27,7 @@ static inline unsigned long long get_clock (void)
27{ 27{
28 unsigned long long clk; 28 unsigned long long clk;
29 29
30 __asm__("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 30 __asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
31 return clk; 31 return clk;
32} 32}
33 33
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index ce2e60664a86..ad35ad4958f4 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -101,7 +101,6 @@ extern void __xchg_called_with_bad_pointer(void);
101#endif 101#endif
102 102
103#define set_mb(var, value) do { xchg(&var, value); } while (0) 103#define set_mb(var, value) do { xchg(&var, value); } while (0)
104#define set_wmb(var, value) do { var = value; wmb(); } while (0)
105 104
106/* Interrupt Control */ 105/* Interrupt Control */
107static __inline__ void local_irq_enable(void) 106static __inline__ void local_irq_enable(void)
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
index 7606f6e1f01e..87ef6f1ad5a4 100644
--- a/include/asm-sh64/system.h
+++ b/include/asm-sh64/system.h
@@ -66,7 +66,6 @@ extern void __xchg_called_with_bad_pointer(void);
66 66
67#define set_rmb(var, value) do { xchg(&var, value); } while (0) 67#define set_rmb(var, value) do { xchg(&var, value); } while (0)
68#define set_mb(var, value) set_rmb(var, value) 68#define set_mb(var, value) set_rmb(var, value)
69#define set_wmb(var, value) do { var = value; wmb(); } while (0)
70 69
71/* Interrupt Control */ 70/* Interrupt Control */
72#ifndef HARD_CLI 71#ifndef HARD_CLI
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index f283f8aaf6a9..91691e52c058 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -267,11 +267,6 @@ extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
267/* Does the passed node have the given "name"? YES=1 NO=0 */ 267/* Does the passed node have the given "name"? YES=1 NO=0 */
268extern int prom_nodematch(int thisnode, char *name); 268extern int prom_nodematch(int thisnode, char *name);
269 269
270/* Puts in buffer a prom name in the form name@x,y or name (x for which_io
271 * and y for first regs phys address
272 */
273extern int prom_getname(int node, char *buf, int buflen);
274
275/* Search all siblings starting at the passed node for "name" matching 270/* Search all siblings starting at the passed node for "name" matching
276 * the given string. Returns the node on success, zero on failure. 271 * the given string. Returns the node on success, zero on failure.
277 */ 272 */
diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h
index 0ae5084c427b..d03a21c97abb 100644
--- a/include/asm-sparc/signal.h
+++ b/include/asm-sparc/signal.h
@@ -168,7 +168,7 @@ struct sigstack {
168 * statically allocated data.. which is NOT GOOD. 168 * statically allocated data.. which is NOT GOOD.
169 * 169 *
170 */ 170 */
171#define SA_STATIC_ALLOC 0x80 171#define SA_STATIC_ALLOC 0x8000
172#endif 172#endif
173 173
174#include <asm-generic/signal.h> 174#include <asm-generic/signal.h>
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index cb7dda1e5e91..100c3eaf3c1f 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -199,7 +199,6 @@ static inline unsigned long getipl(void)
199#define wmb() mb() 199#define wmb() mb()
200#define read_barrier_depends() do { } while(0) 200#define read_barrier_depends() do { } while(0)
201#define set_mb(__var, __value) do { __var = __value; mb(); } while(0) 201#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
202#define set_wmb(__var, __value) set_mb(__var, __value)
203#define smp_mb() __asm__ __volatile__("":::"memory") 202#define smp_mb() __asm__ __volatile__("":::"memory")
204#define smp_rmb() __asm__ __volatile__("":::"memory") 203#define smp_rmb() __asm__ __volatile__("":::"memory")
205#define smp_wmb() __asm__ __volatile__("":::"memory") 204#define smp_wmb() __asm__ __volatile__("":::"memory")
diff --git a/include/asm-sparc64/openprom.h b/include/asm-sparc64/openprom.h
index b4959d2b0d99..e01b80559c93 100644
--- a/include/asm-sparc64/openprom.h
+++ b/include/asm-sparc64/openprom.h
@@ -175,7 +175,7 @@ struct linux_nodeops {
175}; 175};
176 176
177/* More fun PROM structures for device probing. */ 177/* More fun PROM structures for device probing. */
178#define PROMREG_MAX 16 178#define PROMREG_MAX 24
179#define PROMVADDR_MAX 16 179#define PROMVADDR_MAX 16
180#define PROMINTR_MAX 15 180#define PROMINTR_MAX 15
181 181
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index a68b0bb05958..6a0da3b1695c 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -287,11 +287,6 @@ extern void prom_getstring(int node, const char *prop, char *buf, int bufsize);
287/* Does the passed node have the given "name"? YES=1 NO=0 */ 287/* Does the passed node have the given "name"? YES=1 NO=0 */
288extern int prom_nodematch(int thisnode, const char *name); 288extern int prom_nodematch(int thisnode, const char *name);
289 289
290/* Puts in buffer a prom name in the form name@x,y or name (x for which_io
291 * and y for first regs phys address
292 */
293extern int prom_getname(int node, char *buf, int buflen);
294
295/* Search all siblings starting at the passed node for "name" matching 290/* Search all siblings starting at the passed node for "name" matching
296 * the given string. Returns the node on success, zero on failure. 291 * the given string. Returns the node on success, zero on failure.
297 */ 292 */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 03f5bc9b6bec..1ba19eb34ce3 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -339,7 +339,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
339 " .section .sun4v_2insn_patch, \"ax\"\n" 339 " .section .sun4v_2insn_patch, \"ax\"\n"
340 " .word 661b\n" 340 " .word 661b\n"
341 " andn %0, %4, %0\n" 341 " andn %0, %4, %0\n"
342 " or %0, %3, %0\n" 342 " or %0, %5, %0\n"
343 " .previous\n" 343 " .previous\n"
344 : "=r" (val) 344 : "=r" (val)
345 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), 345 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h
index 5015bb8d6c32..89d42431efb5 100644
--- a/include/asm-sparc64/sfp-machine.h
+++ b/include/asm-sparc64/sfp-machine.h
@@ -34,7 +34,7 @@
34#define _FP_MUL_MEAT_D(R,X,Y) \ 34#define _FP_MUL_MEAT_D(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) 35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_Q(R,X,Y) \ 36#define _FP_MUL_MEAT_Q(R,X,Y) \
37 _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) 37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
38 38
39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) 39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) 40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 4ca68600c670..a8b7432c9a70 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -123,8 +123,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
123#define read_barrier_depends() do { } while(0) 123#define read_barrier_depends() do { } while(0)
124#define set_mb(__var, __value) \ 124#define set_mb(__var, __value) \
125 do { __var = __value; membar_storeload_storestore(); } while(0) 125 do { __var = __value; membar_storeload_storestore(); } while(0)
126#define set_wmb(__var, __value) \
127 do { __var = __value; wmb(); } while(0)
128 126
129#ifdef CONFIG_SMP 127#ifdef CONFIG_SMP
130#define smp_mb() mb() 128#define smp_mb() mb()
diff --git a/include/asm-v850/system.h b/include/asm-v850/system.h
index 7091af4b7866..da39916f10b0 100644
--- a/include/asm-v850/system.h
+++ b/include/asm-v850/system.h
@@ -68,7 +68,6 @@ static inline int irqs_disabled (void)
68#define read_barrier_depends() ((void)0) 68#define read_barrier_depends() ((void)0)
69#define set_rmb(var, value) do { xchg (&var, value); } while (0) 69#define set_rmb(var, value) do { xchg (&var, value); } while (0)
70#define set_mb(var, value) set_rmb (var, value) 70#define set_mb(var, value) set_rmb (var, value)
71#define set_wmb(var, value) do { var = value; wmb (); } while (0)
72 71
73#define smp_mb() mb () 72#define smp_mb() mb ()
74#define smp_rmb() rmb () 73#define smp_rmb() rmb ()
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index fbfb50136edb..4e3919524240 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -60,9 +60,4 @@ static inline int calgary_iommu_init(void) { return 1; }
60static inline void detect_calgary(void) { return; } 60static inline void detect_calgary(void) { return; }
61#endif 61#endif
62 62
63static inline unsigned int bus_to_phb(unsigned char busno)
64{
65 return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
66}
67
68#endif /* _ASM_X86_64_CALGARY_H */ 63#endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index f7bf875aae40..10f346165cab 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -19,7 +19,7 @@
19#define EXCEPTION_STACK_ORDER 0 19#define EXCEPTION_STACK_ORDER 0
20#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) 20#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
21 21
22#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER 22#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
23#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 23#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
24 24
25#define IRQSTACK_ORDER 2 25#define IRQSTACK_ORDER 2
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 5f9a01805821..ba94ab3d2673 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -42,6 +42,8 @@ extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
42extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); 42extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
43extern void swiotlb_init(void); 43extern void swiotlb_init(void);
44 44
45extern int swiotlb_force;
46
45#ifdef CONFIG_SWIOTLB 47#ifdef CONFIG_SWIOTLB
46extern int swiotlb; 48extern int swiotlb;
47#else 49#else
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index f67f2873a922..6bf170bceae1 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -240,7 +240,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
240#endif 240#endif
241#define read_barrier_depends() do {} while(0) 241#define read_barrier_depends() do {} while(0)
242#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) 242#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
243#define set_wmb(var, value) do { var = value; wmb(); } while (0)
244 243
245#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) 244#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
246 245
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index f986170bd2a1..932bda92a21c 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -99,7 +99,6 @@ static inline void disable_coprocessor(int i)
99#endif 99#endif
100 100
101#define set_mb(var, value) do { var = value; mb(); } while (0) 101#define set_mb(var, value) do { var = value; mb(); } while (0)
102#define set_wmb(var, value) do { var = value; wmb(); } while (0)
103 102
104#if !defined (__ASSEMBLY__) 103#if !defined (__ASSEMBLY__)
105 104
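
Most of the architecture-header hunks in this run delete the same thing: the set_wmb() macro (its set_mb() sibling stays). Its expansion was simply a plain store followed by wmb(), so any caller that ever needs the equivalent can open-code it, as sketched below with __sync_synchronize() standing in for the kernel's write barrier; this only illustrates the removed macro's definition:

    #include <stdio.h>

    static int data;
    static int flag;

    int main(void)
    {
            data = 42;                  /* payload                               */
            flag = 1;                   /* set_wmb(flag, 1) expanded to this ... */
            __sync_synchronize();       /* ... store followed by a barrier       */

            printf("data=%d flag=%d\n", data, flag);
            return 0;
    }
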
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 44a11f1ccaf2..8fb344a9abd8 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,7 +48,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
48{ 48{
49} 49}
50#endif 50#endif
51extern int current_in_cpu_hotplug(void);
52 51
53int cpu_up(unsigned int cpu); 52int cpu_up(unsigned int cpu);
54 53
@@ -61,10 +60,6 @@ static inline int register_cpu_notifier(struct notifier_block *nb)
61static inline void unregister_cpu_notifier(struct notifier_block *nb) 60static inline void unregister_cpu_notifier(struct notifier_block *nb)
62{ 61{
63} 62}
64static inline int current_in_cpu_hotplug(void)
65{
66 return 0;
67}
68 63
69#endif /* CONFIG_SMP */ 64#endif /* CONFIG_SMP */
70extern struct sysdev_class cpu_sysdev_class; 65extern struct sysdev_class cpu_sysdev_class;
@@ -73,7 +68,6 @@ extern struct sysdev_class cpu_sysdev_class;
73/* Stop CPUs going up and down. */ 68/* Stop CPUs going up and down. */
74extern void lock_cpu_hotplug(void); 69extern void lock_cpu_hotplug(void);
75extern void unlock_cpu_hotplug(void); 70extern void unlock_cpu_hotplug(void);
76extern int lock_cpu_hotplug_interruptible(void);
77#define hotcpu_notifier(fn, pri) { \ 71#define hotcpu_notifier(fn, pri) { \
78 static struct notifier_block fn##_nb = \ 72 static struct notifier_block fn##_nb = \
79 { .notifier_call = fn, .priority = pri }; \ 73 { .notifier_call = fn, .priority = pri }; \
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 35e137636b0b..4ea39fee99c7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,9 +172,6 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
172 unsigned int relation); 172 unsigned int relation);
173 173
174 174
175/* pass an event to the cpufreq governor */
176int cpufreq_governor(unsigned int cpu, unsigned int event);
177
178int cpufreq_register_governor(struct cpufreq_governor *governor); 175int cpufreq_register_governor(struct cpufreq_governor *governor);
179void cpufreq_unregister_governor(struct cpufreq_governor *governor); 176void cpufreq_unregister_governor(struct cpufreq_governor *governor);
180 177
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
new file mode 100644
index 000000000000..7e8b6011b8f3
--- /dev/null
+++ b/include/linux/delayacct.h
@@ -0,0 +1,119 @@
1/* delayacct.h - per-task delay accounting
2 *
3 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 */
16
17#ifndef _LINUX_DELAYACCT_H
18#define _LINUX_DELAYACCT_H
19
20#include <linux/sched.h>
21#include <linux/taskstats_kern.h>
22
23/*
24 * Per-task flags relevant to delay accounting
25 * maintained privately to avoid exhausting similar flags in sched.h:PF_*
26 * Used to set current->delays->flags
27 */
28#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
29
30#ifdef CONFIG_TASK_DELAY_ACCT
31
32extern int delayacct_on; /* Delay accounting turned on/off */
33extern kmem_cache_t *delayacct_cache;
34extern void delayacct_init(void);
35extern void __delayacct_tsk_init(struct task_struct *);
36extern void __delayacct_tsk_exit(struct task_struct *);
37extern void __delayacct_blkio_start(void);
38extern void __delayacct_blkio_end(void);
39extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
40extern __u64 __delayacct_blkio_ticks(struct task_struct *);
41
42static inline void delayacct_set_flag(int flag)
43{
44 if (current->delays)
45 current->delays->flags |= flag;
46}
47
48static inline void delayacct_clear_flag(int flag)
49{
50 if (current->delays)
51 current->delays->flags &= ~flag;
52}
53
54static inline void delayacct_tsk_init(struct task_struct *tsk)
55{
56 /* reinitialize in case parent's non-null pointer was dup'ed*/
57 tsk->delays = NULL;
58 if (unlikely(delayacct_on))
59 __delayacct_tsk_init(tsk);
60}
61
62static inline void delayacct_tsk_exit(struct task_struct *tsk)
63{
64 if (tsk->delays)
65 __delayacct_tsk_exit(tsk);
66}
67
68static inline void delayacct_blkio_start(void)
69{
70 if (current->delays)
71 __delayacct_blkio_start();
72}
73
74static inline void delayacct_blkio_end(void)
75{
76 if (current->delays)
77 __delayacct_blkio_end();
78}
79
80static inline int delayacct_add_tsk(struct taskstats *d,
81 struct task_struct *tsk)
82{
83 if (likely(!delayacct_on))
84 return -EINVAL;
85 if (!tsk->delays)
86 return 0;
87 return __delayacct_add_tsk(d, tsk);
88}
89
90static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
91{
92 if (tsk->delays)
93 return __delayacct_blkio_ticks(tsk);
94 return 0;
95}
96
97#else
98static inline void delayacct_set_flag(int flag)
99{}
100static inline void delayacct_clear_flag(int flag)
101{}
102static inline void delayacct_init(void)
103{}
104static inline void delayacct_tsk_init(struct task_struct *tsk)
105{}
106static inline void delayacct_tsk_exit(struct task_struct *tsk)
107{}
108static inline void delayacct_blkio_start(void)
109{}
110static inline void delayacct_blkio_end(void)
111{}
112static inline int delayacct_add_tsk(struct taskstats *d,
113 struct task_struct *tsk)
114{ return 0; }
115static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
116{ return 0; }
117#endif /* CONFIG_TASK_DELAY_ACCT */
118
119#endif
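
The new header follows a common kernel layout: the real work lives in out-of-line __delayacct_*() functions, cheap static inline wrappers gate them on delayacct_on or tsk->delays, and when CONFIG_TASK_DELAY_ACCT is off the whole API collapses into empty inline stubs so call sites need no #ifdefs. A stand-alone sketch of that shape; the foo_acct names are invented for illustration and are not kernel symbols:

    #include <stdio.h>

    #define CONFIG_FOO_ACCT 1          /* undefine to see the API compile away */

    #ifdef CONFIG_FOO_ACCT
    static int foo_acct_on = 1;

    static void foo_acct_event_slow(void)     /* cf. the __delayacct_*() helpers */
    {
            puts("accounted one event");
    }

    static inline void foo_acct_event(void)   /* cheap inline gate */
    {
            if (foo_acct_on)
                    foo_acct_event_slow();
    }
    #else
    static inline void foo_acct_event(void) {}  /* no-op stub, no #ifdef at callers */
    #endif

    int main(void)
    {
            foo_acct_event();
            return 0;
    }
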
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 34c3a215f2cd..d097b5b72bc6 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -96,7 +96,8 @@ struct robust_list_head {
96long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, 96long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
97 u32 __user *uaddr2, u32 val2, u32 val3); 97 u32 __user *uaddr2, u32 val2, u32 val3);
98 98
99extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); 99extern int
100handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
100 101
101#ifdef CONFIG_FUTEX 102#ifdef CONFIG_FUTEX
102extern void exit_robust_list(struct task_struct *curr); 103extern void exit_robust_list(struct task_struct *curr);
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 21338bb3441d..9418519a55d1 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -115,6 +115,7 @@
115#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */ 115#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */
116#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */ 116#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */
117#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */ 117#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */
118#define I2C_DRIVERID_ISL1208 88 /* Intersil ISL1208 RTC */
118 119
119#define I2C_DRIVERID_I2CDEV 900 120#define I2C_DRIVERID_I2CDEV 900
120#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */ 121#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index dc7abef10965..99620451d958 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -571,6 +571,7 @@ typedef struct ide_drive_s {
571 u8 waiting_for_dma; /* dma currently in progress */ 571 u8 waiting_for_dma; /* dma currently in progress */
572 u8 unmask; /* okay to unmask other irqs */ 572 u8 unmask; /* okay to unmask other irqs */
573 u8 bswap; /* byte swap data */ 573 u8 bswap; /* byte swap data */
574 u8 noflush; /* don't attempt flushes */
574 u8 dsc_overlap; /* DSC overlap */ 575 u8 dsc_overlap; /* DSC overlap */
575 u8 nice1; /* give potential excess bandwidth */ 576 u8 nice1; /* give potential excess bandwidth */
576 577
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index eef0876d8307..383627ad328f 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -23,8 +23,8 @@ struct vlan_collection;
23struct vlan_dev_info; 23struct vlan_dev_info;
24struct hlist_node; 24struct hlist_node;
25 25
26#include <linux/proc_fs.h> /* for proc_dir_entry */
27#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28 28
29#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) 29#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header)
30 * that VLAN requires. 30 * that VLAN requires.
@@ -185,7 +185,8 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
185 * This allows the VLAN to have a different MAC than the underlying 185 * This allows the VLAN to have a different MAC than the underlying
186 * device, and still route correctly. 186 * device, and still route correctly.
187 */ 187 */
188 if (!memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr, ETH_ALEN)) 188 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
189 skb->dev->dev_addr))
189 skb->pkt_type = PACKET_HOST; 190 skb->pkt_type = PACKET_HOST;
190 break; 191 break;
191 }; 192 };
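
The second if_vlan.h hunk swaps memcmp() for compare_ether_addr(): both return zero when the two 6-byte MAC addresses match, the latter is simply a cheaper fixed-length comparison (hence the new etherdevice.h include). Sketch with a local helper that keeps the same zero-means-equal contract; it is a stand-in, not the kernel implementation:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    static inline int cmp_ether_addr(const unsigned char *a, const unsigned char *b)
    {
            return memcmp(a, b, ETH_ALEN) != 0;   /* 0 means the addresses match */
    }

    int main(void)
    {
            unsigned char dst[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            unsigned char dev[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            if (!cmp_ether_addr(dst, dev))        /* note the '!', as in the patch */
                    puts("frame is addressed to this device");
            return 0;
    }
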
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 7cce5dfa092f..1c65e7a9f186 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -28,7 +28,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
28 28
29void kthread_bind(struct task_struct *k, unsigned int cpu); 29void kthread_bind(struct task_struct *k, unsigned int cpu);
30int kthread_stop(struct task_struct *k); 30int kthread_stop(struct task_struct *k);
31int kthread_stop_sem(struct task_struct *k, struct semaphore *s);
32int kthread_should_stop(void); 31int kthread_should_stop(void);
33 32
34#endif /* _LINUX_KTHREAD_H */ 33#endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6cc497a2b6da..66c3100c2b94 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -265,12 +265,14 @@ enum {
265 265
266 /* ata_eh_info->flags */ 266 /* ata_eh_info->flags */
267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
268 ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */ 268 ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
270 ATA_EHI_QUIET = (1 << 3), /* be quiet */ 270 ATA_EHI_QUIET = (1 << 3), /* be quiet */
271 271
272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ 272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
273 273
274 ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
275
274 /* max repeat if error condition is still set after ->error_handler */ 276 /* max repeat if error condition is still set after ->error_handler */
275 ATA_EH_MAX_REPEAT = 5, 277 ATA_EH_MAX_REPEAT = 5,
276 278
diff --git a/include/linux/list.h b/include/linux/list.h
index 6b74adf5297f..65a5b5ceda49 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -265,6 +265,17 @@ static inline void list_move_tail(struct list_head *list,
265} 265}
266 266
267/** 267/**
268 * list_is_last - tests whether @list is the last entry in list @head
269 * @list: the entry to test
270 * @head: the head of the list
271 */
272static inline int list_is_last(const struct list_head *list,
273 const struct list_head *head)
274{
275 return list->next == head;
276}
277
278/**
268 * list_empty - tests whether a list is empty 279 * list_empty - tests whether a list is empty
269 * @head: the list to test. 280 * @head: the list to test.
270 */ 281 */
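
list_is_last() relies on the list being circular: an entry is last exactly when its ->next points back at the list head. A stand-alone sketch using a stripped-down stand-in for struct list_head and the helper exactly as added above:

    #include <stdio.h>

    struct list_head {
            struct list_head *next, *prev;
    };

    static inline int list_is_last(const struct list_head *list,
                                   const struct list_head *head)
    {
            return list->next == head;
    }

    int main(void)
    {
            struct list_head head, a, b;

            /* build head <-> a <-> b, circular */
            head.next = &a; a.prev = &head;
            a.next = &b;    b.prev = &a;
            b.next = &head; head.prev = &b;

            printf("a last? %d\n", list_is_last(&a, &head));   /* 0 */
            printf("b last? %d\n", list_is_last(&b, &head));   /* 1 */
            return 0;
    }
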
diff --git a/include/linux/module.h b/include/linux/module.h
index d06c74fb8c26..0dfb794c52d3 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -362,10 +362,8 @@ int is_module_address(unsigned long addr);
362 362
363/* Returns module and fills in value, defined and namebuf, or NULL if 363/* Returns module and fills in value, defined and namebuf, or NULL if
364 symnum out of range. */ 364 symnum out of range. */
365struct module *module_get_kallsym(unsigned int symnum, 365struct module *module_get_kallsym(unsigned int symnum, unsigned long *value,
366 unsigned long *value, 366 char *type, char *name, size_t namelen);
367 char *type,
368 char namebuf[128]);
369 367
370/* Look for this name: can be of form module:name. */ 368/* Look for this name: can be of form module:name. */
371unsigned long module_kallsyms_lookup_name(const char *name); 369unsigned long module_kallsyms_lookup_name(const char *name);
@@ -535,8 +533,8 @@ static inline const char *module_address_lookup(unsigned long addr,
535 533
536static inline struct module *module_get_kallsym(unsigned int symnum, 534static inline struct module *module_get_kallsym(unsigned int symnum,
537 unsigned long *value, 535 unsigned long *value,
538 char *type, 536 char *type, char *name,
539 char namebuf[128]) 537 size_t namelen)
540{ 538{
541 return NULL; 539 return NULL;
542} 540}
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 58cb3d3d44b4..45511a5918d3 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -11,7 +11,7 @@ struct open_intent {
11 struct file *file; 11 struct file *file;
12}; 12};
13 13
14enum { MAX_NESTED_LINKS = 5 }; 14enum { MAX_NESTED_LINKS = 8 };
15 15
16struct nameidata { 16struct nameidata {
17 struct dentry *dentry; 17 struct dentry *dentry;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 76cc099c8580..75f02d8c6ed3 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -924,10 +924,10 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
924 924
925static inline int netif_tx_trylock(struct net_device *dev) 925static inline int netif_tx_trylock(struct net_device *dev)
926{ 926{
927 int err = spin_trylock(&dev->_xmit_lock); 927 int ok = spin_trylock(&dev->_xmit_lock);
928 if (!err) 928 if (likely(ok))
929 dev->xmit_lock_owner = smp_processor_id(); 929 dev->xmit_lock_owner = smp_processor_id();
930 return err; 930 return ok;
931} 931}
932 932
933static inline void netif_tx_unlock(struct net_device *dev) 933static inline void netif_tx_unlock(struct net_device *dev)
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 87764022cc67..31f02ba036ce 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -79,6 +79,8 @@ struct bridge_skb_cb {
79 __u32 ipv4; 79 __u32 ipv4;
80 } daddr; 80 } daddr;
81}; 81};
82
83extern int brnf_deferred_hooks;
82#endif /* CONFIG_BRIDGE_NETFILTER */ 84#endif /* CONFIG_BRIDGE_NETFILTER */
83 85
84#endif /* __KERNEL__ */ 86#endif /* __KERNEL__ */
diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h
index 135742cfada5..7da0cf3702ee 100644
--- a/include/linux/nsc_gpio.h
+++ b/include/linux/nsc_gpio.h
@@ -25,8 +25,6 @@ struct nsc_gpio_ops {
25 void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor); 25 void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor);
26 int (*gpio_get) (unsigned iminor); 26 int (*gpio_get) (unsigned iminor);
27 void (*gpio_set) (unsigned iminor, int state); 27 void (*gpio_set) (unsigned iminor, int state);
28 void (*gpio_set_high)(unsigned iminor);
29 void (*gpio_set_low) (unsigned iminor);
30 void (*gpio_change) (unsigned iminor); 28 void (*gpio_change) (unsigned iminor);
31 int (*gpio_current) (unsigned iminor); 29 int (*gpio_current) (unsigned iminor);
32 struct device* dev; /* for dev_dbg() support, set in init */ 30 struct device* dev; /* for dev_dbg() support, set in init */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c876e27ff93..6afa72e080cb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -463,6 +463,10 @@ struct signal_struct {
463#ifdef CONFIG_BSD_PROCESS_ACCT 463#ifdef CONFIG_BSD_PROCESS_ACCT
464 struct pacct_struct pacct; /* per-process accounting information */ 464 struct pacct_struct pacct; /* per-process accounting information */
465#endif 465#endif
466#ifdef CONFIG_TASKSTATS
467 spinlock_t stats_lock;
468 struct taskstats *stats;
469#endif
466}; 470};
467 471
468/* Context switch must be unlocked if interrupts are to be enabled */ 472/* Context switch must be unlocked if interrupts are to be enabled */
@@ -537,7 +541,7 @@ extern struct user_struct root_user;
537struct backing_dev_info; 541struct backing_dev_info;
538struct reclaim_state; 542struct reclaim_state;
539 543
540#ifdef CONFIG_SCHEDSTATS 544#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
541struct sched_info { 545struct sched_info {
542 /* cumulative counters */ 546 /* cumulative counters */
543 unsigned long cpu_time, /* time spent on the cpu */ 547 unsigned long cpu_time, /* time spent on the cpu */
@@ -548,9 +552,53 @@ struct sched_info {
548 unsigned long last_arrival, /* when we last ran on a cpu */ 552 unsigned long last_arrival, /* when we last ran on a cpu */
549 last_queued; /* when we were last queued to run */ 553 last_queued; /* when we were last queued to run */
550}; 554};
555#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
551 556
557#ifdef CONFIG_SCHEDSTATS
552extern struct file_operations proc_schedstat_operations; 558extern struct file_operations proc_schedstat_operations;
559#endif /* CONFIG_SCHEDSTATS */
560
561#ifdef CONFIG_TASK_DELAY_ACCT
562struct task_delay_info {
563 spinlock_t lock;
564 unsigned int flags; /* Private per-task flags */
565
 566	/* For each stat XXX, add the following, aligned appropriately
567 *
568 * struct timespec XXX_start, XXX_end;
569 * u64 XXX_delay;
570 * u32 XXX_count;
571 *
572 * Atomicity of updates to XXX_delay, XXX_count protected by
573 * single lock above (split into XXX_lock if contention is an issue).
574 */
575
576 /*
577 * XXX_count is incremented on every XXX operation, the delay
578 * associated with the operation is added to XXX_delay.
579 * XXX_delay contains the accumulated delay time in nanoseconds.
580 */
581 struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
582 u64 blkio_delay; /* wait for sync block io completion */
583 u64 swapin_delay; /* wait for swapin block io completion */
584 u32 blkio_count; /* total count of the number of sync block */
585 /* io operations performed */
586 u32 swapin_count; /* total count of the number of swapin block */
587 /* io operations performed */
588};
589#endif /* CONFIG_TASK_DELAY_ACCT */
590
591static inline int sched_info_on(void)
592{
593#ifdef CONFIG_SCHEDSTATS
594 return 1;
595#elif defined(CONFIG_TASK_DELAY_ACCT)
596 extern int delayacct_on;
597 return delayacct_on;
598#else
599 return 0;
553#endif 600#endif
601}
554 602
555enum idle_type 603enum idle_type
556{ 604{
@@ -747,7 +795,7 @@ struct task_struct {
747 cpumask_t cpus_allowed; 795 cpumask_t cpus_allowed;
748 unsigned int time_slice, first_time_slice; 796 unsigned int time_slice, first_time_slice;
749 797
750#ifdef CONFIG_SCHEDSTATS 798#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
751 struct sched_info sched_info; 799 struct sched_info sched_info;
752#endif 800#endif
753 801
@@ -945,6 +993,10 @@ struct task_struct {
945 * cache last used pipe for splice 993 * cache last used pipe for splice
946 */ 994 */
947 struct pipe_inode_info *splice_pipe; 995 struct pipe_inode_info *splice_pipe;
996#ifdef CONFIG_TASK_DELAY_ACCT
997 spinlock_t delays_lock;
998 struct task_delay_info *delays;
999#endif
948}; 1000};
949 1001
950static inline pid_t process_group(struct task_struct *tsk) 1002static inline pid_t process_group(struct task_struct *tsk)
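
Editor's note: the task_delay_info comment above describes a per-stat pattern: a start/end timestamp pair, an accumulated delay in nanoseconds, and a count. A hedged userspace sketch of that bookkeeping, using clock_gettime() in place of the kernel's monotonic clock; all names here are illustrative:

/* Userspace sketch of the per-stat pattern documented in task_delay_info:
 * record start and end timestamps, add the difference (in nanoseconds)
 * to XXX_delay and bump XXX_count. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

struct demo_delays {
	struct timespec blkio_start, blkio_end;
	uint64_t blkio_delay;	/* accumulated wait, in ns */
	uint32_t blkio_count;
};

static void delay_start(struct timespec *start)
{
	clock_gettime(CLOCK_MONOTONIC, start);
}

static void delay_end(struct timespec *start, struct timespec *end,
		      uint64_t *total, uint32_t *count)
{
	int64_t ns;

	clock_gettime(CLOCK_MONOTONIC, end);
	ns = (int64_t)(end->tv_sec - start->tv_sec) * 1000000000LL +
	     (end->tv_nsec - start->tv_nsec);
	if (ns < 0)
		return;
	*total += ns;
	(*count)++;
}

int main(void)
{
	struct demo_delays d = { .blkio_delay = 0, .blkio_count = 0 };

	delay_start(&d.blkio_start);
	usleep(10000);		/* stand-in for waiting on block I/O */
	delay_end(&d.blkio_start, &d.blkio_end, &d.blkio_delay, &d.blkio_count);

	printf("count=%u delay=%llu ns\n", d.blkio_count,
	       (unsigned long long)d.blkio_delay);
	return 0;
}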
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0bf31b83578c..4307e764ef0a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1066,9 +1066,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
1066 kfree_skb(skb); 1066 kfree_skb(skb);
1067} 1067}
1068 1068
1069#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
1070/** 1069/**
1071 * __dev_alloc_skb - allocate an skbuff for sending 1070 * __dev_alloc_skb - allocate an skbuff for receiving
1072 * @length: length to allocate 1071 * @length: length to allocate
1073 * @gfp_mask: get_free_pages mask, passed to alloc_skb 1072 * @gfp_mask: get_free_pages mask, passed to alloc_skb
1074 * 1073 *
@@ -1087,12 +1086,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1087 skb_reserve(skb, NET_SKB_PAD); 1086 skb_reserve(skb, NET_SKB_PAD);
1088 return skb; 1087 return skb;
1089} 1088}
1090#else
1091extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
1092#endif
1093 1089
1094/** 1090/**
1095 * dev_alloc_skb - allocate an skbuff for sending 1091 * dev_alloc_skb - allocate an skbuff for receiving
1096 * @length: length to allocate 1092 * @length: length to allocate
1097 * 1093 *
1098 * Allocate a new &sk_buff and assign it a usage count of one. The 1094 * Allocate a new &sk_buff and assign it a usage count of one. The
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
new file mode 100644
index 000000000000..f1cb6cddd19d
--- /dev/null
+++ b/include/linux/taskstats.h
@@ -0,0 +1,137 @@
1/* taskstats.h - exporting per-task statistics
2 *
3 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
4 * (C) Balbir Singh, IBM Corp. 2006
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2.1 of the GNU Lesser General Public License
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it would be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
13 */
14
15#ifndef _LINUX_TASKSTATS_H
16#define _LINUX_TASKSTATS_H
17
18/* Format for per-task data returned to userland when
19 * - a task exits
20 * - listener requests stats for a task
21 *
22 * The struct is versioned. Newer versions should only add fields to
23 * the bottom of the struct to maintain backward compatibility.
24 *
25 *
26 * To add new fields
27 * a) bump up TASKSTATS_VERSION
28 * b) add comment indicating new version number at end of struct
29 * c) add new fields after version comment; maintain 64-bit alignment
30 */
31
32#define TASKSTATS_VERSION 1
33
34struct taskstats {
35
36 /* Version 1 */
37 __u16 version;
38 __u16 padding[3]; /* Userspace should not interpret the padding
39 * field which can be replaced by useful
40 * fields if struct taskstats is extended.
41 */
42
43 /* Delay accounting fields start
44 *
45 * All values, until comment "Delay accounting fields end" are
46 * available only if delay accounting is enabled, even though the last
47 * few fields are not delays
48 *
49 * xxx_count is the number of delay values recorded
50 * xxx_delay_total is the corresponding cumulative delay in nanoseconds
51 *
52 * xxx_delay_total wraps around to zero on overflow
53 * xxx_count incremented regardless of overflow
54 */
55
56 /* Delay waiting for cpu, while runnable
57 * count, delay_total NOT updated atomically
58 */
59 __u64 cpu_count;
60 __u64 cpu_delay_total;
61
62 /* Following four fields atomically updated using task->delays->lock */
63
64 /* Delay waiting for synchronous block I/O to complete
65 * does not account for delays in I/O submission
66 */
67 __u64 blkio_count;
68 __u64 blkio_delay_total;
69
70 /* Delay waiting for page fault I/O (swap in only) */
71 __u64 swapin_count;
72 __u64 swapin_delay_total;
73
74 /* cpu "wall-clock" running time
75 * On some architectures, value will adjust for cpu time stolen
76 * from the kernel in involuntary waits due to virtualization.
77 * Value is cumulative, in nanoseconds, without a corresponding count
78 * and wraps around to zero silently on overflow
79 */
80 __u64 cpu_run_real_total;
81
82 /* cpu "virtual" running time
83 * Uses time intervals seen by the kernel i.e. no adjustment
84 * for kernel's involuntary waits due to virtualization.
85 * Value is cumulative, in nanoseconds, without a corresponding count
86 * and wraps around to zero silently on overflow
87 */
88 __u64 cpu_run_virtual_total;
89 /* Delay accounting fields end */
90 /* version 1 ends here */
91};
92
93
94/*
95 * Commands sent from userspace
96 * Not versioned. New commands should only be inserted at the enum's end
97 * prior to __TASKSTATS_CMD_MAX
98 */
99
100enum {
101 TASKSTATS_CMD_UNSPEC = 0, /* Reserved */
102 TASKSTATS_CMD_GET, /* user->kernel request/get-response */
103 TASKSTATS_CMD_NEW, /* kernel->user event */
104 __TASKSTATS_CMD_MAX,
105};
106
107#define TASKSTATS_CMD_MAX (__TASKSTATS_CMD_MAX - 1)
108
109enum {
110 TASKSTATS_TYPE_UNSPEC = 0, /* Reserved */
111 TASKSTATS_TYPE_PID, /* Process id */
112 TASKSTATS_TYPE_TGID, /* Thread group id */
113 TASKSTATS_TYPE_STATS, /* taskstats structure */
114 TASKSTATS_TYPE_AGGR_PID, /* contains pid + stats */
115 TASKSTATS_TYPE_AGGR_TGID, /* contains tgid + stats */
116 __TASKSTATS_TYPE_MAX,
117};
118
119#define TASKSTATS_TYPE_MAX (__TASKSTATS_TYPE_MAX - 1)
120
121enum {
122 TASKSTATS_CMD_ATTR_UNSPEC = 0,
123 TASKSTATS_CMD_ATTR_PID,
124 TASKSTATS_CMD_ATTR_TGID,
125 TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
126 TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
127 __TASKSTATS_CMD_ATTR_MAX,
128};
129
130#define TASKSTATS_CMD_ATTR_MAX (__TASKSTATS_CMD_ATTR_MAX - 1)
131
132/* NETLINK_GENERIC related info */
133
134#define TASKSTATS_GENL_NAME "TASKSTATS"
135#define TASKSTATS_GENL_VERSION 0x1
136
137#endif /* _LINUX_TASKSTATS_H */
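
Editor's note: the header above states that struct taskstats is versioned and only grows at the bottom. A hedged sketch of the consumer-side consequence: a listener built against version 1 checks ts->version and reads only the fields it knows. Assumes the new <linux/taskstats.h> is installed with the kernel headers:

/* Sketch of the contract implied by TASKSTATS_VERSION: check the version,
 * then read only version-1 fields; newer kernels may append fields this
 * code simply ignores. */
#include <stdio.h>
#include <linux/taskstats.h>

static void print_delays(const struct taskstats *ts)
{
	if (ts->version < 1) {
		fprintf(stderr, "unexpected taskstats version %u\n", ts->version);
		return;
	}
	/* Version 1 fields; meaningful only when delay accounting is enabled. */
	printf("cpu:    count=%llu delay=%llu ns\n",
	       (unsigned long long)ts->cpu_count,
	       (unsigned long long)ts->cpu_delay_total);
	printf("blkio:  count=%llu delay=%llu ns\n",
	       (unsigned long long)ts->blkio_count,
	       (unsigned long long)ts->blkio_delay_total);
	printf("swapin: count=%llu delay=%llu ns\n",
	       (unsigned long long)ts->swapin_count,
	       (unsigned long long)ts->swapin_delay_total);
}

int main(void)
{
	struct taskstats ts = { .version = TASKSTATS_VERSION };

	print_delays(&ts);	/* would normally come from a netlink reply */
	return 0;
}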
diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h
new file mode 100644
index 000000000000..16894b7edcc8
--- /dev/null
+++ b/include/linux/taskstats_kern.h
@@ -0,0 +1,89 @@
1/* taskstats_kern.h - kernel header for per-task statistics interface
2 *
3 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
4 * (C) Balbir Singh, IBM Corp. 2006
5 */
6
7#ifndef _LINUX_TASKSTATS_KERN_H
8#define _LINUX_TASKSTATS_KERN_H
9
10#include <linux/taskstats.h>
11#include <linux/sched.h>
12#include <net/genetlink.h>
13
14#ifdef CONFIG_TASKSTATS
15extern kmem_cache_t *taskstats_cache;
16extern struct mutex taskstats_exit_mutex;
17
18static inline void taskstats_exit_free(struct taskstats *tidstats)
19{
20 if (tidstats)
21 kmem_cache_free(taskstats_cache, tidstats);
22}
23
24static inline void taskstats_tgid_init(struct signal_struct *sig)
25{
26 spin_lock_init(&sig->stats_lock);
27 sig->stats = NULL;
28}
29
30static inline void taskstats_tgid_alloc(struct signal_struct *sig)
31{
32 struct taskstats *stats;
33 unsigned long flags;
34
35 stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
36 if (!stats)
37 return;
38
39 spin_lock_irqsave(&sig->stats_lock, flags);
40 if (!sig->stats) {
41 sig->stats = stats;
42 stats = NULL;
43 }
44 spin_unlock_irqrestore(&sig->stats_lock, flags);
45
46 if (stats)
47 kmem_cache_free(taskstats_cache, stats);
48}
49
50static inline void taskstats_tgid_free(struct signal_struct *sig)
51{
52 struct taskstats *stats = NULL;
53 unsigned long flags;
54
55 spin_lock_irqsave(&sig->stats_lock, flags);
56 if (sig->stats) {
57 stats = sig->stats;
58 sig->stats = NULL;
59 }
60 spin_unlock_irqrestore(&sig->stats_lock, flags);
61 if (stats)
62 kmem_cache_free(taskstats_cache, stats);
63}
64
65extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
66extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
67extern void taskstats_init_early(void);
68extern void taskstats_tgid_alloc(struct signal_struct *);
69#else
70static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
71{}
72static inline void taskstats_exit_free(struct taskstats *ptidstats)
73{}
74static inline void taskstats_exit_send(struct task_struct *tsk,
75 struct taskstats *tidstats,
76 int group_dead, unsigned int cpu)
77{}
78static inline void taskstats_tgid_init(struct signal_struct *sig)
79{}
80static inline void taskstats_tgid_alloc(struct signal_struct *sig)
81{}
82static inline void taskstats_tgid_free(struct signal_struct *sig)
83{}
84static inline void taskstats_init_early(void)
85{}
86#endif /* CONFIG_TASKSTATS */
87
88#endif
89
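
Editor's note: taskstats_tgid_alloc() above follows a classic race-free pattern: allocate outside the lock, install under the lock only if nobody else already did, then free the losing copy. A hedged userspace sketch with a pthread mutex standing in for the spinlock; the types are illustrative:

/* Allocate outside the lock, install under it only if still unset,
 * free our copy if we lost the race. Mirrors taskstats_tgid_alloc(). */
#include <stdlib.h>
#include <pthread.h>

struct sig_demo {
	pthread_mutex_t stats_lock;
	struct stats_demo { unsigned long long delay; } *stats;
};

static void tgid_alloc(struct sig_demo *sig)
{
	struct stats_demo *stats = calloc(1, sizeof(*stats));

	if (!stats)
		return;

	pthread_mutex_lock(&sig->stats_lock);
	if (!sig->stats) {
		sig->stats = stats;	/* we won the race */
		stats = NULL;
	}
	pthread_mutex_unlock(&sig->stats_lock);

	free(stats);			/* NULL if we won; frees the loser's copy */
}

int main(void)
{
	struct sig_demo sig = { .stats_lock = PTHREAD_MUTEX_INITIALIZER, .stats = NULL };

	tgid_alloc(&sig);
	tgid_alloc(&sig);		/* second call frees its own allocation */
	free(sig.stats);
	return 0;
}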
diff --git a/include/linux/time.h b/include/linux/time.h
index c05f8bb9a323..a5b739967b74 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -71,6 +71,18 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
71extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); 71extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
72 72
73/* 73/*
74 * sub = lhs - rhs, in normalized form
75 */
76static inline struct timespec timespec_sub(struct timespec lhs,
77 struct timespec rhs)
78{
79 struct timespec ts_delta;
80 set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
81 lhs.tv_nsec - rhs.tv_nsec);
82 return ts_delta;
83}
84
85/*
74 * Returns true if the timespec is norm, false if denorm: 86 * Returns true if the timespec is norm, false if denorm:
75 */ 87 */
76#define timespec_valid(ts) \ 88#define timespec_valid(ts) \
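
Editor's note: the new timespec_sub() relies on set_normalized_timespec() to absorb any nanosecond borrow. A standalone userspace demo of the same normalization, written out by hand (plain C, not the kernel helpers):

/* Demo of what timespec_sub() + set_normalized_timespec() do: subtract
 * field-wise, then carry/borrow until 0 <= tv_nsec < 1e9. */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static struct timespec demo_timespec_sub(struct timespec lhs, struct timespec rhs)
{
	struct timespec d;

	d.tv_sec  = lhs.tv_sec - rhs.tv_sec;
	d.tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
	while (d.tv_nsec < 0) {			/* normalize the borrow */
		d.tv_nsec += NSEC_PER_SEC;
		d.tv_sec--;
	}
	while (d.tv_nsec >= NSEC_PER_SEC) {	/* and any carry */
		d.tv_nsec -= NSEC_PER_SEC;
		d.tv_sec++;
	}
	return d;
}

int main(void)
{
	struct timespec a = { .tv_sec = 5, .tv_nsec = 100000000 };	/* 5.1 s */
	struct timespec b = { .tv_sec = 2, .tv_nsec = 900000000 };	/* 2.9 s */
	struct timespec d = demo_timespec_sub(a, b);

	printf("%ld.%09ld\n", (long)d.tv_sec, d.tv_nsec);	/* 2.200000000 */
	return 0;
}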
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index f6024ab4eff0..71b6363caaaf 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -11,6 +11,7 @@ struct vm_area_struct;
11#define VM_ALLOC 0x00000002 /* vmalloc() */ 11#define VM_ALLOC 0x00000002 /* vmalloc() */
12#define VM_MAP 0x00000004 /* vmap()ed pages */ 12#define VM_MAP 0x00000004 /* vmap()ed pages */
13#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ 13#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
14#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
14/* bits [20..32] reserved for arch specific ioremap internals */ 15/* bits [20..32] reserved for arch specific ioremap internals */
15 16
16/* 17/*
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 805de50df00d..8c2287264266 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -150,4 +150,24 @@ static inline int genlmsg_unicast(struct sk_buff *skb, u32 pid)
150 return nlmsg_unicast(genl_sock, skb, pid); 150 return nlmsg_unicast(genl_sock, skb, pid);
151} 151}
152 152
153/**
154 * gennlmsg_data - head of message payload
155 * @gnlh: genetlink messsage header
156 */
157static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
158{
159 return ((unsigned char *) gnlh + GENL_HDRLEN);
160}
161
162/**
163 * genlmsg_len - length of message payload
164 * @gnlh: genetlink message header
165 */
166static inline int genlmsg_len(const struct genlmsghdr *gnlh)
167{
168 struct nlmsghdr *nlh = (struct nlmsghdr *)((unsigned char *)gnlh -
169 NLMSG_HDRLEN);
170 return (nlh->nlmsg_len - GENL_HDRLEN - NLMSG_HDRLEN);
171}
172
153#endif /* __NET_GENERIC_NETLINK_H */ 173#endif /* __NET_GENERIC_NETLINK_H */
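
Editor's note: the new genlmsg_data()/genlmsg_len() helpers locate the attribute payload relative to the generic netlink header. A hedged userspace counterpart doing the same arithmetic on a received nlmsghdr; assumes the uapi <linux/netlink.h> and <linux/genetlink.h> headers, and the message built in main() is a fabricated example:

/* Userspace equivalent of genlmsg_data()/genlmsg_len(). */
#include <stdio.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>

static void *demo_genlmsg_data(struct genlmsghdr *gnlh)
{
	return (unsigned char *)gnlh + GENL_HDRLEN;
}

static int demo_genlmsg_len(const struct nlmsghdr *nlh)
{
	return nlh->nlmsg_len - NLMSG_HDRLEN - GENL_HDRLEN;
}

static void dump_payload(struct nlmsghdr *nlh)
{
	struct genlmsghdr *gnlh = NLMSG_DATA(nlh);
	unsigned char *payload = demo_genlmsg_data(gnlh);
	int len = demo_genlmsg_len(nlh);

	printf("cmd=%u version=%u payload=%p len=%d\n",
	       gnlh->cmd, gnlh->version, (void *)payload, len);
}

int main(void)
{
	unsigned char buf[256] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct genlmsghdr *gnlh;

	nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN + 12);	/* pretend 12 bytes of attrs */
	gnlh = NLMSG_DATA(nlh);
	gnlh->cmd = 1;
	gnlh->version = 1;

	dump_payload(nlh);	/* prints len=12 */
	return 0;
}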
diff --git a/include/net/netdma.h b/include/net/netdma.h
index 19760eb131aa..ceae5ee85c04 100644
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -37,7 +37,7 @@ static inline struct dma_chan *get_softnet_dma(void)
37} 37}
38 38
39int dma_skb_copy_datagram_iovec(struct dma_chan* chan, 39int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
40 const struct sk_buff *skb, int offset, struct iovec *to, 40 struct sk_buff *skb, int offset, struct iovec *to,
41 size_t len, struct dma_pinned_list *pinned_list); 41 size_t len, struct dma_pinned_list *pinned_list);
42 42
43#endif /* CONFIG_NET_DMA */ 43#endif /* CONFIG_NET_DMA */
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 1925c65e617b..f6afee73235d 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -169,23 +169,17 @@ psched_tod_diff(int delta_sec, int bound)
169 169
170#define PSCHED_TADD2(tv, delta, tv_res) \ 170#define PSCHED_TADD2(tv, delta, tv_res) \
171({ \ 171({ \
172 int __delta = (delta); \ 172 int __delta = (tv).tv_usec + (delta); \
173 (tv_res) = (tv); \ 173 (tv_res).tv_sec = (tv).tv_sec; \
174 while(__delta >= USEC_PER_SEC){ \ 174 while (__delta >= USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \
175 (tv_res).tv_sec++; \
176 __delta -= USEC_PER_SEC; \
177 } \
178 (tv_res).tv_usec = __delta; \ 175 (tv_res).tv_usec = __delta; \
179}) 176})
180 177
181#define PSCHED_TADD(tv, delta) \ 178#define PSCHED_TADD(tv, delta) \
182({ \ 179({ \
183 int __delta = (delta); \ 180 (tv).tv_usec += (delta); \
184 while(__delta >= USEC_PER_SEC){ \ 181 while ((tv).tv_usec >= USEC_PER_SEC) { (tv).tv_sec++; \
185 (tv).tv_sec++; \ 182 (tv).tv_usec -= USEC_PER_SEC; } \
186 __delta -= USEC_PER_SEC; \
187 } \
188 (tv).tv_usec = __delta; \
189}) 183})
190 184
191/* Set/check that time is in the "past perfect"; 185/* Set/check that time is in the "past perfect";
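
Editor's note: the reworked PSCHED_TADD/PSCHED_TADD2 macros add the delta to tv_usec first and then loop the carry into tv_sec. A standalone demo of that carry loop using a plain struct (not the kernel's packet-scheduler types):

/* Carry loop performed by the reworked PSCHED_TADD macros: add the delta
 * in microseconds, then move whole seconds over to tv_sec. */
#include <stdio.h>

#define USEC_PER_SEC 1000000

struct psched_tv { long tv_sec; long tv_usec; };

static void psched_tadd(struct psched_tv *tv, int delta)
{
	tv->tv_usec += delta;
	while (tv->tv_usec >= USEC_PER_SEC) {
		tv->tv_sec++;
		tv->tv_usec -= USEC_PER_SEC;
	}
}

int main(void)
{
	struct psched_tv tv = { .tv_sec = 3, .tv_usec = 950000 };

	psched_tadd(&tv, 1200000);			/* add 1.2 s */
	printf("%ld.%06ld\n", tv.tv_sec, tv.tv_usec);	/* 5.150000 */
	return 0;
}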
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5f69158c1006..e5aa7ff1f5b5 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -445,6 +445,7 @@ typedef struct sctp_sender_hb_info {
445 struct sctp_paramhdr param_hdr; 445 struct sctp_paramhdr param_hdr;
446 union sctp_addr daddr; 446 union sctp_addr daddr;
447 unsigned long sent_at; 447 unsigned long sent_at;
448 __u64 hb_nonce;
448} __attribute__((packed)) sctp_sender_hb_info_t; 449} __attribute__((packed)) sctp_sender_hb_info_t;
449 450
450/* 451/*
@@ -730,13 +731,10 @@ void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *,
730const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); 731const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);
731 732
732/* This is a structure for holding either an IPv6 or an IPv4 address. */ 733/* This is a structure for holding either an IPv6 or an IPv4 address. */
733/* sin_family -- AF_INET or AF_INET6
734 * sin_port -- ordinary port number
735 * sin_addr -- cast to either (struct in_addr) or (struct in6_addr)
736 */
737struct sctp_sockaddr_entry { 734struct sctp_sockaddr_entry {
738 struct list_head list; 735 struct list_head list;
739 union sctp_addr a; 736 union sctp_addr a;
737 __u8 use_as_src;
740}; 738};
741 739
742typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); 740typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);
@@ -984,6 +982,9 @@ struct sctp_transport {
984 */ 982 */
985 char cacc_saw_newack; 983 char cacc_saw_newack;
986 } cacc; 984 } cacc;
985
986 /* 64-bit random number sent with heartbeat. */
987 __u64 hb_nonce;
987}; 988};
988 989
989struct sctp_transport *sctp_transport_new(const union sctp_addr *, 990struct sctp_transport *sctp_transport_new(const union sctp_addr *,
@@ -1138,7 +1139,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
1138 sctp_scope_t scope, gfp_t gfp, 1139 sctp_scope_t scope, gfp_t gfp,
1139 int flags); 1140 int flags);
1140int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1141int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
1141 gfp_t gfp); 1142 __u8 use_as_src, gfp_t gfp);
1142int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); 1143int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
1143int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1144int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
1144 struct sctp_sock *); 1145 struct sctp_sock *);
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 8a6bef6f91eb..1b7aae6cdd82 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -560,9 +560,18 @@ struct sctp_paddrinfo {
560} __attribute__((packed, aligned(4))); 560} __attribute__((packed, aligned(4)));
561 561
 562/* Peer address states. */ 562
563/* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x]
564 * calls.
565 * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters.
566 * Not yet confirmed by a heartbeat and not available for data
567 * transfers.
568 * ACTIVE : Peer address confirmed, active and available for data transfers.
569 * INACTIVE: Peer address inactive and not available for data transfers.
570 */
563enum sctp_spinfo_state { 571enum sctp_spinfo_state {
564 SCTP_INACTIVE, 572 SCTP_INACTIVE,
565 SCTP_ACTIVE, 573 SCTP_ACTIVE,
574 SCTP_UNCONFIRMED,
566 SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ 575 SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */
567}; 576};
568 577
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index fcb5ba87dcc5..0ff67398928d 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -89,9 +89,10 @@ static inline void ib_addr_set_pkey(struct rdma_dev_addr *dev_addr, u16 pkey)
89 dev_addr->broadcast[9] = (unsigned char) pkey; 89 dev_addr->broadcast[9] = (unsigned char) pkey;
90} 90}
91 91
92static inline union ib_gid *ib_addr_get_sgid(struct rdma_dev_addr *dev_addr) 92static inline void ib_addr_get_sgid(struct rdma_dev_addr *dev_addr,
93 union ib_gid *gid)
93{ 94{
94 return (union ib_gid *) (dev_addr->src_dev_addr + 4); 95 memcpy(gid, dev_addr->src_dev_addr + 4, sizeof *gid);
95} 96}
96 97
97static inline void ib_addr_set_sgid(struct rdma_dev_addr *dev_addr, 98static inline void ib_addr_set_sgid(struct rdma_dev_addr *dev_addr,
@@ -100,9 +101,10 @@ static inline void ib_addr_set_sgid(struct rdma_dev_addr *dev_addr,
100 memcpy(dev_addr->src_dev_addr + 4, gid, sizeof *gid); 101 memcpy(dev_addr->src_dev_addr + 4, gid, sizeof *gid);
101} 102}
102 103
103static inline union ib_gid *ib_addr_get_dgid(struct rdma_dev_addr *dev_addr) 104static inline void ib_addr_get_dgid(struct rdma_dev_addr *dev_addr,
105 union ib_gid *gid)
104{ 106{
105 return (union ib_gid *) (dev_addr->dst_dev_addr + 4); 107 memcpy(gid, dev_addr->dst_dev_addr + 4, sizeof *gid);
106} 108}
107 109
108static inline void ib_addr_set_dgid(struct rdma_dev_addr *dev_addr, 110static inline void ib_addr_set_dgid(struct rdma_dev_addr *dev_addr,
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
index 4ace54cd0cce..00dadbf94e1d 100644
--- a/include/rdma/ib_fmr_pool.h
+++ b/include/rdma/ib_fmr_pool.h
@@ -88,7 +88,7 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
88struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, 88struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
89 u64 *page_list, 89 u64 *page_list,
90 int list_len, 90 int list_len,
91 u64 *io_virtual_address); 91 u64 io_virtual_address);
92 92
93int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr); 93int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
94 94
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 5ff77558013b..585d28e960dd 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -75,6 +75,7 @@
75#define IB_MGMT_METHOD_TRAP_REPRESS 0x07 75#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
76 76
77#define IB_MGMT_METHOD_RESP 0x80 77#define IB_MGMT_METHOD_RESP 0x80
78#define IB_BM_ATTR_MOD_RESP cpu_to_be32(1)
78 79
79#define IB_MGMT_MAX_METHODS 128 80#define IB_MGMT_MAX_METHODS 128
80 81
@@ -247,6 +248,12 @@ struct ib_mad_send_buf {
247}; 248};
248 249
249/** 250/**
251 * ib_response_mad - Returns if the specified MAD has been generated in
252 * response to a sent request or trap.
253 */
254int ib_response_mad(struct ib_mad *mad);
255
256/**
250 * ib_get_rmpp_resptime - Returns the RMPP response time. 257 * ib_get_rmpp_resptime - Returns the RMPP response time.
251 * @rmpp_hdr: An RMPP header. 258 * @rmpp_hdr: An RMPP header.
252 */ 259 */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 371f70d9aa92..58e6444eebee 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -58,9 +58,7 @@ struct scsi_cmnd {
58 int timeout_per_command; 58 int timeout_per_command;
59 59
60 unsigned char cmd_len; 60 unsigned char cmd_len;
61 unsigned char old_cmd_len;
62 enum dma_data_direction sc_data_direction; 61 enum dma_data_direction sc_data_direction;
63 enum dma_data_direction sc_old_data_direction;
64 62
65 /* These elements define the operation we are about to perform */ 63 /* These elements define the operation we are about to perform */
66#define MAX_COMMAND_SIZE 16 64#define MAX_COMMAND_SIZE 16
@@ -71,18 +69,11 @@ struct scsi_cmnd {
71 void *request_buffer; /* Actual requested buffer */ 69 void *request_buffer; /* Actual requested buffer */
72 70
73 /* These elements define the operation we ultimately want to perform */ 71 /* These elements define the operation we ultimately want to perform */
74 unsigned char data_cmnd[MAX_COMMAND_SIZE];
75 unsigned short old_use_sg; /* We save use_sg here when requesting
76 * sense info */
77 unsigned short use_sg; /* Number of pieces of scatter-gather */ 72 unsigned short use_sg; /* Number of pieces of scatter-gather */
78 unsigned short sglist_len; /* size of malloc'd scatter-gather list */ 73 unsigned short sglist_len; /* size of malloc'd scatter-gather list */
79 unsigned bufflen; /* Size of data buffer */
80 void *buffer; /* Data buffer */
81 74
82 unsigned underflow; /* Return error if less than 75 unsigned underflow; /* Return error if less than
83 this amount is transferred */ 76 this amount is transferred */
84 unsigned old_underflow; /* save underflow here when reusing the
85 * command for error handling */
86 77
87 unsigned transfersize; /* How much we are guaranteed to 78 unsigned transfersize; /* How much we are guaranteed to
88 transfer with each SCSI transfer 79 transfer with each SCSI transfer
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index e3c503cd175e..6cc2314098cf 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -106,6 +106,7 @@ struct sas_end_device {
106 106
107struct sas_expander_device { 107struct sas_expander_device {
108 int level; 108 int level;
109 int next_port_id;
109 110
110 #define SAS_EXPANDER_VENDOR_ID_LEN 8 111 #define SAS_EXPANDER_VENDOR_ID_LEN 8
111 char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1]; 112 char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1];
@@ -127,8 +128,10 @@ struct sas_expander_device {
127struct sas_port { 128struct sas_port {
128 struct device dev; 129 struct device dev;
129 130
130 u8 port_identifier; 131 int port_identifier;
131 int num_phys; 132 int num_phys;
133 /* port flags */
134 unsigned int is_backlink:1;
132 135
133 /* the other end of the link */ 136 /* the other end of the link */
134 struct sas_rphy *rphy; 137 struct sas_rphy *rphy;
@@ -168,11 +171,13 @@ extern void sas_rphy_delete(struct sas_rphy *);
168extern int scsi_is_sas_rphy(const struct device *); 171extern int scsi_is_sas_rphy(const struct device *);
169 172
170struct sas_port *sas_port_alloc(struct device *, int); 173struct sas_port *sas_port_alloc(struct device *, int);
174struct sas_port *sas_port_alloc_num(struct device *);
171int sas_port_add(struct sas_port *); 175int sas_port_add(struct sas_port *);
172void sas_port_free(struct sas_port *); 176void sas_port_free(struct sas_port *);
173void sas_port_delete(struct sas_port *); 177void sas_port_delete(struct sas_port *);
174void sas_port_add_phy(struct sas_port *, struct sas_phy *); 178void sas_port_add_phy(struct sas_port *, struct sas_phy *);
175void sas_port_delete_phy(struct sas_port *, struct sas_phy *); 179void sas_port_delete_phy(struct sas_port *, struct sas_phy *);
180void sas_port_mark_backlink(struct sas_port *);
176int scsi_is_sas_port(const struct device *); 181int scsi_is_sas_port(const struct device *);
177 182
178extern struct scsi_transport_template * 183extern struct scsi_transport_template *
diff --git a/include/video/mbxfb.h b/include/video/mbxfb.h
new file mode 100644
index 000000000000..3bde0f5cd55c
--- /dev/null
+++ b/include/video/mbxfb.h
@@ -0,0 +1,28 @@
1#ifndef __MBX_FB_H
2#define __MBX_FB_H
3
4struct mbxfb_val {
5 unsigned int defval;
6 unsigned int min;
7 unsigned int max;
8};
9
10struct fb_info;
11
12struct mbxfb_platform_data {
13 /* Screen info */
14 struct mbxfb_val xres;
15 struct mbxfb_val yres;
16 struct mbxfb_val bpp;
17
18 /* Memory info */
19 unsigned long memsize; /* if 0 use ODFB? */
20 unsigned long timings1;
21 unsigned long timings2;
22 unsigned long timings3;
23
24 int (*probe)(struct fb_info *fb);
25 int (*remove)(struct fb_info *fb);
26};
27
28#endif /* __MBX_FB_H */
diff --git a/init/Kconfig b/init/Kconfig
index a5b073a103e7..a099fc6526d9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -158,6 +158,30 @@ config BSD_PROCESS_ACCT_V3
158 for processing it. A preliminary version of these tools is available 158 for processing it. A preliminary version of these tools is available
159 at <http://www.physik3.uni-rostock.de/tim/kernel/utils/acct/>. 159 at <http://www.physik3.uni-rostock.de/tim/kernel/utils/acct/>.
160 160
161config TASKSTATS
162 bool "Export task/process statistics through netlink (EXPERIMENTAL)"
163 depends on NET
164 default n
165 help
166 Export selected statistics for tasks/processes through the
167 generic netlink interface. Unlike BSD process accounting, the
168 statistics are available during the lifetime of tasks/processes as
169 responses to commands. Like BSD accounting, they are sent to user
170 space on task exit.
171
172 Say N if unsure.
173
174config TASK_DELAY_ACCT
175 bool "Enable per-task delay accounting (EXPERIMENTAL)"
176 depends on TASKSTATS
177 help
178 Collect information on time spent by a task waiting for system
179 resources like cpu, synchronous block I/O completion and swapping
180 in pages. Such statistics can help in setting a task's priorities
181 relative to other tasks for cpu, io, rss limits etc.
182
183 Say N if unsure.
184
161config SYSCTL 185config SYSCTL
162 bool "Sysctl support" if EMBEDDED 186 bool "Sysctl support" if EMBEDDED
163 default y 187 default y
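
Editor's note: with CONFIG_TASKSTATS enabled, statistics are requested over NETLINK_GENERIC. A hedged fragment showing how a TASKSTATS_CMD_GET request for one pid could be assembled and sent; it assumes family_id has already been resolved from the "TASKSTATS" family name via the genetlink controller (lookup and the receive loop are not shown), and the function name is illustrative:

/* Sketch only: build and send a TASKSTATS_CMD_GET request for one pid. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/taskstats.h>

static int send_get(int sock, __u16 family_id, __u32 pid)
{
	struct {
		struct nlmsghdr n;
		struct genlmsghdr g;
		char attrs[64];
	} req;
	struct nlattr *na;
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };

	memset(&req, 0, sizeof(req));
	req.n.nlmsg_type = family_id;
	req.n.nlmsg_flags = NLM_F_REQUEST;
	req.n.nlmsg_pid = getpid();
	req.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
	req.g.cmd = TASKSTATS_CMD_GET;
	req.g.version = TASKSTATS_GENL_VERSION;

	/* Single attribute: the pid we want statistics for. */
	na = (struct nlattr *)((char *)&req + NLMSG_ALIGN(req.n.nlmsg_len));
	na->nla_type = TASKSTATS_CMD_ATTR_PID;
	na->nla_len = NLA_HDRLEN + sizeof(pid);
	memcpy((char *)na + NLA_HDRLEN, &pid, sizeof(pid));
	req.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);

	return sendto(sock, &req, req.n.nlmsg_len, 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}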
diff --git a/init/main.c b/init/main.c
index 628b8e9e841a..8651a720a092 100644
--- a/init/main.c
+++ b/init/main.c
@@ -41,6 +41,8 @@
41#include <linux/cpu.h> 41#include <linux/cpu.h>
42#include <linux/cpuset.h> 42#include <linux/cpuset.h>
43#include <linux/efi.h> 43#include <linux/efi.h>
44#include <linux/taskstats_kern.h>
45#include <linux/delayacct.h>
44#include <linux/unistd.h> 46#include <linux/unistd.h>
45#include <linux/rmap.h> 47#include <linux/rmap.h>
46#include <linux/mempolicy.h> 48#include <linux/mempolicy.h>
@@ -574,6 +576,8 @@ asmlinkage void __init start_kernel(void)
574 proc_root_init(); 576 proc_root_init();
575#endif 577#endif
576 cpuset_init(); 578 cpuset_init();
579 taskstats_init_early();
580 delayacct_init();
577 581
578 check_bugs(); 582 check_bugs();
579 583
diff --git a/kernel/Makefile b/kernel/Makefile
index 47dbcd570cd8..d62ec66c1af2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -48,6 +48,8 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
48obj-$(CONFIG_SECCOMP) += seccomp.o 48obj-$(CONFIG_SECCOMP) += seccomp.o
49obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o 49obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
50obj-$(CONFIG_RELAY) += relay.o 50obj-$(CONFIG_RELAY) += relay.o
51obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
52obj-$(CONFIG_TASKSTATS) += taskstats.o
51 53
52ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) 54ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
53# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is 55# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/acct.c b/kernel/acct.c
index f18e0b8df3e1..2a7c933651c7 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -488,7 +488,7 @@ static void do_acct_process(struct file *file)
488 old_encode_dev(tty_devnum(current->signal->tty)) : 0; 488 old_encode_dev(tty_devnum(current->signal->tty)) : 0;
489 read_unlock(&tasklist_lock); 489 read_unlock(&tasklist_lock);
490 490
491 spin_lock(&current->sighand->siglock); 491 spin_lock_irq(&current->sighand->siglock);
492 ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime))); 492 ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime)));
493 ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime))); 493 ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime)));
494 ac.ac_flag = pacct->ac_flag; 494 ac.ac_flag = pacct->ac_flag;
@@ -496,7 +496,7 @@ static void do_acct_process(struct file *file)
496 ac.ac_minflt = encode_comp_t(pacct->ac_minflt); 496 ac.ac_minflt = encode_comp_t(pacct->ac_minflt);
497 ac.ac_majflt = encode_comp_t(pacct->ac_majflt); 497 ac.ac_majflt = encode_comp_t(pacct->ac_majflt);
498 ac.ac_exitcode = pacct->ac_exitcode; 498 ac.ac_exitcode = pacct->ac_exitcode;
499 spin_unlock(&current->sighand->siglock); 499 spin_unlock_irq(&current->sighand->siglock);
500 ac.ac_io = encode_comp_t(0 /* current->io_usage */); /* %% */ 500 ac.ac_io = encode_comp_t(0 /* current->io_usage */); /* %% */
501 ac.ac_rw = encode_comp_t(ac.ac_io / 1024); 501 ac.ac_rw = encode_comp_t(ac.ac_io / 1024);
502 ac.ac_swaps = encode_comp_t(0); 502 ac.ac_swaps = encode_comp_t(0);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 70fbf2e83766..f230f9ae01c2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,56 +16,48 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17 17
18/* This protects CPUs going up and down... */ 18/* This protects CPUs going up and down... */
19static DEFINE_MUTEX(cpucontrol); 19static DEFINE_MUTEX(cpu_add_remove_lock);
20static DEFINE_MUTEX(cpu_bitmask_lock);
20 21
21static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain); 22static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
22 23
23#ifdef CONFIG_HOTPLUG_CPU 24#ifdef CONFIG_HOTPLUG_CPU
24static struct task_struct *lock_cpu_hotplug_owner;
25static int lock_cpu_hotplug_depth;
26 25
27static int __lock_cpu_hotplug(int interruptible) 26/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
28{ 27static struct task_struct *recursive;
29 int ret = 0; 28static int recursive_depth;
30
31 if (lock_cpu_hotplug_owner != current) {
32 if (interruptible)
33 ret = mutex_lock_interruptible(&cpucontrol);
34 else
35 mutex_lock(&cpucontrol);
36 }
37
38 /*
39 * Set only if we succeed in locking
40 */
41 if (!ret) {
42 lock_cpu_hotplug_depth++;
43 lock_cpu_hotplug_owner = current;
44 }
45
46 return ret;
47}
48 29
49void lock_cpu_hotplug(void) 30void lock_cpu_hotplug(void)
50{ 31{
51 __lock_cpu_hotplug(0); 32 struct task_struct *tsk = current;
33
34 if (tsk == recursive) {
35 static int warnings = 10;
36 if (warnings) {
37 printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
38 WARN_ON(1);
39 warnings--;
40 }
41 recursive_depth++;
42 return;
43 }
44 mutex_lock(&cpu_bitmask_lock);
45 recursive = tsk;
52} 46}
53EXPORT_SYMBOL_GPL(lock_cpu_hotplug); 47EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
54 48
55void unlock_cpu_hotplug(void) 49void unlock_cpu_hotplug(void)
56{ 50{
57 if (--lock_cpu_hotplug_depth == 0) { 51 WARN_ON(recursive != current);
58 lock_cpu_hotplug_owner = NULL; 52 if (recursive_depth) {
59 mutex_unlock(&cpucontrol); 53 recursive_depth--;
54 return;
60 } 55 }
56 mutex_unlock(&cpu_bitmask_lock);
57 recursive = NULL;
61} 58}
62EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); 59EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
63 60
64int lock_cpu_hotplug_interruptible(void)
65{
66 return __lock_cpu_hotplug(1);
67}
68EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
69#endif /* CONFIG_HOTPLUG_CPU */ 61#endif /* CONFIG_HOTPLUG_CPU */
70 62
71/* Need to know about CPUs going up/down? */ 63/* Need to know about CPUs going up/down? */
@@ -122,9 +114,7 @@ int cpu_down(unsigned int cpu)
122 struct task_struct *p; 114 struct task_struct *p;
123 cpumask_t old_allowed, tmp; 115 cpumask_t old_allowed, tmp;
124 116
125 if ((err = lock_cpu_hotplug_interruptible()) != 0) 117 mutex_lock(&cpu_add_remove_lock);
126 return err;
127
128 if (num_online_cpus() == 1) { 118 if (num_online_cpus() == 1) {
129 err = -EBUSY; 119 err = -EBUSY;
130 goto out; 120 goto out;
@@ -150,7 +140,10 @@ int cpu_down(unsigned int cpu)
150 cpu_clear(cpu, tmp); 140 cpu_clear(cpu, tmp);
151 set_cpus_allowed(current, tmp); 141 set_cpus_allowed(current, tmp);
152 142
143 mutex_lock(&cpu_bitmask_lock);
153 p = __stop_machine_run(take_cpu_down, NULL, cpu); 144 p = __stop_machine_run(take_cpu_down, NULL, cpu);
145 mutex_unlock(&cpu_bitmask_lock);
146
154 if (IS_ERR(p)) { 147 if (IS_ERR(p)) {
155 /* CPU didn't die: tell everyone. Can't complain. */ 148 /* CPU didn't die: tell everyone. Can't complain. */
156 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, 149 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
@@ -187,7 +180,7 @@ out_thread:
187out_allowed: 180out_allowed:
188 set_cpus_allowed(current, old_allowed); 181 set_cpus_allowed(current, old_allowed);
189out: 182out:
190 unlock_cpu_hotplug(); 183 mutex_unlock(&cpu_add_remove_lock);
191 return err; 184 return err;
192} 185}
193#endif /*CONFIG_HOTPLUG_CPU*/ 186#endif /*CONFIG_HOTPLUG_CPU*/
@@ -197,9 +190,7 @@ int __devinit cpu_up(unsigned int cpu)
197 int ret; 190 int ret;
198 void *hcpu = (void *)(long)cpu; 191 void *hcpu = (void *)(long)cpu;
199 192
200 if ((ret = lock_cpu_hotplug_interruptible()) != 0) 193 mutex_lock(&cpu_add_remove_lock);
201 return ret;
202
203 if (cpu_online(cpu) || !cpu_present(cpu)) { 194 if (cpu_online(cpu) || !cpu_present(cpu)) {
204 ret = -EINVAL; 195 ret = -EINVAL;
205 goto out; 196 goto out;
@@ -214,7 +205,9 @@ int __devinit cpu_up(unsigned int cpu)
214 } 205 }
215 206
216 /* Arch-specific enabling code. */ 207 /* Arch-specific enabling code. */
208 mutex_lock(&cpu_bitmask_lock);
217 ret = __cpu_up(cpu); 209 ret = __cpu_up(cpu);
210 mutex_unlock(&cpu_bitmask_lock);
218 if (ret != 0) 211 if (ret != 0)
219 goto out_notify; 212 goto out_notify;
220 BUG_ON(!cpu_online(cpu)); 213 BUG_ON(!cpu_online(cpu));
@@ -227,6 +220,6 @@ out_notify:
227 blocking_notifier_call_chain(&cpu_chain, 220 blocking_notifier_call_chain(&cpu_chain,
228 CPU_UP_CANCELED, hcpu); 221 CPU_UP_CANCELED, hcpu);
229out: 222out:
230 unlock_cpu_hotplug(); 223 mutex_unlock(&cpu_add_remove_lock);
231 return ret; 224 return ret;
232} 225}
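
Editor's note: the reworked lock_cpu_hotplug()/unlock_cpu_hotplug() pair above tolerates (and warns about) recursive acquisition by recording the owning task and a depth counter. A hedged pthread sketch of the same owner/depth bookkeeping, with threads standing in for kernel tasks:

/* Owner/depth bookkeeping to survive a recursive lock attempt. */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t bitmask_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t recursive_owner;
static int have_owner;
static int recursive_depth;

static void demo_lock(void)
{
	if (have_owner && pthread_equal(recursive_owner, pthread_self())) {
		fprintf(stderr, "warning: recursive lock by the same thread\n");
		recursive_depth++;
		return;
	}
	pthread_mutex_lock(&bitmask_lock);
	recursive_owner = pthread_self();
	have_owner = 1;
}

static void demo_unlock(void)
{
	if (recursive_depth) {
		recursive_depth--;
		return;
	}
	have_owner = 0;
	pthread_mutex_unlock(&bitmask_lock);
}

int main(void)
{
	demo_lock();
	demo_lock();	/* recursive: warns and bumps depth, no deadlock */
	demo_unlock();
	demo_unlock();
	return 0;
}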
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c232dc077438..1a649f2bb9bb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -762,6 +762,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
762 * 762 *
763 * Call with manage_mutex held. May nest a call to the 763 * Call with manage_mutex held. May nest a call to the
764 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 764 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
765 * Must not be called holding callback_mutex, because we must
766 * not call lock_cpu_hotplug() while holding callback_mutex.
765 */ 767 */
766 768
767static void update_cpu_domains(struct cpuset *cur) 769static void update_cpu_domains(struct cpuset *cur)
@@ -781,7 +783,7 @@ static void update_cpu_domains(struct cpuset *cur)
781 if (is_cpu_exclusive(c)) 783 if (is_cpu_exclusive(c))
782 cpus_andnot(pspan, pspan, c->cpus_allowed); 784 cpus_andnot(pspan, pspan, c->cpus_allowed);
783 } 785 }
784 if (is_removed(cur) || !is_cpu_exclusive(cur)) { 786 if (!is_cpu_exclusive(cur)) {
785 cpus_or(pspan, pspan, cur->cpus_allowed); 787 cpus_or(pspan, pspan, cur->cpus_allowed);
786 if (cpus_equal(pspan, cur->cpus_allowed)) 788 if (cpus_equal(pspan, cur->cpus_allowed))
787 return; 789 return;
@@ -1917,6 +1919,17 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1917 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); 1919 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
1918} 1920}
1919 1921
1922/*
1923 * Locking note on the strange update_flag() call below:
1924 *
1925 * If the cpuset being removed is marked cpu_exclusive, then simulate
1926 * turning cpu_exclusive off, which will call update_cpu_domains().
1927 * The lock_cpu_hotplug() call in update_cpu_domains() must not be
1928 * made while holding callback_mutex. Elsewhere the kernel nests
1929 * callback_mutex inside lock_cpu_hotplug() calls. So the reverse
1930 * nesting would risk an ABBA deadlock.
1931 */
1932
1920static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) 1933static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1921{ 1934{
1922 struct cpuset *cs = dentry->d_fsdata; 1935 struct cpuset *cs = dentry->d_fsdata;
@@ -1936,11 +1949,16 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1936 mutex_unlock(&manage_mutex); 1949 mutex_unlock(&manage_mutex);
1937 return -EBUSY; 1950 return -EBUSY;
1938 } 1951 }
1952 if (is_cpu_exclusive(cs)) {
1953 int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
1954 if (retval < 0) {
1955 mutex_unlock(&manage_mutex);
1956 return retval;
1957 }
1958 }
1939 parent = cs->parent; 1959 parent = cs->parent;
1940 mutex_lock(&callback_mutex); 1960 mutex_lock(&callback_mutex);
1941 set_bit(CS_REMOVED, &cs->flags); 1961 set_bit(CS_REMOVED, &cs->flags);
1942 if (is_cpu_exclusive(cs))
1943 update_cpu_domains(cs);
1944 list_del(&cs->sibling); /* delete my sibling from parent->children */ 1962 list_del(&cs->sibling); /* delete my sibling from parent->children */
1945 spin_lock(&cs->dentry->d_lock); 1963 spin_lock(&cs->dentry->d_lock);
1946 d = dget(cs->dentry); 1964 d = dget(cs->dentry);
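
Editor's note: the cpuset_rmdir() comment above describes an ABBA risk: callback_mutex and the hotplug lock taken in opposite orders by different paths. A purely illustrative pthread sketch of why mismatched ordering can deadlock; the patch's fix amounts to never taking B (callback_mutex) before A (the hotplug lock):

/* ABBA illustration: thread 1 takes A then B, thread 2 takes B then A. */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* hotplug lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* callback_mutex */

/* Safe: honours the documented order, A before B. */
static void path_one(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	/* ... update domains ... */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

/* Deadlock-prone when run concurrently with path_one(): it waits for A
 * while already holding B. */
static void path_two_buggy(void)
{
	pthread_mutex_lock(&lock_b);
	pthread_mutex_lock(&lock_a);
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
}

int main(void)
{
	path_one();		/* run sequentially both complete; run from two  */
	path_two_buggy();	/* threads, the mismatched order risks deadlock */
	return 0;
}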
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
new file mode 100644
index 000000000000..f05392d64267
--- /dev/null
+++ b/kernel/delayacct.c
@@ -0,0 +1,178 @@
1/* delayacct.c - per-task delay accounting
2 *
3 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it would be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 */
15
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/time.h>
19#include <linux/sysctl.h>
20#include <linux/delayacct.h>
21
22int delayacct_on __read_mostly; /* Delay accounting turned on/off */
23kmem_cache_t *delayacct_cache;
24
25static int __init delayacct_setup_enable(char *str)
26{
27 delayacct_on = 1;
28 return 1;
29}
30__setup("delayacct", delayacct_setup_enable);
31
32void delayacct_init(void)
33{
34 delayacct_cache = kmem_cache_create("delayacct_cache",
35 sizeof(struct task_delay_info),
36 0,
37 SLAB_PANIC,
38 NULL, NULL);
39 delayacct_tsk_init(&init_task);
40}
41
42void __delayacct_tsk_init(struct task_struct *tsk)
43{
44 spin_lock_init(&tsk->delays_lock);
 45	/* No need to acquire tsk->delays_lock for allocation here unless
 46	   __delayacct_tsk_init is called after tsk is attached to the tasklist
 47	*/
48 tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL);
49 if (tsk->delays)
50 spin_lock_init(&tsk->delays->lock);
51}
52
53void __delayacct_tsk_exit(struct task_struct *tsk)
54{
55 struct task_delay_info *delays = tsk->delays;
56 spin_lock(&tsk->delays_lock);
57 tsk->delays = NULL;
58 spin_unlock(&tsk->delays_lock);
59 kmem_cache_free(delayacct_cache, delays);
60}
61
62/*
63 * Start accounting for a delay statistic using
64 * its starting timestamp (@start)
65 */
66
67static inline void delayacct_start(struct timespec *start)
68{
69 do_posix_clock_monotonic_gettime(start);
70}
71
72/*
73 * Finish delay accounting for a statistic using
 74 * its timestamps (@start, @end), accumulator (@total) and @count
75 */
76
77static void delayacct_end(struct timespec *start, struct timespec *end,
78 u64 *total, u32 *count)
79{
80 struct timespec ts;
81 s64 ns;
82
83 do_posix_clock_monotonic_gettime(end);
84 ts = timespec_sub(*end, *start);
85 ns = timespec_to_ns(&ts);
86 if (ns < 0)
87 return;
88
89 spin_lock(&current->delays->lock);
90 *total += ns;
91 (*count)++;
92 spin_unlock(&current->delays->lock);
93}
94
95void __delayacct_blkio_start(void)
96{
97 delayacct_start(&current->delays->blkio_start);
98}
99
100void __delayacct_blkio_end(void)
101{
102 if (current->delays->flags & DELAYACCT_PF_SWAPIN)
103 /* Swapin block I/O */
104 delayacct_end(&current->delays->blkio_start,
105 &current->delays->blkio_end,
106 &current->delays->swapin_delay,
107 &current->delays->swapin_count);
108 else /* Other block I/O */
109 delayacct_end(&current->delays->blkio_start,
110 &current->delays->blkio_end,
111 &current->delays->blkio_delay,
112 &current->delays->blkio_count);
113}
114
115int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
116{
117 s64 tmp;
118 struct timespec ts;
 119	unsigned long t1, t2, t3;
120
121 spin_lock(&tsk->delays_lock);
122
 123	/* Though tsk->delays is accessed later, an early exit avoids
 124	 * unnecessarily returning the other data
 125	 */
126 if (!tsk->delays)
127 goto done;
128
129 tmp = (s64)d->cpu_run_real_total;
130 cputime_to_timespec(tsk->utime + tsk->stime, &ts);
131 tmp += timespec_to_ns(&ts);
132 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
133
134 /*
135 * No locking available for sched_info (and too expensive to add one)
136 * Mitigate by taking snapshot of values
137 */
138 t1 = tsk->sched_info.pcnt;
139 t2 = tsk->sched_info.run_delay;
140 t3 = tsk->sched_info.cpu_time;
141
142 d->cpu_count += t1;
143
144 jiffies_to_timespec(t2, &ts);
145 tmp = (s64)d->cpu_delay_total + timespec_to_ns(&ts);
146 d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;
147
148 tmp = (s64)d->cpu_run_virtual_total + (s64)jiffies_to_usecs(t3) * 1000;
149 d->cpu_run_virtual_total =
150 (tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;
151
152 /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
153
154 spin_lock(&tsk->delays->lock);
155 tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
156 d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
157 tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
158 d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
159 d->blkio_count += tsk->delays->blkio_count;
160 d->swapin_count += tsk->delays->swapin_count;
161 spin_unlock(&tsk->delays->lock);
162
163done:
164 spin_unlock(&tsk->delays_lock);
165 return 0;
166}
167
168__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
169{
170 __u64 ret;
171
172 spin_lock(&tsk->delays->lock);
173 ret = nsec_to_clock_t(tsk->delays->blkio_delay +
174 tsk->delays->swapin_delay);
175 spin_unlock(&tsk->delays->lock);
176 return ret;
177}
178
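
Editor's note: __delayacct_add_tsk() above zeroes an XXX_total whenever the addition would wrap, matching the comment "zero XXX_total, non-zero XXX_count implies XXX stat overflowed". A standalone demo of that convention (the kernel code uses s64 comparisons, but the idea is the same):

/* Accumulate into a 64-bit total; if the addition wraps, reset the total
 * to zero so a reader can detect overflow from zero total + non-zero count. */
#include <stdio.h>
#include <stdint.h>

static void add_delay(uint64_t *total, uint64_t *count, uint64_t delta)
{
	uint64_t tmp = *total + delta;

	*total = (tmp < *total) ? 0 : tmp;	/* wrapped: flag by zeroing */
	(*count)++;
}

int main(void)
{
	uint64_t total = 0, count = 0;

	add_delay(&total, &count, 1500);
	add_delay(&total, &count, UINT64_MAX);	/* forces a wrap */
	printf("total=%llu count=%llu (zero total, non-zero count: overflowed)\n",
	       (unsigned long long)total, (unsigned long long)count);
	return 0;
}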
diff --git a/kernel/exit.c b/kernel/exit.c
index 6664c084783d..dba194a8d416 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -25,6 +25,8 @@
25#include <linux/mount.h> 25#include <linux/mount.h>
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/mempolicy.h> 27#include <linux/mempolicy.h>
28#include <linux/taskstats_kern.h>
29#include <linux/delayacct.h>
28#include <linux/cpuset.h> 30#include <linux/cpuset.h>
29#include <linux/syscalls.h> 31#include <linux/syscalls.h>
30#include <linux/signal.h> 32#include <linux/signal.h>
@@ -843,7 +845,9 @@ static void exit_notify(struct task_struct *tsk)
843fastcall NORET_TYPE void do_exit(long code) 845fastcall NORET_TYPE void do_exit(long code)
844{ 846{
845 struct task_struct *tsk = current; 847 struct task_struct *tsk = current;
848 struct taskstats *tidstats;
846 int group_dead; 849 int group_dead;
850 unsigned int mycpu;
847 851
848 profile_task_exit(tsk); 852 profile_task_exit(tsk);
849 853
@@ -881,6 +885,8 @@ fastcall NORET_TYPE void do_exit(long code)
881 current->comm, current->pid, 885 current->comm, current->pid,
882 preempt_count()); 886 preempt_count());
883 887
888 taskstats_exit_alloc(&tidstats, &mycpu);
889
884 acct_update_integrals(tsk); 890 acct_update_integrals(tsk);
885 if (tsk->mm) { 891 if (tsk->mm) {
886 update_hiwater_rss(tsk->mm); 892 update_hiwater_rss(tsk->mm);
@@ -900,6 +906,10 @@ fastcall NORET_TYPE void do_exit(long code)
900#endif 906#endif
901 if (unlikely(tsk->audit_context)) 907 if (unlikely(tsk->audit_context))
902 audit_free(tsk); 908 audit_free(tsk);
909 taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
910 taskstats_exit_free(tidstats);
911 delayacct_tsk_exit(tsk);
912
903 exit_mm(tsk); 913 exit_mm(tsk);
904 914
905 if (group_dead) 915 if (group_dead)
diff --git a/kernel/fork.c b/kernel/fork.c
index 926e5a68ea9e..1b0f7b1e0881 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -43,6 +43,8 @@
43#include <linux/rmap.h> 43#include <linux/rmap.h>
44#include <linux/acct.h> 44#include <linux/acct.h>
45#include <linux/cn_proc.h> 45#include <linux/cn_proc.h>
46#include <linux/delayacct.h>
47#include <linux/taskstats_kern.h>
46 48
47#include <asm/pgtable.h> 49#include <asm/pgtable.h>
48#include <asm/pgalloc.h> 50#include <asm/pgalloc.h>
@@ -818,6 +820,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
818 if (clone_flags & CLONE_THREAD) { 820 if (clone_flags & CLONE_THREAD) {
819 atomic_inc(&current->signal->count); 821 atomic_inc(&current->signal->count);
820 atomic_inc(&current->signal->live); 822 atomic_inc(&current->signal->live);
823 taskstats_tgid_alloc(current->signal);
821 return 0; 824 return 0;
822 } 825 }
823 sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); 826 sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -862,6 +865,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
862 INIT_LIST_HEAD(&sig->cpu_timers[0]); 865 INIT_LIST_HEAD(&sig->cpu_timers[0]);
863 INIT_LIST_HEAD(&sig->cpu_timers[1]); 866 INIT_LIST_HEAD(&sig->cpu_timers[1]);
864 INIT_LIST_HEAD(&sig->cpu_timers[2]); 867 INIT_LIST_HEAD(&sig->cpu_timers[2]);
868 taskstats_tgid_init(sig);
865 869
866 task_lock(current->group_leader); 870 task_lock(current->group_leader);
867 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 871 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
@@ -883,6 +887,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
883void __cleanup_signal(struct signal_struct *sig) 887void __cleanup_signal(struct signal_struct *sig)
884{ 888{
885 exit_thread_group_keys(sig); 889 exit_thread_group_keys(sig);
890 taskstats_tgid_free(sig);
886 kmem_cache_free(signal_cachep, sig); 891 kmem_cache_free(signal_cachep, sig);
887} 892}
888 893
@@ -1000,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1000 goto bad_fork_cleanup_put_domain; 1005 goto bad_fork_cleanup_put_domain;
1001 1006
1002 p->did_exec = 0; 1007 p->did_exec = 0;
1008 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
1003 copy_flags(clone_flags, p); 1009 copy_flags(clone_flags, p);
1004 p->pid = pid; 1010 p->pid = pid;
1005 retval = -EFAULT; 1011 retval = -EFAULT;
diff --git a/kernel/futex.c b/kernel/futex.c
index cf0c8e21d1ab..dda2049692a2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -415,15 +415,15 @@ out_unlock:
415 */ 415 */
416void exit_pi_state_list(struct task_struct *curr) 416void exit_pi_state_list(struct task_struct *curr)
417{ 417{
418 struct futex_hash_bucket *hb;
419 struct list_head *next, *head = &curr->pi_state_list; 418 struct list_head *next, *head = &curr->pi_state_list;
420 struct futex_pi_state *pi_state; 419 struct futex_pi_state *pi_state;
420 struct futex_hash_bucket *hb;
421 union futex_key key; 421 union futex_key key;
422 422
423 /* 423 /*
424 * We are a ZOMBIE and nobody can enqueue itself on 424 * We are a ZOMBIE and nobody can enqueue itself on
425 * pi_state_list anymore, but we have to be careful 425 * pi_state_list anymore, but we have to be careful
426 * versus waiters unqueueing themselfs 426 * versus waiters unqueueing themselves:
427 */ 427 */
428 spin_lock_irq(&curr->pi_lock); 428 spin_lock_irq(&curr->pi_lock);
429 while (!list_empty(head)) { 429 while (!list_empty(head)) {
@@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr)
431 next = head->next; 431 next = head->next;
432 pi_state = list_entry(next, struct futex_pi_state, list); 432 pi_state = list_entry(next, struct futex_pi_state, list);
433 key = pi_state->key; 433 key = pi_state->key;
434 hb = hash_futex(&key);
434 spin_unlock_irq(&curr->pi_lock); 435 spin_unlock_irq(&curr->pi_lock);
435 436
436 hb = hash_futex(&key);
437 spin_lock(&hb->lock); 437 spin_lock(&hb->lock);
438 438
439 spin_lock_irq(&curr->pi_lock); 439 spin_lock_irq(&curr->pi_lock);
440 /*
441 * We dropped the pi-lock, so re-check whether this
442 * task still owns the PI-state:
443 */
440 if (head->next != next) { 444 if (head->next != next) {
441 spin_unlock(&hb->lock); 445 spin_unlock(&hb->lock);
442 continue; 446 continue;
443 } 447 }
444 448
445 list_del_init(&pi_state->list);
446
447 WARN_ON(pi_state->owner != curr); 449 WARN_ON(pi_state->owner != curr);
448 450 WARN_ON(list_empty(&pi_state->list));
451 list_del_init(&pi_state->list);
449 pi_state->owner = NULL; 452 pi_state->owner = NULL;
450 spin_unlock_irq(&curr->pi_lock); 453 spin_unlock_irq(&curr->pi_lock);
451 454
@@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
470 head = &hb->chain; 473 head = &hb->chain;
471 474
472 list_for_each_entry_safe(this, next, head, list) { 475 list_for_each_entry_safe(this, next, head, list) {
473 if (match_futex (&this->key, &me->key)) { 476 if (match_futex(&this->key, &me->key)) {
474 /* 477 /*
475 * Another waiter already exists - bump up 478 * Another waiter already exists - bump up
476 * the refcount and return its pi_state: 479 * the refcount and return its pi_state:
@@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
482 if (unlikely(!pi_state)) 485 if (unlikely(!pi_state))
483 return -EINVAL; 486 return -EINVAL;
484 487
488 WARN_ON(!atomic_read(&pi_state->refcount));
489
485 atomic_inc(&pi_state->refcount); 490 atomic_inc(&pi_state->refcount);
486 me->pi_state = pi_state; 491 me->pi_state = pi_state;
487 492
@@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
490 } 495 }
491 496
492 /* 497 /*
493 * We are the first waiter - try to look up the real owner and 498 * We are the first waiter - try to look up the real owner and attach
494 * attach the new pi_state to it: 499 * the new pi_state to it, but bail out when the owner died bit is set
500 * and TID = 0:
495 */ 501 */
496 pid = uval & FUTEX_TID_MASK; 502 pid = uval & FUTEX_TID_MASK;
503 if (!pid && (uval & FUTEX_OWNER_DIED))
504 return -ESRCH;
497 p = futex_find_get_task(pid); 505 p = futex_find_get_task(pid);
498 if (!p) 506 if (!p)
499 return -ESRCH; 507 return -ESRCH;
@@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
510 pi_state->key = me->key; 518 pi_state->key = me->key;
511 519
512 spin_lock_irq(&p->pi_lock); 520 spin_lock_irq(&p->pi_lock);
521 WARN_ON(!list_empty(&pi_state->list));
513 list_add(&pi_state->list, &p->pi_state_list); 522 list_add(&pi_state->list, &p->pi_state_list);
514 pi_state->owner = p; 523 pi_state->owner = p;
515 spin_unlock_irq(&p->pi_lock); 524 spin_unlock_irq(&p->pi_lock);
@@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
573 * kept enabled while there is PI state around. We must also 582 * kept enabled while there is PI state around. We must also
574 * preserve the owner died bit.) 583 * preserve the owner died bit.)
575 */ 584 */
576 newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid; 585 if (!(uval & FUTEX_OWNER_DIED)) {
586 newval = FUTEX_WAITERS | new_owner->pid;
577 587
578 inc_preempt_count(); 588 inc_preempt_count();
579 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); 589 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
580 dec_preempt_count(); 590 dec_preempt_count();
591 if (curval == -EFAULT)
592 return -EFAULT;
593 if (curval != uval)
594 return -EINVAL;
595 }
581 596
582 if (curval == -EFAULT) 597 spin_lock_irq(&pi_state->owner->pi_lock);
583 return -EFAULT; 598 WARN_ON(list_empty(&pi_state->list));
584 if (curval != uval) 599 list_del_init(&pi_state->list);
585 return -EINVAL; 600 spin_unlock_irq(&pi_state->owner->pi_lock);
586 601
587 list_del_init(&pi_state->owner->pi_state_list); 602 spin_lock_irq(&new_owner->pi_lock);
603 WARN_ON(!list_empty(&pi_state->list));
588 list_add(&pi_state->list, &new_owner->pi_state_list); 604 list_add(&pi_state->list, &new_owner->pi_state_list);
589 pi_state->owner = new_owner; 605 pi_state->owner = new_owner;
606 spin_unlock_irq(&new_owner->pi_lock);
607
590 rt_mutex_unlock(&pi_state->pi_mutex); 608 rt_mutex_unlock(&pi_state->pi_mutex);
591 609
592 return 0; 610 return 0;
@@ -1236,6 +1254,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1236 /* Owner died? */ 1254 /* Owner died? */
1237 if (q.pi_state->owner != NULL) { 1255 if (q.pi_state->owner != NULL) {
1238 spin_lock_irq(&q.pi_state->owner->pi_lock); 1256 spin_lock_irq(&q.pi_state->owner->pi_lock);
1257 WARN_ON(list_empty(&q.pi_state->list));
1239 list_del_init(&q.pi_state->list); 1258 list_del_init(&q.pi_state->list);
1240 spin_unlock_irq(&q.pi_state->owner->pi_lock); 1259 spin_unlock_irq(&q.pi_state->owner->pi_lock);
1241 } else 1260 } else
@@ -1244,6 +1263,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1244 q.pi_state->owner = current; 1263 q.pi_state->owner = current;
1245 1264
1246 spin_lock_irq(&current->pi_lock); 1265 spin_lock_irq(&current->pi_lock);
1266 WARN_ON(!list_empty(&q.pi_state->list));
1247 list_add(&q.pi_state->list, &current->pi_state_list); 1267 list_add(&q.pi_state->list, &current->pi_state_list);
1248 spin_unlock_irq(&current->pi_lock); 1268 spin_unlock_irq(&current->pi_lock);
1249 1269
@@ -1427,9 +1447,11 @@ retry_locked:
1427 * again. If it succeeds then we can return without waking 1447 * again. If it succeeds then we can return without waking
1428 * anyone else up: 1448 * anyone else up:
1429 */ 1449 */
1430 inc_preempt_count(); 1450 if (!(uval & FUTEX_OWNER_DIED)) {
1431 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); 1451 inc_preempt_count();
1432 dec_preempt_count(); 1452 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
1453 dec_preempt_count();
1454 }
1433 1455
1434 if (unlikely(uval == -EFAULT)) 1456 if (unlikely(uval == -EFAULT))
1435 goto pi_faulted; 1457 goto pi_faulted;
@@ -1462,9 +1484,11 @@ retry_locked:
1462 /* 1484 /*
1463 * No waiters - kernel unlocks the futex: 1485 * No waiters - kernel unlocks the futex:
1464 */ 1486 */
1465 ret = unlock_futex_pi(uaddr, uval); 1487 if (!(uval & FUTEX_OWNER_DIED)) {
1466 if (ret == -EFAULT) 1488 ret = unlock_futex_pi(uaddr, uval);
1467 goto pi_faulted; 1489 if (ret == -EFAULT)
1490 goto pi_faulted;
1491 }
1468 1492
1469out_unlock: 1493out_unlock:
1470 spin_unlock(&hb->lock); 1494 spin_unlock(&hb->lock);
@@ -1683,9 +1707,9 @@ err_unlock:
1683 * Process a futex-list entry, check whether it's owned by the 1707 * Process a futex-list entry, check whether it's owned by the
1684 * dying task, and do notification if so: 1708 * dying task, and do notification if so:
1685 */ 1709 */
1686int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) 1710int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1687{ 1711{
1688 u32 uval, nval; 1712 u32 uval, nval, mval;
1689 1713
1690retry: 1714retry:
1691 if (get_user(uval, uaddr)) 1715 if (get_user(uval, uaddr))
@@ -1702,21 +1726,45 @@ retry:
1702 * thread-death.) The rest of the cleanup is done in 1726 * thread-death.) The rest of the cleanup is done in
1703 * userspace. 1727 * userspace.
1704 */ 1728 */
1705 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 1729 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1706 uval | FUTEX_OWNER_DIED); 1730 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1731
1707 if (nval == -EFAULT) 1732 if (nval == -EFAULT)
1708 return -1; 1733 return -1;
1709 1734
1710 if (nval != uval) 1735 if (nval != uval)
1711 goto retry; 1736 goto retry;
1712 1737
1713 if (uval & FUTEX_WAITERS) 1738 /*
1714 futex_wake(uaddr, 1); 1739 * Wake robust non-PI futexes here. The wakeup of
1740 * PI futexes happens in exit_pi_state():
1741 */
1742 if (!pi) {
1743 if (uval & FUTEX_WAITERS)
1744 futex_wake(uaddr, 1);
1745 }
1715 } 1746 }
1716 return 0; 1747 return 0;
1717} 1748}
1718 1749
1719/* 1750/*
1751 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1752 */
1753static inline int fetch_robust_entry(struct robust_list __user **entry,
1754 struct robust_list __user **head, int *pi)
1755{
1756 unsigned long uentry;
1757
1758 if (get_user(uentry, (unsigned long *)head))
1759 return -EFAULT;
1760
1761 *entry = (void *)(uentry & ~1UL);
1762 *pi = uentry & 1;
1763
1764 return 0;
1765}
1766
1767/*
1720 * Walk curr->robust_list (very carefully, it's a userspace list!) 1768 * Walk curr->robust_list (very carefully, it's a userspace list!)
1721 * and mark any locks found there dead, and notify any waiters. 1769 * and mark any locks found there dead, and notify any waiters.
1722 * 1770 *
@@ -1726,14 +1774,14 @@ void exit_robust_list(struct task_struct *curr)
1726{ 1774{
1727 struct robust_list_head __user *head = curr->robust_list; 1775 struct robust_list_head __user *head = curr->robust_list;
1728 struct robust_list __user *entry, *pending; 1776 struct robust_list __user *entry, *pending;
1729 unsigned int limit = ROBUST_LIST_LIMIT; 1777 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1730 unsigned long futex_offset; 1778 unsigned long futex_offset;
1731 1779
1732 /* 1780 /*
1733 * Fetch the list head (which was registered earlier, via 1781 * Fetch the list head (which was registered earlier, via
1734 * sys_set_robust_list()): 1782 * sys_set_robust_list()):
1735 */ 1783 */
1736 if (get_user(entry, &head->list.next)) 1784 if (fetch_robust_entry(&entry, &head->list.next, &pi))
1737 return; 1785 return;
1738 /* 1786 /*
1739 * Fetch the relative futex offset: 1787 * Fetch the relative futex offset:
@@ -1744,10 +1792,11 @@ void exit_robust_list(struct task_struct *curr)
1744 * Fetch any possibly pending lock-add first, and handle it 1792 * Fetch any possibly pending lock-add first, and handle it
1745 * if it exists: 1793 * if it exists:
1746 */ 1794 */
1747 if (get_user(pending, &head->list_op_pending)) 1795 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1748 return; 1796 return;
1797
1749 if (pending) 1798 if (pending)
1750 handle_futex_death((void *)pending + futex_offset, curr); 1799 handle_futex_death((void *)pending + futex_offset, curr, pip);
1751 1800
1752 while (entry != &head->list) { 1801 while (entry != &head->list) {
1753 /* 1802 /*
@@ -1756,12 +1805,12 @@ void exit_robust_list(struct task_struct *curr)
1756 */ 1805 */
1757 if (entry != pending) 1806 if (entry != pending)
1758 if (handle_futex_death((void *)entry + futex_offset, 1807 if (handle_futex_death((void *)entry + futex_offset,
1759 curr)) 1808 curr, pi))
1760 return; 1809 return;
1761 /* 1810 /*
1762 * Fetch the next entry in the list: 1811 * Fetch the next entry in the list:
1763 */ 1812 */
1764 if (get_user(entry, &entry->next)) 1813 if (fetch_robust_entry(&entry, &entry->next, &pi))
1765 return; 1814 return;
1766 /* 1815 /*
1767 * Avoid excessively long or circular lists: 1816 * Avoid excessively long or circular lists:
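
The robust-list rework above threads a PI flag through bit 0 of each user-space list pointer, which fetch_robust_entry() strips off before dereferencing. A minimal userspace sketch of that low-bit tagging trick, assuming the entries are at least 2-byte aligned so bit 0 is always free; the names here are illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct entry {
	struct entry *next;	/* pointer-aligned, so bit 0 is spare */
};

/* Pack an "is PI" flag into the low bit of an aligned pointer. */
static uintptr_t encode_entry(struct entry *e, int pi)
{
	return (uintptr_t)e | (pi ? 1u : 0u);
}

/* Undo it: recover the real pointer and the flag, as fetch_robust_entry() does. */
static struct entry *decode_entry(uintptr_t word, int *pi)
{
	*pi = (int)(word & 1);
	return (struct entry *)(word & ~(uintptr_t)1);
}

int main(void)
{
	struct entry e;
	int pi;
	struct entry *p = decode_entry(encode_entry(&e, 1), &pi);

	assert(p == &e && pi == 1);
	printf("entry=%p pi=%d\n", (void *)p, pi);
	return 0;
}

The decode step is what lets a single robust list carry both normal and PI locks without growing the per-entry ABI.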
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d1d92b441fb7..d1aab1a452cc 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -12,6 +12,23 @@
12 12
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14 14
15
16/*
17 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
18 */
19static inline int
20fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
21 compat_uptr_t *head, int *pi)
22{
23 if (get_user(*uentry, head))
24 return -EFAULT;
25
26 *entry = compat_ptr((*uentry) & ~1);
27 *pi = (unsigned int)(*uentry) & 1;
28
29 return 0;
30}
31
15/* 32/*
16 * Walk curr->robust_list (very carefully, it's a userspace list!) 33 * Walk curr->robust_list (very carefully, it's a userspace list!)
17 * and mark any locks found there dead, and notify any waiters. 34 * and mark any locks found there dead, and notify any waiters.
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr)
22{ 39{
23 struct compat_robust_list_head __user *head = curr->compat_robust_list; 40 struct compat_robust_list_head __user *head = curr->compat_robust_list;
24 struct robust_list __user *entry, *pending; 41 struct robust_list __user *entry, *pending;
42 unsigned int limit = ROBUST_LIST_LIMIT, pi;
25 compat_uptr_t uentry, upending; 43 compat_uptr_t uentry, upending;
26 unsigned int limit = ROBUST_LIST_LIMIT;
27 compat_long_t futex_offset; 44 compat_long_t futex_offset;
28 45
29 /* 46 /*
30 * Fetch the list head (which was registered earlier, via 47 * Fetch the list head (which was registered earlier, via
31 * sys_set_robust_list()): 48 * sys_set_robust_list()):
32 */ 49 */
33 if (get_user(uentry, &head->list.next)) 50 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
34 return; 51 return;
35 entry = compat_ptr(uentry);
36 /* 52 /*
37 * Fetch the relative futex offset: 53 * Fetch the relative futex offset:
38 */ 54 */
@@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr)
42 * Fetch any possibly pending lock-add first, and handle it 58 * Fetch any possibly pending lock-add first, and handle it
43 * if it exists: 59 * if it exists:
44 */ 60 */
45 if (get_user(upending, &head->list_op_pending)) 61 if (fetch_robust_entry(&upending, &pending,
62 &head->list_op_pending, &pi))
46 return; 63 return;
47 pending = compat_ptr(upending);
48 if (upending) 64 if (upending)
49 handle_futex_death((void *)pending + futex_offset, curr); 65 handle_futex_death((void *)pending + futex_offset, curr, pi);
50 66
51 while (compat_ptr(uentry) != &head->list) { 67 while (compat_ptr(uentry) != &head->list) {
52 /* 68 /*
@@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr)
55 */ 71 */
56 if (entry != pending) 72 if (entry != pending)
57 if (handle_futex_death((void *)entry + futex_offset, 73 if (handle_futex_death((void *)entry + futex_offset,
58 curr)) 74 curr, pi))
59 return; 75 return;
60 76
61 /* 77 /*
62 * Fetch the next entry in the list: 78 * Fetch the next entry in the list:
63 */ 79 */
64 if (get_user(uentry, (compat_uptr_t *)&entry->next)) 80 if (fetch_robust_entry(&uentry, &entry,
81 (compat_uptr_t *)&entry->next, &pi))
65 return; 82 return;
66 entry = compat_ptr(uentry);
67 /* 83 /*
68 * Avoid excessively long or circular lists: 84 * Avoid excessively long or circular lists:
69 */ 85 */
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 39277dd6bf90..ab16a5a4cfe9 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -275,8 +275,8 @@ static void upcase_if_global(struct kallsym_iter *iter)
275static int get_ksymbol_mod(struct kallsym_iter *iter) 275static int get_ksymbol_mod(struct kallsym_iter *iter)
276{ 276{
277 iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms, 277 iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms,
278 &iter->value, 278 &iter->value, &iter->type,
279 &iter->type, iter->name); 279 iter->name, sizeof(iter->name));
280 if (iter->owner == NULL) 280 if (iter->owner == NULL)
281 return 0; 281 return 0;
282 282
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 24be714b04c7..4f9c60ef95e8 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -216,23 +216,6 @@ EXPORT_SYMBOL(kthread_bind);
216 */ 216 */
217int kthread_stop(struct task_struct *k) 217int kthread_stop(struct task_struct *k)
218{ 218{
219 return kthread_stop_sem(k, NULL);
220}
221EXPORT_SYMBOL(kthread_stop);
222
223/**
224 * kthread_stop_sem - stop a thread created by kthread_create().
225 * @k: thread created by kthread_create().
226 * @s: semaphore that @k waits on while idle.
227 *
228 * Does essentially the same thing as kthread_stop() above, but wakes
229 * @k by calling up(@s).
230 *
231 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
232 * was never called.
233 */
234int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
235{
236 int ret; 219 int ret;
237 220
238 mutex_lock(&kthread_stop_lock); 221 mutex_lock(&kthread_stop_lock);
@@ -246,10 +229,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
246 229
247 /* Now set kthread_should_stop() to true, and wake it up. */ 230 /* Now set kthread_should_stop() to true, and wake it up. */
248 kthread_stop_info.k = k; 231 kthread_stop_info.k = k;
249 if (s) 232 wake_up_process(k);
250 up(s);
251 else
252 wake_up_process(k);
253 put_task_struct(k); 233 put_task_struct(k);
254 234
255 /* Once it dies, reset stop ptr, gather result and we're done. */ 235 /* Once it dies, reset stop ptr, gather result and we're done. */
@@ -260,7 +240,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
260 240
261 return ret; 241 return ret;
262} 242}
263EXPORT_SYMBOL(kthread_stop_sem); 243EXPORT_SYMBOL(kthread_stop);
264 244
265static __init int helper_init(void) 245static __init int helper_init(void)
266{ 246{
diff --git a/kernel/module.c b/kernel/module.c
index 35e1b1f859d7..2a19cd47c046 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2019,10 +2019,8 @@ const char *module_address_lookup(unsigned long addr,
2019 return NULL; 2019 return NULL;
2020} 2020}
2021 2021
2022struct module *module_get_kallsym(unsigned int symnum, 2022struct module *module_get_kallsym(unsigned int symnum, unsigned long *value,
2023 unsigned long *value, 2023 char *type, char *name, size_t namelen)
2024 char *type,
2025 char namebuf[128])
2026{ 2024{
2027 struct module *mod; 2025 struct module *mod;
2028 2026
@@ -2031,9 +2029,8 @@ struct module *module_get_kallsym(unsigned int symnum,
2031 if (symnum < mod->num_symtab) { 2029 if (symnum < mod->num_symtab) {
2032 *value = mod->symtab[symnum].st_value; 2030 *value = mod->symtab[symnum].st_value;
2033 *type = mod->symtab[symnum].st_info; 2031 *type = mod->symtab[symnum].st_info;
2034 strncpy(namebuf, 2032 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
2035 mod->strtab + mod->symtab[symnum].st_name, 2033 namelen);
2036 127);
2037 mutex_unlock(&module_mutex); 2034 mutex_unlock(&module_mutex);
2038 return mod; 2035 return mod;
2039 } 2036 }
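
The kallsyms and module.c hunks pass the destination size down and switch from strncpy() to strlcpy(), so the symbol name is always NUL-terminated instead of being truncated without a terminator. A small userspace sketch of the difference, with a local bounded-copy helper standing in for the kernel's strlcpy():

#include <stdio.h>
#include <string.h>

/* Illustrative strlcpy-style helper: truncates but always terminates. */
static size_t copy_bounded(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len < size - 1) ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;		/* length the caller wanted to copy */
}

int main(void)
{
	char a[8], b[8];

	strncpy(a, "module_symbol_name", sizeof(a));	/* fills a[], adds no NUL */
	copy_bounded(b, "module_symbol_name", sizeof(b));

	printf("bounded copy gives \"%s\"\n", b);
	/* a[] is deliberately not printed: without a terminator that would be undefined. */
	(void)a;
	return 0;
}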
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 494dac872a13..948bd8f643e2 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -275,6 +275,7 @@ static int test_func(void *data)
275 275
276 /* Wait for the next command to be executed */ 276 /* Wait for the next command to be executed */
277 schedule(); 277 schedule();
278 try_to_freeze();
278 279
279 if (signal_pending(current)) 280 if (signal_pending(current))
280 flush_signals(current); 281 flush_signals(current);
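
The one-line try_to_freeze() addition lets the tester thread cooperate with suspend instead of blocking the freezer. For context, a kernel-style sketch of the wait-loop shape it slots into; this is the general pattern, not the tester's exact code, and the function name is made up:

#include <linux/kthread.h>
#include <linux/sched.h>

static int command_loop(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* sleep until the next command arrives */
		try_to_freeze();	/* let suspend/resume make progress */

		if (signal_pending(current))
			flush_signals(current);

		/* ... run the queued command ... */
	}
	return 0;
}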
diff --git a/kernel/sched.c b/kernel/sched.c
index d714611f1691..b44b9a43b0fc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -51,6 +51,7 @@
51#include <linux/times.h> 51#include <linux/times.h>
52#include <linux/acct.h> 52#include <linux/acct.h>
53#include <linux/kprobes.h> 53#include <linux/kprobes.h>
54#include <linux/delayacct.h>
54#include <asm/tlb.h> 55#include <asm/tlb.h>
55 56
56#include <asm/unistd.h> 57#include <asm/unistd.h>
@@ -501,9 +502,36 @@ struct file_operations proc_schedstat_operations = {
501 .release = single_release, 502 .release = single_release,
502}; 503};
503 504
505/*
506 * Expects runqueue lock to be held for atomicity of update
507 */
508static inline void
509rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
510{
511 if (rq) {
512 rq->rq_sched_info.run_delay += delta_jiffies;
513 rq->rq_sched_info.pcnt++;
514 }
515}
516
517/*
518 * Expects runqueue lock to be held for atomicity of update
519 */
520static inline void
521rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
522{
523 if (rq)
524 rq->rq_sched_info.cpu_time += delta_jiffies;
525}
504# define schedstat_inc(rq, field) do { (rq)->field++; } while (0) 526# define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
505# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) 527# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
506#else /* !CONFIG_SCHEDSTATS */ 528#else /* !CONFIG_SCHEDSTATS */
529static inline void
530rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
531{}
532static inline void
533rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
534{}
507# define schedstat_inc(rq, field) do { } while (0) 535# define schedstat_inc(rq, field) do { } while (0)
508# define schedstat_add(rq, field, amt) do { } while (0) 536# define schedstat_add(rq, field, amt) do { } while (0)
509#endif 537#endif
@@ -523,7 +551,7 @@ static inline struct rq *this_rq_lock(void)
523 return rq; 551 return rq;
524} 552}
525 553
526#ifdef CONFIG_SCHEDSTATS 554#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
527/* 555/*
528 * Called when a process is dequeued from the active array and given 556 * Called when a process is dequeued from the active array and given
529 * the cpu. We should note that with the exception of interactive 557 * the cpu. We should note that with the exception of interactive
@@ -551,21 +579,16 @@ static inline void sched_info_dequeued(struct task_struct *t)
551 */ 579 */
552static void sched_info_arrive(struct task_struct *t) 580static void sched_info_arrive(struct task_struct *t)
553{ 581{
554 unsigned long now = jiffies, diff = 0; 582 unsigned long now = jiffies, delta_jiffies = 0;
555 struct rq *rq = task_rq(t);
556 583
557 if (t->sched_info.last_queued) 584 if (t->sched_info.last_queued)
558 diff = now - t->sched_info.last_queued; 585 delta_jiffies = now - t->sched_info.last_queued;
559 sched_info_dequeued(t); 586 sched_info_dequeued(t);
560 t->sched_info.run_delay += diff; 587 t->sched_info.run_delay += delta_jiffies;
561 t->sched_info.last_arrival = now; 588 t->sched_info.last_arrival = now;
562 t->sched_info.pcnt++; 589 t->sched_info.pcnt++;
563 590
564 if (!rq) 591 rq_sched_info_arrive(task_rq(t), delta_jiffies);
565 return;
566
567 rq->rq_sched_info.run_delay += diff;
568 rq->rq_sched_info.pcnt++;
569} 592}
570 593
571/* 594/*
@@ -585,8 +608,9 @@ static void sched_info_arrive(struct task_struct *t)
585 */ 608 */
586static inline void sched_info_queued(struct task_struct *t) 609static inline void sched_info_queued(struct task_struct *t)
587{ 610{
588 if (!t->sched_info.last_queued) 611 if (unlikely(sched_info_on()))
589 t->sched_info.last_queued = jiffies; 612 if (!t->sched_info.last_queued)
613 t->sched_info.last_queued = jiffies;
590} 614}
591 615
592/* 616/*
@@ -595,13 +619,10 @@ static inline void sched_info_queued(struct task_struct *t)
595 */ 619 */
596static inline void sched_info_depart(struct task_struct *t) 620static inline void sched_info_depart(struct task_struct *t)
597{ 621{
598 struct rq *rq = task_rq(t); 622 unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
599 unsigned long diff = jiffies - t->sched_info.last_arrival;
600
601 t->sched_info.cpu_time += diff;
602 623
603 if (rq) 624 t->sched_info.cpu_time += delta_jiffies;
604 rq->rq_sched_info.cpu_time += diff; 625 rq_sched_info_depart(task_rq(t), delta_jiffies);
605} 626}
606 627
607/* 628/*
@@ -610,7 +631,7 @@ static inline void sched_info_depart(struct task_struct *t)
610 * the idle task.) We are only called when prev != next. 631 * the idle task.) We are only called when prev != next.
611 */ 632 */
612static inline void 633static inline void
613sched_info_switch(struct task_struct *prev, struct task_struct *next) 634__sched_info_switch(struct task_struct *prev, struct task_struct *next)
614{ 635{
615 struct rq *rq = task_rq(prev); 636 struct rq *rq = task_rq(prev);
616 637
@@ -625,10 +646,16 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
625 if (next != rq->idle) 646 if (next != rq->idle)
626 sched_info_arrive(next); 647 sched_info_arrive(next);
627} 648}
649static inline void
650sched_info_switch(struct task_struct *prev, struct task_struct *next)
651{
652 if (unlikely(sched_info_on()))
653 __sched_info_switch(prev, next);
654}
628#else 655#else
629#define sched_info_queued(t) do { } while (0) 656#define sched_info_queued(t) do { } while (0)
630#define sched_info_switch(t, next) do { } while (0) 657#define sched_info_switch(t, next) do { } while (0)
631#endif /* CONFIG_SCHEDSTATS */ 658#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
632 659
633/* 660/*
634 * Adding/removing a task to/from a priority array: 661 * Adding/removing a task to/from a priority array:
@@ -1530,8 +1557,9 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
1530 1557
1531 INIT_LIST_HEAD(&p->run_list); 1558 INIT_LIST_HEAD(&p->run_list);
1532 p->array = NULL; 1559 p->array = NULL;
1533#ifdef CONFIG_SCHEDSTATS 1560#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1534 memset(&p->sched_info, 0, sizeof(p->sched_info)); 1561 if (unlikely(sched_info_on()))
1562 memset(&p->sched_info, 0, sizeof(p->sched_info));
1535#endif 1563#endif
1536#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 1564#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
1537 p->oncpu = 0; 1565 p->oncpu = 0;
@@ -1788,7 +1816,15 @@ context_switch(struct rq *rq, struct task_struct *prev,
1788 WARN_ON(rq->prev_mm); 1816 WARN_ON(rq->prev_mm);
1789 rq->prev_mm = oldmm; 1817 rq->prev_mm = oldmm;
1790 } 1818 }
1819 /*
1820 * Since the runqueue lock will be released by the next
1821 * task (which is an invalid locking op but in the case
1822 * of the scheduler it's an obvious special-case), so we
1823 * do an early lockdep release here:
1824 */
1825#ifndef __ARCH_WANT_UNLOCKED_CTXSW
1791 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 1826 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1827#endif
1792 1828
1793 /* Here we just switch the register state and the stack. */ 1829 /* Here we just switch the register state and the stack. */
1794 switch_to(prev, next, prev); 1830 switch_to(prev, next, prev);
@@ -4526,9 +4562,11 @@ void __sched io_schedule(void)
4526{ 4562{
4527 struct rq *rq = &__raw_get_cpu_var(runqueues); 4563 struct rq *rq = &__raw_get_cpu_var(runqueues);
4528 4564
4565 delayacct_blkio_start();
4529 atomic_inc(&rq->nr_iowait); 4566 atomic_inc(&rq->nr_iowait);
4530 schedule(); 4567 schedule();
4531 atomic_dec(&rq->nr_iowait); 4568 atomic_dec(&rq->nr_iowait);
4569 delayacct_blkio_end();
4532} 4570}
4533EXPORT_SYMBOL(io_schedule); 4571EXPORT_SYMBOL(io_schedule);
4534 4572
@@ -4537,9 +4575,11 @@ long __sched io_schedule_timeout(long timeout)
4537 struct rq *rq = &__raw_get_cpu_var(runqueues); 4575 struct rq *rq = &__raw_get_cpu_var(runqueues);
4538 long ret; 4576 long ret;
4539 4577
4578 delayacct_blkio_start();
4540 atomic_inc(&rq->nr_iowait); 4579 atomic_inc(&rq->nr_iowait);
4541 ret = schedule_timeout(timeout); 4580 ret = schedule_timeout(timeout);
4542 atomic_dec(&rq->nr_iowait); 4581 atomic_dec(&rq->nr_iowait);
4582 delayacct_blkio_end();
4543 return ret; 4583 return ret;
4544} 4584}
4545 4585
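
The scheduler hunks above wrap the sched_info bookkeeping behind sched_info_on() so the extra work is only done when schedstats or per-task delay accounting is actually enabled. A userspace sketch of the same gating pattern, with unlikely() spelled out via __builtin_expect and an illustrative flag standing in for sched_info_on():

#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

static int delay_accounting_enabled;	/* stand-in for sched_info_on() */

struct task_stats {
	unsigned long last_queued;
};

static void __stats_queued(struct task_stats *t, unsigned long now)
{
	if (!t->last_queued)
		t->last_queued = now;
}

/* Cheap wrapper: a single predictable branch when the feature is off. */
static inline void stats_queued(struct task_stats *t, unsigned long now)
{
	if (unlikely(delay_accounting_enabled))
		__stats_queued(t, now);
}

int main(void)
{
	struct task_stats t = { 0 };

	stats_queued(&t, 100);		/* no-op, accounting is off */
	delay_accounting_enabled = 1;
	stats_queued(&t, 200);
	printf("last_queued=%lu\n", t.last_queued);	/* prints 200 */
	return 0;
}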
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fd12f2556f0d..0f08a84ae307 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -311,8 +311,6 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
311 softirq_vec[nr].action = action; 311 softirq_vec[nr].action = action;
312} 312}
313 313
314EXPORT_UNUSED_SYMBOL(open_softirq); /* June 2006 */
315
316/* Tasklets */ 314/* Tasklets */
317struct tasklet_head 315struct tasklet_head
318{ 316{
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
new file mode 100644
index 000000000000..f45179ce028e
--- /dev/null
+++ b/kernel/taskstats.c
@@ -0,0 +1,568 @@
1/*
2 * taskstats.c - Export per-task statistics to userland
3 *
4 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
5 * (C) Balbir Singh, IBM Corp. 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/taskstats_kern.h>
21#include <linux/delayacct.h>
22#include <linux/cpumask.h>
23#include <linux/percpu.h>
24#include <net/genetlink.h>
25#include <asm/atomic.h>
26
27/*
28 * Maximum length of a cpumask that can be specified in
29 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
30 */
31#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS)
32
33static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
34static int family_registered;
35kmem_cache_t *taskstats_cache;
36
37static struct genl_family family = {
38 .id = GENL_ID_GENERATE,
39 .name = TASKSTATS_GENL_NAME,
40 .version = TASKSTATS_GENL_VERSION,
41 .maxattr = TASKSTATS_CMD_ATTR_MAX,
42};
43
44static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
45__read_mostly = {
46 [TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
47 [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
48 [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
49 [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
50
51struct listener {
52 struct list_head list;
53 pid_t pid;
54 char valid;
55};
56
57struct listener_list {
58 struct rw_semaphore sem;
59 struct list_head list;
60};
61static DEFINE_PER_CPU(struct listener_list, listener_array);
62
63enum actions {
64 REGISTER,
65 DEREGISTER,
66 CPU_DONT_CARE
67};
68
69static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
70 void **replyp, size_t size)
71{
72 struct sk_buff *skb;
73 void *reply;
74
75 /*
76 * If new attributes are added, please revisit this allocation
77 */
78 skb = nlmsg_new(size);
79 if (!skb)
80 return -ENOMEM;
81
82 if (!info) {
83 int seq = get_cpu_var(taskstats_seqnum)++;
84 put_cpu_var(taskstats_seqnum);
85
86 reply = genlmsg_put(skb, 0, seq,
87 family.id, 0, 0,
88 cmd, family.version);
89 } else
90 reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
91 family.id, 0, 0,
92 cmd, family.version);
93 if (reply == NULL) {
94 nlmsg_free(skb);
95 return -EINVAL;
96 }
97
98 *skbp = skb;
99 *replyp = reply;
100 return 0;
101}
102
103/*
104 * Send taskstats data in @skb to listener with nl_pid @pid
105 */
106static int send_reply(struct sk_buff *skb, pid_t pid)
107{
108 struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
109 void *reply = genlmsg_data(genlhdr);
110 int rc;
111
112 rc = genlmsg_end(skb, reply);
113 if (rc < 0) {
114 nlmsg_free(skb);
115 return rc;
116 }
117
118 return genlmsg_unicast(skb, pid);
119}
120
121/*
122 * Send taskstats data in @skb to listeners registered for @cpu's exit data
123 */
124static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
125{
126 struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
127 struct listener_list *listeners;
128 struct listener *s, *tmp;
129 struct sk_buff *skb_next, *skb_cur = skb;
130 void *reply = genlmsg_data(genlhdr);
131 int rc, ret, delcount = 0;
132
133 rc = genlmsg_end(skb, reply);
134 if (rc < 0) {
135 nlmsg_free(skb);
136 return rc;
137 }
138
139 rc = 0;
140 listeners = &per_cpu(listener_array, cpu);
141 down_read(&listeners->sem);
142 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
143 skb_next = NULL;
144 if (!list_is_last(&s->list, &listeners->list)) {
145 skb_next = skb_clone(skb_cur, GFP_KERNEL);
146 if (!skb_next) {
147 nlmsg_free(skb_cur);
148 rc = -ENOMEM;
149 break;
150 }
151 }
152 ret = genlmsg_unicast(skb_cur, s->pid);
153 if (ret == -ECONNREFUSED) {
154 s->valid = 0;
155 delcount++;
156 rc = ret;
157 }
158 skb_cur = skb_next;
159 }
160 up_read(&listeners->sem);
161
162 if (!delcount)
163 return rc;
164
165 /* Delete invalidated entries */
166 down_write(&listeners->sem);
167 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
168 if (!s->valid) {
169 list_del(&s->list);
170 kfree(s);
171 }
172 }
173 up_write(&listeners->sem);
174 return rc;
175}
176
177static int fill_pid(pid_t pid, struct task_struct *pidtsk,
178 struct taskstats *stats)
179{
180 int rc;
181 struct task_struct *tsk = pidtsk;
182
183 if (!pidtsk) {
184 read_lock(&tasklist_lock);
185 tsk = find_task_by_pid(pid);
186 if (!tsk) {
187 read_unlock(&tasklist_lock);
188 return -ESRCH;
189 }
190 get_task_struct(tsk);
191 read_unlock(&tasklist_lock);
192 } else
193 get_task_struct(tsk);
194
195 /*
196 * Each accounting subsystem adds calls to its functions to
197 * fill in relevant parts of struct taskstsats as follows
198 *
199 * rc = per-task-foo(stats, tsk);
200 * if (rc)
201 * goto err;
202 */
203
204 rc = delayacct_add_tsk(stats, tsk);
205 stats->version = TASKSTATS_VERSION;
206
207 /* Define err: label here if needed */
208 put_task_struct(tsk);
209 return rc;
210
211}
212
213static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
214 struct taskstats *stats)
215{
216 struct task_struct *tsk, *first;
217 unsigned long flags;
218
219 /*
220 * Add additional stats from live tasks except zombie thread group
221 * leaders who are already counted with the dead tasks
222 */
223 first = tgidtsk;
224 if (!first) {
225 read_lock(&tasklist_lock);
226 first = find_task_by_pid(tgid);
227 if (!first) {
228 read_unlock(&tasklist_lock);
229 return -ESRCH;
230 }
231 get_task_struct(first);
232 read_unlock(&tasklist_lock);
233 } else
234 get_task_struct(first);
235
236 /* Start with stats from dead tasks */
237 spin_lock_irqsave(&first->signal->stats_lock, flags);
238 if (first->signal->stats)
239 memcpy(stats, first->signal->stats, sizeof(*stats));
240 spin_unlock_irqrestore(&first->signal->stats_lock, flags);
241
242 tsk = first;
243 read_lock(&tasklist_lock);
244 do {
245 if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
246 continue;
247 /*
248 * Accounting subsystem can call its functions here to
249 * fill in relevant parts of struct taskstsats as follows
250 *
251 * per-task-foo(stats, tsk);
252 */
253 delayacct_add_tsk(stats, tsk);
254
255 } while_each_thread(first, tsk);
256 read_unlock(&tasklist_lock);
257 stats->version = TASKSTATS_VERSION;
258
259 /*
 260	 * Accounting subsystems can also add calls here to modify
261 * fields of taskstats.
262 */
263
264 return 0;
265}
266
267
268static void fill_tgid_exit(struct task_struct *tsk)
269{
270 unsigned long flags;
271
272 spin_lock_irqsave(&tsk->signal->stats_lock, flags);
273 if (!tsk->signal->stats)
274 goto ret;
275
276 /*
277 * Each accounting subsystem calls its functions here to
 278	 * accumulate its per-task stats for tsk, into the per-tgid structure
279 *
280 * per-task-foo(tsk->signal->stats, tsk);
281 */
282 delayacct_add_tsk(tsk->signal->stats, tsk);
283ret:
284 spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
285 return;
286}
287
288static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
289{
290 struct listener_list *listeners;
291 struct listener *s, *tmp;
292 unsigned int cpu;
293 cpumask_t mask = *maskp;
294
295 if (!cpus_subset(mask, cpu_possible_map))
296 return -EINVAL;
297
298 if (isadd == REGISTER) {
299 for_each_cpu_mask(cpu, mask) {
300 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
301 cpu_to_node(cpu));
302 if (!s)
303 goto cleanup;
304 s->pid = pid;
305 INIT_LIST_HEAD(&s->list);
306 s->valid = 1;
307
308 listeners = &per_cpu(listener_array, cpu);
309 down_write(&listeners->sem);
310 list_add(&s->list, &listeners->list);
311 up_write(&listeners->sem);
312 }
313 return 0;
314 }
315
316 /* Deregister or cleanup */
317cleanup:
318 for_each_cpu_mask(cpu, mask) {
319 listeners = &per_cpu(listener_array, cpu);
320 down_write(&listeners->sem);
321 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
322 if (s->pid == pid) {
323 list_del(&s->list);
324 kfree(s);
325 break;
326 }
327 }
328 up_write(&listeners->sem);
329 }
330 return 0;
331}
332
333static int parse(struct nlattr *na, cpumask_t *mask)
334{
335 char *data;
336 int len;
337 int ret;
338
339 if (na == NULL)
340 return 1;
341 len = nla_len(na);
342 if (len > TASKSTATS_CPUMASK_MAXLEN)
343 return -E2BIG;
344 if (len < 1)
345 return -EINVAL;
346 data = kmalloc(len, GFP_KERNEL);
347 if (!data)
348 return -ENOMEM;
349 nla_strlcpy(data, na, len);
350 ret = cpulist_parse(data, *mask);
351 kfree(data);
352 return ret;
353}
354
355static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
356{
357 int rc = 0;
358 struct sk_buff *rep_skb;
359 struct taskstats stats;
360 void *reply;
361 size_t size;
362 struct nlattr *na;
363 cpumask_t mask;
364
365 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
366 if (rc < 0)
367 return rc;
368 if (rc == 0)
369 return add_del_listener(info->snd_pid, &mask, REGISTER);
370
371 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
372 if (rc < 0)
373 return rc;
374 if (rc == 0)
375 return add_del_listener(info->snd_pid, &mask, DEREGISTER);
376
377 /*
378 * Size includes space for nested attributes
379 */
380 size = nla_total_size(sizeof(u32)) +
381 nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
382
383 memset(&stats, 0, sizeof(stats));
384 rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
385 if (rc < 0)
386 return rc;
387
388 if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
389 u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
390 rc = fill_pid(pid, NULL, &stats);
391 if (rc < 0)
392 goto err;
393
394 na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
395 NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
396 NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
397 stats);
398 } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
399 u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
400 rc = fill_tgid(tgid, NULL, &stats);
401 if (rc < 0)
402 goto err;
403
404 na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
405 NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
406 NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
407 stats);
408 } else {
409 rc = -EINVAL;
410 goto err;
411 }
412
413 nla_nest_end(rep_skb, na);
414
415 return send_reply(rep_skb, info->snd_pid);
416
417nla_put_failure:
418 return genlmsg_cancel(rep_skb, reply);
419err:
420 nlmsg_free(rep_skb);
421 return rc;
422}
423
424void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
425{
426 struct listener_list *listeners;
427 struct taskstats *tmp;
428 /*
429 * This is the cpu on which the task is exiting currently and will
430 * be the one for which the exit event is sent, even if the cpu
431 * on which this function is running changes later.
432 */
433 *mycpu = raw_smp_processor_id();
434
435 *ptidstats = NULL;
436 tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
437 if (!tmp)
438 return;
439
440 listeners = &per_cpu(listener_array, *mycpu);
441 down_read(&listeners->sem);
442 if (!list_empty(&listeners->list)) {
443 *ptidstats = tmp;
444 tmp = NULL;
445 }
446 up_read(&listeners->sem);
447 kfree(tmp);
448}
449
450/* Send pid data out on exit */
451void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
452 int group_dead, unsigned int mycpu)
453{
454 int rc;
455 struct sk_buff *rep_skb;
456 void *reply;
457 size_t size;
458 int is_thread_group;
459 struct nlattr *na;
460 unsigned long flags;
461
462 if (!family_registered || !tidstats)
463 return;
464
465 spin_lock_irqsave(&tsk->signal->stats_lock, flags);
466 is_thread_group = tsk->signal->stats ? 1 : 0;
467 spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
468
469 rc = 0;
470 /*
471 * Size includes space for nested attributes
472 */
473 size = nla_total_size(sizeof(u32)) +
474 nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
475
476 if (is_thread_group)
477 size = 2 * size; /* PID + STATS + TGID + STATS */
478
479 rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
480 if (rc < 0)
481 goto ret;
482
483 rc = fill_pid(tsk->pid, tsk, tidstats);
484 if (rc < 0)
485 goto err_skb;
486
487 na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
488 NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
489 NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
490 *tidstats);
491 nla_nest_end(rep_skb, na);
492
493 if (!is_thread_group)
494 goto send;
495
496 /*
497 * tsk has/had a thread group so fill the tsk->signal->stats structure
498 * Doesn't matter if tsk is the leader or the last group member leaving
499 */
500
501 fill_tgid_exit(tsk);
502 if (!group_dead)
503 goto send;
504
505 na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
506 NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
507 /* No locking needed for tsk->signal->stats since group is dead */
508 NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
509 *tsk->signal->stats);
510 nla_nest_end(rep_skb, na);
511
512send:
513 send_cpu_listeners(rep_skb, mycpu);
514 return;
515
516nla_put_failure:
517 genlmsg_cancel(rep_skb, reply);
518 goto ret;
519err_skb:
520 nlmsg_free(rep_skb);
521ret:
522 return;
523}
524
525static struct genl_ops taskstats_ops = {
526 .cmd = TASKSTATS_CMD_GET,
527 .doit = taskstats_user_cmd,
528 .policy = taskstats_cmd_get_policy,
529};
530
531/* Needed early in initialization */
532void __init taskstats_init_early(void)
533{
534 unsigned int i;
535
536 taskstats_cache = kmem_cache_create("taskstats_cache",
537 sizeof(struct taskstats),
538 0, SLAB_PANIC, NULL, NULL);
539 for_each_possible_cpu(i) {
540 INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
541 init_rwsem(&(per_cpu(listener_array, i).sem));
542 }
543}
544
545static int __init taskstats_init(void)
546{
547 int rc;
548
549 rc = genl_register_family(&family);
550 if (rc)
551 return rc;
552
553 rc = genl_register_ops(&family, &taskstats_ops);
554 if (rc < 0)
555 goto err;
556
557 family_registered = 1;
558 return 0;
559err:
560 genl_unregister_family(&family);
561 return rc;
562}
563
564/*
565 * late initcall ensures initialization of statistics collection
566 * mechanisms precedes initialization of the taskstats interface
567 */
568late_initcall(taskstats_init);
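
send_cpu_listeners() in the new file clones the skb for every listener except the last, so each receiver gets a private copy, the original buffer is consumed by the final send, and nothing is freed twice. A userspace sketch of that "duplicate for all but the last receiver" idea using plain strings; the listener bookkeeping of the real code is omitted:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void deliver(int listener, char *msg)
{
	printf("listener %d gets: %s\n", listener, msg);
	free(msg);			/* each receiver owns its copy */
}

static int fanout(const char *payload, int nr_listeners)
{
	char *cur;
	int i;

	if (nr_listeners <= 0)
		return 0;

	cur = strdup(payload);
	if (!cur)
		return -1;

	for (i = 0; i < nr_listeners; i++) {
		char *next = NULL;

		/* Clone only while someone else still needs the message. */
		if (i < nr_listeners - 1) {
			next = strdup(cur);
			if (!next) {
				free(cur);
				return -1;
			}
		}
		deliver(i, cur);
		cur = next;
	}
	return 0;
}

int main(void)
{
	return fanout("exit stats for pid 1234", 3);
}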
diff --git a/kernel/timer.c b/kernel/timer.c
index 2a87430a58d4..05809c2e2fd6 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -374,6 +374,7 @@ int del_timer_sync(struct timer_list *timer)
374 int ret = try_to_del_timer_sync(timer); 374 int ret = try_to_del_timer_sync(timer);
375 if (ret >= 0) 375 if (ret >= 0)
376 return ret; 376 return ret;
377 cpu_relax();
377 } 378 }
378} 379}
379 380
@@ -968,6 +969,7 @@ void __init timekeeping_init(void)
968} 969}
969 970
970 971
972static int timekeeping_suspended;
971/* 973/*
972 * timekeeping_resume - Resumes the generic timekeeping subsystem. 974 * timekeeping_resume - Resumes the generic timekeeping subsystem.
973 * @dev: unused 975 * @dev: unused
@@ -983,6 +985,18 @@ static int timekeeping_resume(struct sys_device *dev)
983 write_seqlock_irqsave(&xtime_lock, flags); 985 write_seqlock_irqsave(&xtime_lock, flags);
984 /* restart the last cycle value */ 986 /* restart the last cycle value */
985 clock->cycle_last = clocksource_read(clock); 987 clock->cycle_last = clocksource_read(clock);
988 clock->error = 0;
989 timekeeping_suspended = 0;
990 write_sequnlock_irqrestore(&xtime_lock, flags);
991 return 0;
992}
993
994static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
995{
996 unsigned long flags;
997
998 write_seqlock_irqsave(&xtime_lock, flags);
999 timekeeping_suspended = 1;
986 write_sequnlock_irqrestore(&xtime_lock, flags); 1000 write_sequnlock_irqrestore(&xtime_lock, flags);
987 return 0; 1001 return 0;
988} 1002}
@@ -990,6 +1004,7 @@ static int timekeeping_resume(struct sys_device *dev)
990/* sysfs resume/suspend bits for timekeeping */ 1004/* sysfs resume/suspend bits for timekeeping */
991static struct sysdev_class timekeeping_sysclass = { 1005static struct sysdev_class timekeeping_sysclass = {
992 .resume = timekeeping_resume, 1006 .resume = timekeeping_resume,
1007 .suspend = timekeeping_suspend,
993 set_kset_name("timekeeping"), 1008 set_kset_name("timekeeping"),
994}; 1009};
995 1010
@@ -1100,13 +1115,16 @@ static void update_wall_time(void)
1100{ 1115{
1101 cycle_t offset; 1116 cycle_t offset;
1102 1117
1103 clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; 1118 /* Make sure we're fully resumed: */
1119 if (unlikely(timekeeping_suspended))
1120 return;
1104 1121
1105#ifdef CONFIG_GENERIC_TIME 1122#ifdef CONFIG_GENERIC_TIME
1106 offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; 1123 offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
1107#else 1124#else
1108 offset = clock->cycle_interval; 1125 offset = clock->cycle_interval;
1109#endif 1126#endif
1127 clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
1110 1128
1111 /* normally this loop will run just once, however in the 1129 /* normally this loop will run just once, however in the
1112 * case of lost or late ticks, it will accumulate correctly. 1130 * case of lost or late ticks, it will accumulate correctly.
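
The timer.c changes add a timekeeping_suspended flag that is set under xtime_lock on suspend, cleared on resume, and checked at the top of update_wall_time() so the clock is never advanced from stale cycle counts. A userspace sketch of that guard, with a plain mutex standing in for the seqlock and made-up function names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t clock_lock = PTHREAD_MUTEX_INITIALIZER;
static int clock_suspended;	/* analogous to timekeeping_suspended */

static void clock_suspend(void)
{
	pthread_mutex_lock(&clock_lock);
	clock_suspended = 1;
	pthread_mutex_unlock(&clock_lock);
}

static void clock_resume(void)
{
	pthread_mutex_lock(&clock_lock);
	/* reset error terms / the last cycle value here */
	clock_suspended = 0;
	pthread_mutex_unlock(&clock_lock);
}

static void periodic_update(void)
{
	pthread_mutex_lock(&clock_lock);
	if (clock_suspended) {		/* make sure we're fully resumed */
		pthread_mutex_unlock(&clock_lock);
		return;
	}
	/* ... accumulate elapsed cycles into the wall clock ... */
	pthread_mutex_unlock(&clock_lock);
}

int main(void)
{
	clock_suspend();
	periodic_update();	/* skipped while suspended */
	clock_resume();
	periodic_update();	/* runs normally again */
	printf("done\n");
	return 0;
}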
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e5889b1a33ff..554ee688a9f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -158,7 +158,7 @@ config DEBUG_RWSEMS
158 158
159config DEBUG_LOCK_ALLOC 159config DEBUG_LOCK_ALLOC
160 bool "Lock debugging: detect incorrect freeing of live locks" 160 bool "Lock debugging: detect incorrect freeing of live locks"
161 depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 161 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
162 select DEBUG_SPINLOCK 162 select DEBUG_SPINLOCK
163 select DEBUG_MUTEXES 163 select DEBUG_MUTEXES
164 select DEBUG_RWSEMS 164 select DEBUG_RWSEMS
@@ -173,7 +173,7 @@ config DEBUG_LOCK_ALLOC
173 173
174config PROVE_LOCKING 174config PROVE_LOCKING
175 bool "Lock debugging: prove locking correctness" 175 bool "Lock debugging: prove locking correctness"
176 depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 176 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
177 select LOCKDEP 177 select LOCKDEP
178 select DEBUG_SPINLOCK 178 select DEBUG_SPINLOCK
179 select DEBUG_MUTEXES 179 select DEBUG_MUTEXES
@@ -216,7 +216,7 @@ config PROVE_LOCKING
216 216
217config LOCKDEP 217config LOCKDEP
218 bool 218 bool
219 depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 219 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
220 select STACKTRACE 220 select STACKTRACE
221 select FRAME_POINTER 221 select FRAME_POINTER
222 select KALLSYMS 222 select KALLSYMS
@@ -224,13 +224,14 @@ config LOCKDEP
224 224
225config DEBUG_LOCKDEP 225config DEBUG_LOCKDEP
226 bool "Lock dependency engine debugging" 226 bool "Lock dependency engine debugging"
227 depends on LOCKDEP 227 depends on DEBUG_KERNEL && LOCKDEP
228 help 228 help
229 If you say Y here, the lock dependency engine will do 229 If you say Y here, the lock dependency engine will do
230 additional runtime checks to debug itself, at the price 230 additional runtime checks to debug itself, at the price
231 of more runtime overhead. 231 of more runtime overhead.
232 232
233config TRACE_IRQFLAGS 233config TRACE_IRQFLAGS
234 depends on DEBUG_KERNEL
234 bool 235 bool
235 default y 236 default y
236 depends on TRACE_IRQFLAGS_SUPPORT 237 depends on TRACE_IRQFLAGS_SUPPORT
@@ -256,6 +257,7 @@ config DEBUG_LOCKING_API_SELFTESTS
256 257
257config STACKTRACE 258config STACKTRACE
258 bool 259 bool
260 depends on DEBUG_KERNEL
259 depends on STACKTRACE_SUPPORT 261 depends on STACKTRACE_SUPPORT
260 262
261config DEBUG_KOBJECT 263config DEBUG_KOBJECT
diff --git a/lib/idr.c b/lib/idr.c
index 4d096819511a..16d2143fea48 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -38,14 +38,15 @@ static kmem_cache_t *idr_layer_cache;
38static struct idr_layer *alloc_layer(struct idr *idp) 38static struct idr_layer *alloc_layer(struct idr *idp)
39{ 39{
40 struct idr_layer *p; 40 struct idr_layer *p;
41 unsigned long flags;
41 42
42 spin_lock(&idp->lock); 43 spin_lock_irqsave(&idp->lock, flags);
43 if ((p = idp->id_free)) { 44 if ((p = idp->id_free)) {
44 idp->id_free = p->ary[0]; 45 idp->id_free = p->ary[0];
45 idp->id_free_cnt--; 46 idp->id_free_cnt--;
46 p->ary[0] = NULL; 47 p->ary[0] = NULL;
47 } 48 }
48 spin_unlock(&idp->lock); 49 spin_unlock_irqrestore(&idp->lock, flags);
49 return(p); 50 return(p);
50} 51}
51 52
@@ -59,12 +60,14 @@ static void __free_layer(struct idr *idp, struct idr_layer *p)
59 60
60static void free_layer(struct idr *idp, struct idr_layer *p) 61static void free_layer(struct idr *idp, struct idr_layer *p)
61{ 62{
63 unsigned long flags;
64
62 /* 65 /*
63 * Depends on the return element being zeroed. 66 * Depends on the return element being zeroed.
64 */ 67 */
65 spin_lock(&idp->lock); 68 spin_lock_irqsave(&idp->lock, flags);
66 __free_layer(idp, p); 69 __free_layer(idp, p);
67 spin_unlock(&idp->lock); 70 spin_unlock_irqrestore(&idp->lock, flags);
68} 71}
69 72
70/** 73/**
@@ -168,6 +171,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
168{ 171{
169 struct idr_layer *p, *new; 172 struct idr_layer *p, *new;
170 int layers, v, id; 173 int layers, v, id;
174 unsigned long flags;
171 175
172 id = starting_id; 176 id = starting_id;
173build_up: 177build_up:
@@ -191,14 +195,14 @@ build_up:
191 * The allocation failed. If we built part of 195 * The allocation failed. If we built part of
192 * the structure tear it down. 196 * the structure tear it down.
193 */ 197 */
194 spin_lock(&idp->lock); 198 spin_lock_irqsave(&idp->lock, flags);
195 for (new = p; p && p != idp->top; new = p) { 199 for (new = p; p && p != idp->top; new = p) {
196 p = p->ary[0]; 200 p = p->ary[0];
197 new->ary[0] = NULL; 201 new->ary[0] = NULL;
198 new->bitmap = new->count = 0; 202 new->bitmap = new->count = 0;
199 __free_layer(idp, new); 203 __free_layer(idp, new);
200 } 204 }
201 spin_unlock(&idp->lock); 205 spin_unlock_irqrestore(&idp->lock, flags);
202 return -1; 206 return -1;
203 } 207 }
204 new->ary[0] = p; 208 new->ary[0] = p;
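
The idr.c conversion switches plain spin_lock() calls to spin_lock_irqsave() because the free-list lock can now be taken from contexts that run with interrupts disabled, where the plain form risks deadlock. A kernel-style sketch of the idiom on an illustrative structure, not the idr code itself:

#include <linux/spinlock.h>

struct counter {
	spinlock_t lock;
	unsigned long value;
};

/* Safe even if the same lock is also taken with interrupts disabled. */
static void counter_add(struct counter *c, unsigned long n)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->value += n;
	spin_unlock_irqrestore(&c->lock, flags);
}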
diff --git a/mm/filemap.c b/mm/filemap.c
index d087fc3d3281..b9a60c43b61a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -849,8 +849,6 @@ static void shrink_readahead_size_eio(struct file *filp,
849 return; 849 return;
850 850
851 ra->ra_pages /= 4; 851 ra->ra_pages /= 4;
852 printk(KERN_WARNING "Reducing readahead size to %luK\n",
853 ra->ra_pages << (PAGE_CACHE_SHIFT - 10));
854} 852}
855 853
856/** 854/**
diff --git a/mm/memory.c b/mm/memory.c
index dc0d82cf2a1c..109e9866237e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -47,6 +47,7 @@
47#include <linux/pagemap.h> 47#include <linux/pagemap.h>
48#include <linux/rmap.h> 48#include <linux/rmap.h>
49#include <linux/module.h> 49#include <linux/module.h>
50#include <linux/delayacct.h>
50#include <linux/init.h> 51#include <linux/init.h>
51 52
52#include <asm/pgalloc.h> 53#include <asm/pgalloc.h>
@@ -1549,9 +1550,9 @@ gotten:
1549 flush_cache_page(vma, address, pte_pfn(orig_pte)); 1550 flush_cache_page(vma, address, pte_pfn(orig_pte));
1550 entry = mk_pte(new_page, vma->vm_page_prot); 1551 entry = mk_pte(new_page, vma->vm_page_prot);
1551 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1552 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1553 lazy_mmu_prot_update(entry);
1552 ptep_establish(vma, address, page_table, entry); 1554 ptep_establish(vma, address, page_table, entry);
1553 update_mmu_cache(vma, address, entry); 1555 update_mmu_cache(vma, address, entry);
1554 lazy_mmu_prot_update(entry);
1555 lru_cache_add_active(new_page); 1556 lru_cache_add_active(new_page);
1556 page_add_new_anon_rmap(new_page, vma, address); 1557 page_add_new_anon_rmap(new_page, vma, address);
1557 1558
@@ -1934,6 +1935,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1934 migration_entry_wait(mm, pmd, address); 1935 migration_entry_wait(mm, pmd, address);
1935 goto out; 1936 goto out;
1936 } 1937 }
1938 delayacct_set_flag(DELAYACCT_PF_SWAPIN);
1937 page = lookup_swap_cache(entry); 1939 page = lookup_swap_cache(entry);
1938 if (!page) { 1940 if (!page) {
1939 swapin_readahead(entry, address, vma); 1941 swapin_readahead(entry, address, vma);
@@ -1946,6 +1948,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1946 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 1948 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1947 if (likely(pte_same(*page_table, orig_pte))) 1949 if (likely(pte_same(*page_table, orig_pte)))
1948 ret = VM_FAULT_OOM; 1950 ret = VM_FAULT_OOM;
1951 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
1949 goto unlock; 1952 goto unlock;
1950 } 1953 }
1951 1954
@@ -1955,6 +1958,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1955 grab_swap_token(); 1958 grab_swap_token();
1956 } 1959 }
1957 1960
1961 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
1958 mark_page_accessed(page); 1962 mark_page_accessed(page);
1959 lock_page(page); 1963 lock_page(page);
1960 1964
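
The memory.c hunks bracket the swap-in wait with delayacct_set_flag(DELAYACCT_PF_SWAPIN) and delayacct_clear_flag(), and the important detail is that every exit path, including the early error return, clears the flag again. A userspace sketch of that bracketing discipline with illustrative accounting hooks:

#include <stdio.h>

static void account_start(const char *what) { printf("start %s\n", what); }
static void account_end(const char *what)   { printf("end %s\n", what); }

static int wait_for_page(int page_gone)
{
	int ret = 0;

	account_start("swapin");
	if (page_gone) {
		ret = -1;
		goto out;	/* the error path must also stop accounting */
	}
	/* ... block until the page is read back in ... */
out:
	account_end("swapin");
	return ret;
}

int main(void)
{
	wait_for_page(0);	/* normal path */
	wait_for_page(1);	/* error path still balances start/end */
	return 0;
}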
diff --git a/mm/nommu.c b/mm/nommu.c
index 5151c44a8257..c576df71e3bb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1070,6 +1070,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
1070 vma->vm_start = vma->vm_pgoff << PAGE_SHIFT; 1070 vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
1071 return 0; 1071 return 0;
1072} 1072}
1073EXPORT_SYMBOL(remap_pfn_range);
1073 1074
1074void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 1075void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1075{ 1076{
@@ -1090,6 +1091,7 @@ void unmap_mapping_range(struct address_space *mapping,
1090 int even_cows) 1091 int even_cows)
1091{ 1092{
1092} 1093}
1094EXPORT_SYMBOL(unmap_mapping_range);
1093 1095
1094/* 1096/*
1095 * Check that a process has enough memory to allocate a new virtual 1097 * Check that a process has enough memory to allocate a new virtual
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7b450798b458..266162d2ba28 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -340,7 +340,7 @@ void __vunmap(void *addr, int deallocate_pages)
340 __free_page(area->pages[i]); 340 __free_page(area->pages[i]);
341 } 341 }
342 342
343 if (area->nr_pages > PAGE_SIZE/sizeof(struct page *)) 343 if (area->flags & VM_VPAGES)
344 vfree(area->pages); 344 vfree(area->pages);
345 else 345 else
346 kfree(area->pages); 346 kfree(area->pages);
@@ -427,9 +427,10 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
427 427
428 area->nr_pages = nr_pages; 428 area->nr_pages = nr_pages;
429 /* Please note that the recursion is strictly bounded. */ 429 /* Please note that the recursion is strictly bounded. */
430 if (array_size > PAGE_SIZE) 430 if (array_size > PAGE_SIZE) {
431 pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node); 431 pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
432 else 432 area->flags |= VM_VPAGES;
433 } else
433 pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node); 434 pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
434 area->pages = pages; 435 area->pages = pages;
435 if (!area->pages) { 436 if (!area->pages) {
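
The vmalloc.c change records VM_VPAGES at allocation time instead of re-deriving "which allocator was used" from the size at free time, so the free path always matches the allocation path. A userspace sketch of the same remember-the-allocator flag; the threshold, flag, and helper names are all illustrative:

#include <stdlib.h>

#define USED_BIG_ALLOC 0x1	/* analogous to VM_VPAGES */

struct buf {
	unsigned int flags;
	void *data;
};

/* Pretend big_alloc()/big_free() are a second allocator (think vmalloc vs kmalloc). */
static void *big_alloc(size_t n) { return malloc(n); }
static void  big_free(void *p)   { free(p); }

static int buf_init(struct buf *b, size_t n)
{
	b->flags = 0;
	if (n > 4096) {
		b->data = big_alloc(n);
		b->flags |= USED_BIG_ALLOC;	/* record how it was allocated */
	} else {
		b->data = malloc(n);
	}
	return b->data ? 0 : -1;
}

static void buf_release(struct buf *b)
{
	if (b->flags & USED_BIG_ALLOC)	/* free with the matching allocator */
		big_free(b->data);
	else
		free(b->data);
}

int main(void)
{
	struct buf small, large;

	if (buf_init(&small, 64) || buf_init(&large, 64 * 1024))
		return 1;
	buf_release(&small);
	buf_release(&large);
	return 0;
}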
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 458031bfff55..18fcb9fa518d 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -67,10 +67,6 @@ static struct packet_type vlan_packet_type = {
67 .func = vlan_skb_recv, /* VLAN receive method */ 67 .func = vlan_skb_recv, /* VLAN receive method */
68}; 68};
69 69
70/* Bits of netdev state that are propagated from real device to virtual */
71#define VLAN_LINK_STATE_MASK \
72 ((1<<__LINK_STATE_PRESENT)|(1<<__LINK_STATE_NOCARRIER)|(1<<__LINK_STATE_DORMANT))
73
74/* End of global variables definitions. */ 70/* End of global variables definitions. */
75 71
76/* 72/*
@@ -479,7 +475,9 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
479 new_dev->flags = real_dev->flags; 475 new_dev->flags = real_dev->flags;
480 new_dev->flags &= ~IFF_UP; 476 new_dev->flags &= ~IFF_UP;
481 477
482 new_dev->state = real_dev->state & ~(1<<__LINK_STATE_START); 478 new_dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
479 (1<<__LINK_STATE_DORMANT))) |
480 (1<<__LINK_STATE_PRESENT);
483 481
484 /* need 4 bytes for extra VLAN header info, 482 /* need 4 bytes for extra VLAN header info,
485 * hope the underlying device can handle it. 483 * hope the underlying device can handle it.
@@ -542,12 +540,11 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
542 * so it cannot "appear" on us. 540 * so it cannot "appear" on us.
543 */ 541 */
544 if (!grp) { /* need to add a new group */ 542 if (!grp) { /* need to add a new group */
545 grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL); 543 grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
546 if (!grp) 544 if (!grp)
547 goto out_free_unregister; 545 goto out_free_unregister;
548 546
549 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ 547 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
550 memset(grp, 0, sizeof(struct vlan_group));
551 grp->real_dev_ifindex = real_dev->ifindex; 548 grp->real_dev_ifindex = real_dev->ifindex;
552 549
553 hlist_add_head_rcu(&grp->hlist, 550 hlist_add_head_rcu(&grp->hlist,
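
This and the following network hunks replace the kmalloc()+memset() pair with kzalloc(), which returns already-zeroed memory and keeps the error check in one place. A kernel-style sketch of the before/after shape on an illustrative structure:

#include <linux/slab.h>
#include <linux/string.h>

struct group_state {
	int real_dev_ifindex;
	void *table[16];
};

static struct group_state *alloc_group_old(void)
{
	struct group_state *grp = kmalloc(sizeof(*grp), GFP_KERNEL);

	if (grp)
		memset(grp, 0, sizeof(*grp));
	return grp;
}

static struct group_state *alloc_group_new(void)
{
	/* One call: allocate and zero, same NULL check as before. */
	return kzalloc(sizeof(struct group_state), GFP_KERNEL);
}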
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 5ee96d4b40e9..96dc6bb52d14 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -227,12 +227,11 @@ static void atif_drop_device(struct net_device *dev)
227static struct atalk_iface *atif_add_device(struct net_device *dev, 227static struct atalk_iface *atif_add_device(struct net_device *dev,
228 struct atalk_addr *sa) 228 struct atalk_addr *sa)
229{ 229{
230 struct atalk_iface *iface = kmalloc(sizeof(*iface), GFP_KERNEL); 230 struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL);
231 231
232 if (!iface) 232 if (!iface)
233 goto out; 233 goto out;
234 234
235 memset(iface, 0, sizeof(*iface));
236 dev_hold(dev); 235 dev_hold(dev);
237 iface->dev = dev; 236 iface->dev = dev;
238 dev->atalk_ptr = iface; 237 dev->atalk_ptr = iface;
@@ -559,12 +558,11 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
559 } 558 }
560 559
561 if (!rt) { 560 if (!rt) {
562 rt = kmalloc(sizeof(*rt), GFP_ATOMIC); 561 rt = kzalloc(sizeof(*rt), GFP_ATOMIC);
563 562
564 retval = -ENOBUFS; 563 retval = -ENOBUFS;
565 if (!rt) 564 if (!rt)
566 goto out_unlock; 565 goto out_unlock;
567 memset(rt, 0, sizeof(*rt));
568 566
569 rt->next = atalk_routes; 567 rt->next = atalk_routes;
570 atalk_routes = rt; 568 atalk_routes = rt;
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index a487233dc466..d00cca97eb33 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -508,10 +508,9 @@ Note: we do not have explicit unassign, but look at _push()
508 508
509 if (copy_from_user(&be, arg, sizeof be)) 509 if (copy_from_user(&be, arg, sizeof be))
510 return -EFAULT; 510 return -EFAULT;
511 brvcc = kmalloc(sizeof(struct br2684_vcc), GFP_KERNEL); 511 brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
512 if (!brvcc) 512 if (!brvcc)
513 return -ENOMEM; 513 return -ENOMEM;
514 memset(brvcc, 0, sizeof(struct br2684_vcc));
515 write_lock_irq(&devs_lock); 514 write_lock_irq(&devs_lock);
516 net_dev = br2684_find_dev(&be.ifspec); 515 net_dev = br2684_find_dev(&be.ifspec);
517 if (net_dev == NULL) { 516 if (net_dev == NULL) {
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 2e62105d91bd..7ce7bfe3fbad 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -929,12 +929,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
929 struct seq_file *seq; 929 struct seq_file *seq;
930 int rc = -EAGAIN; 930 int rc = -EAGAIN;
931 931
932 state = kmalloc(sizeof(*state), GFP_KERNEL); 932 state = kzalloc(sizeof(*state), GFP_KERNEL);
933 if (!state) { 933 if (!state) {
934 rc = -ENOMEM; 934 rc = -ENOMEM;
935 goto out_kfree; 935 goto out_kfree;
936 } 936 }
937 memset(state, 0, sizeof(*state));
938 state->ns.neigh_sub_iter = clip_seq_sub_iter; 937 state->ns.neigh_sub_iter = clip_seq_sub_iter;
939 938
940 rc = seq_open(file, &arp_seq_ops); 939 rc = seq_open(file, &arp_seq_ops);
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 4b68a18171cf..b4aa489849df 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1811,12 +1811,11 @@ make_entry(struct lec_priv *priv, unsigned char *mac_addr)
1811{ 1811{
1812 struct lec_arp_table *to_return; 1812 struct lec_arp_table *to_return;
1813 1813
1814 to_return = kmalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1814 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
1815 if (!to_return) { 1815 if (!to_return) {
1816 printk("LEC: Arp entry kmalloc failed\n"); 1816 printk("LEC: Arp entry kmalloc failed\n");
1817 return NULL; 1817 return NULL;
1818 } 1818 }
1819 memset(to_return, 0, sizeof(struct lec_arp_table));
1820 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1819 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
1821 init_timer(&to_return->timer); 1820 init_timer(&to_return->timer);
1822 to_return->timer.function = lec_arp_expire_arp; 1821 to_return->timer.function = lec_arp_expire_arp;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 9aafe1e2f048..00704661e83f 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -258,10 +258,9 @@ static struct mpoa_client *alloc_mpc(void)
258{ 258{
259 struct mpoa_client *mpc; 259 struct mpoa_client *mpc;
260 260
261 mpc = kmalloc(sizeof (struct mpoa_client), GFP_KERNEL); 261 mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL);
262 if (mpc == NULL) 262 if (mpc == NULL)
263 return NULL; 263 return NULL;
264 memset(mpc, 0, sizeof(struct mpoa_client));
265 rwlock_init(&mpc->ingress_lock); 264 rwlock_init(&mpc->ingress_lock);
266 rwlock_init(&mpc->egress_lock); 265 rwlock_init(&mpc->egress_lock);
267 mpc->next = mpcs; 266 mpc->next = mpcs;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 76a7d8ff6c0e..19d5dfc0702f 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -287,10 +287,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
287 if (be.encaps != PPPOATM_ENCAPS_AUTODETECT && 287 if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
288 be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC) 288 be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
289 return -EINVAL; 289 return -EINVAL;
290 pvcc = kmalloc(sizeof(*pvcc), GFP_KERNEL); 290 pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
291 if (pvcc == NULL) 291 if (pvcc == NULL)
292 return -ENOMEM; 292 return -ENOMEM;
293 memset(pvcc, 0, sizeof(*pvcc));
294 pvcc->atmvcc = atmvcc; 293 pvcc->atmvcc = atmvcc;
295 pvcc->old_push = atmvcc->push; 294 pvcc->old_push = atmvcc->push;
296 pvcc->old_pop = atmvcc->pop; 295 pvcc->old_pop = atmvcc->pop;
diff --git a/net/atm/resources.c b/net/atm/resources.c
index de25c6408b04..529f7e64aa2c 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -33,10 +33,9 @@ static struct atm_dev *__alloc_atm_dev(const char *type)
33{ 33{
34 struct atm_dev *dev; 34 struct atm_dev *dev;
35 35
36 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 36 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
37 if (!dev) 37 if (!dev)
38 return NULL; 38 return NULL;
39 memset(dev, 0, sizeof(*dev));
40 dev->type = type; 39 dev->type = type;
41 dev->signal = ATM_PHY_SIG_UNKNOWN; 40 dev->signal = ATM_PHY_SIG_UNKNOWN;
42 dev->link_rate = ATM_OC3_PCR; 41 dev->link_rate = ATM_OC3_PCR;
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 369a75b160f2..867d42537979 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -203,13 +203,11 @@ void ax25_register_sysctl(void)
203 for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) 203 for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
204 ax25_table_size += sizeof(ctl_table); 204 ax25_table_size += sizeof(ctl_table);
205 205
206 if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { 206 if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
207 spin_unlock_bh(&ax25_dev_lock); 207 spin_unlock_bh(&ax25_dev_lock);
208 return; 208 return;
209 } 209 }
210 210
211 memset(ax25_table, 0x00, ax25_table_size);
212
213 for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { 211 for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
214 ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC); 212 ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC);
215 if (!child) { 213 if (!child) {
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 77eab8f4c7fd..332dd8f436ea 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -55,6 +55,7 @@
55#define VERSION "1.8" 55#define VERSION "1.8"
56 56
57static int disable_cfc = 0; 57static int disable_cfc = 0;
58static int channel_mtu = -1;
58static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; 59static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
59 60
60static struct task_struct *rfcomm_thread; 61static struct task_struct *rfcomm_thread;
@@ -812,7 +813,10 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
812 pn->credits = 0; 813 pn->credits = 0;
813 } 814 }
814 815
815 pn->mtu = htobs(d->mtu); 816 if (cr && channel_mtu >= 0)
817 pn->mtu = htobs(channel_mtu);
818 else
819 pn->mtu = htobs(d->mtu);
816 820
817 *ptr = __fcs(buf); ptr++; 821 *ptr = __fcs(buf); ptr++;
818 822
@@ -1243,7 +1247,10 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)
1243 1247
1244 d->priority = pn->priority; 1248 d->priority = pn->priority;
1245 1249
1246 d->mtu = s->mtu = btohs(pn->mtu); 1250 d->mtu = btohs(pn->mtu);
1251
1252 if (cr && d->mtu > s->mtu)
1253 d->mtu = s->mtu;
1247 1254
1248 return 0; 1255 return 0;
1249} 1256}
@@ -1770,6 +1777,11 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1770 s = rfcomm_session_add(nsock, BT_OPEN); 1777 s = rfcomm_session_add(nsock, BT_OPEN);
1771 if (s) { 1778 if (s) {
1772 rfcomm_session_hold(s); 1779 rfcomm_session_hold(s);
1780
1781 /* We should adjust MTU on incoming sessions.
1782 * L2CAP MTU minus UIH header and FCS. */
1783 s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5;
1784
1773 rfcomm_schedule(RFCOMM_SCHED_RX); 1785 rfcomm_schedule(RFCOMM_SCHED_RX);
1774 } else 1786 } else
1775 sock_release(nsock); 1787 sock_release(nsock);
@@ -2087,6 +2099,9 @@ module_exit(rfcomm_exit);
2087module_param(disable_cfc, bool, 0644); 2099module_param(disable_cfc, bool, 0644);
2088MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); 2100MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control");
2089 2101
2102module_param(channel_mtu, int, 0644);
2103MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel");
2104
2090module_param(l2cap_mtu, uint, 0644); 2105module_param(l2cap_mtu, uint, 0644);
2091MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); 2106MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");
2092 2107
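The RFCOMM hunks above do three related things: the session MTU for an accepted connection is derived from the smaller of the two L2CAP MTUs minus the 5 bytes of UIH header and FCS, a new channel_mtu module parameter can force the MTU advertised in outgoing PN frames, and an MTU applied from a peer's PN is clamped to the session MTU. A standalone C sketch of that decision logic, using made-up sample values for the L2CAP MTUs and the requested DLC MTU:

#include <stdio.h>

#define UIH_HDR_FCS 5			/* "UIH header and FCS", per the diff */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int l2cap_omtu = 1013, l2cap_imtu = 672;	/* hypothetical */
	int channel_mtu = -1;				/* module parameter default */
	int requested  = 1024;				/* hypothetical DLC request */

	/* incoming session: L2CAP MTU minus UIH header and FCS */
	int session_mtu = min_int(l2cap_omtu, l2cap_imtu) - UIH_HDR_FCS;

	/* outgoing PN on the initiator side: channel_mtu overrides, if set */
	int pn_mtu = (channel_mtu >= 0) ? channel_mtu : requested;

	/* applying the peer's PN: never let the DLC exceed the session MTU */
	int dlc_mtu = (pn_mtu > session_mtu) ? session_mtu : pn_mtu;

	printf("session=%d pn=%d dlc=%d\n", session_mtu, pn_mtu, dlc_mtu);
	return 0;
}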
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 159fb8409824..4e4119a12139 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -162,12 +162,10 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
162 if (num > BR_MAX_PORTS) 162 if (num > BR_MAX_PORTS)
163 num = BR_MAX_PORTS; 163 num = BR_MAX_PORTS;
164 164
165 indices = kmalloc(num*sizeof(int), GFP_KERNEL); 165 indices = kcalloc(num, sizeof(int), GFP_KERNEL);
166 if (indices == NULL) 166 if (indices == NULL)
167 return -ENOMEM; 167 return -ENOMEM;
168 168
169 memset(indices, 0, num*sizeof(int));
170
171 get_port_ifindices(br, indices, num); 169 get_port_ifindices(br, indices, num);
172 if (copy_to_user((void __user *)args[1], indices, num*sizeof(int))) 170 if (copy_to_user((void __user *)args[1], indices, num*sizeof(int)))
173 num = -EFAULT; 171 num = -EFAULT;
@@ -327,11 +325,10 @@ static int old_deviceless(void __user *uarg)
327 325
328 if (args[2] >= 2048) 326 if (args[2] >= 2048)
329 return -ENOMEM; 327 return -ENOMEM;
330 indices = kmalloc(args[2]*sizeof(int), GFP_KERNEL); 328 indices = kcalloc(args[2], sizeof(int), GFP_KERNEL);
331 if (indices == NULL) 329 if (indices == NULL)
332 return -ENOMEM; 330 return -ENOMEM;
333 331
334 memset(indices, 0, args[2]*sizeof(int));
335 args[2] = get_bridge_ifindices(indices, args[2]); 332 args[2] = get_bridge_ifindices(indices, args[2]);
336 333
337 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) 334 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
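For the two array allocations above the conversion target is kcalloc() rather than kzalloc(): kcalloc(n, size, flags) zeroes the memory like kzalloc() and also refuses n*size products that would overflow, which the open-coded kmalloc(num*sizeof(int)) form did not. A kernel-context sketch of the shape:

	/* before: caller multiplies unchecked, then must remember the memset */
	indices = kmalloc(num * sizeof(int), GFP_KERNEL);
	if (indices == NULL)
		return -ENOMEM;
	memset(indices, 0, num * sizeof(int));

	/* after: zeroed, with the multiplication checked for overflow */
	indices = kcalloc(num, sizeof(int), GFP_KERNEL);
	if (indices == NULL)
		return -ENOMEM;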
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index cbc8a389a0a8..05b3de888243 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -61,6 +61,9 @@ static int brnf_filter_vlan_tagged = 1;
61#define brnf_filter_vlan_tagged 1 61#define brnf_filter_vlan_tagged 1
62#endif 62#endif
63 63
64int brnf_deferred_hooks;
65EXPORT_SYMBOL_GPL(brnf_deferred_hooks);
66
64static __be16 inline vlan_proto(const struct sk_buff *skb) 67static __be16 inline vlan_proto(const struct sk_buff *skb)
65{ 68{
66 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 69 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -890,6 +893,8 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
890 return NF_ACCEPT; 893 return NF_ACCEPT;
891 else if (ip->version == 6 && !brnf_call_ip6tables) 894 else if (ip->version == 6 && !brnf_call_ip6tables)
892 return NF_ACCEPT; 895 return NF_ACCEPT;
896 else if (!brnf_deferred_hooks)
897 return NF_ACCEPT;
893#endif 898#endif
894 if (hook == NF_IP_POST_ROUTING) 899 if (hook == NF_IP_POST_ROUTING)
895 return NF_ACCEPT; 900 return NF_ACCEPT;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 27ce1683caf5..2797e2815418 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -437,7 +437,7 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
437{ 437{
438 struct ethtool_pauseparam pauseparam; 438 struct ethtool_pauseparam pauseparam;
439 439
440 if (!dev->ethtool_ops->get_pauseparam) 440 if (!dev->ethtool_ops->set_pauseparam)
441 return -EOPNOTSUPP; 441 return -EOPNOTSUPP;
442 442
443 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) 443 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 44f6a181a754..476aa3978504 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,11 +257,11 @@ nodata:
257} 257}
258 258
259 259
260static void skb_drop_fraglist(struct sk_buff *skb) 260static void skb_drop_list(struct sk_buff **listp)
261{ 261{
262 struct sk_buff *list = skb_shinfo(skb)->frag_list; 262 struct sk_buff *list = *listp;
263 263
264 skb_shinfo(skb)->frag_list = NULL; 264 *listp = NULL;
265 265
266 do { 266 do {
267 struct sk_buff *this = list; 267 struct sk_buff *this = list;
@@ -270,6 +270,11 @@ static void skb_drop_fraglist(struct sk_buff *skb)
270 } while (list); 270 } while (list);
271} 271}
272 272
273static inline void skb_drop_fraglist(struct sk_buff *skb)
274{
275 skb_drop_list(&skb_shinfo(skb)->frag_list);
276}
277
273static void skb_clone_fraglist(struct sk_buff *skb) 278static void skb_clone_fraglist(struct sk_buff *skb)
274{ 279{
275 struct sk_buff *list; 280 struct sk_buff *list;
@@ -830,41 +835,75 @@ free_skb:
830 835
831int ___pskb_trim(struct sk_buff *skb, unsigned int len) 836int ___pskb_trim(struct sk_buff *skb, unsigned int len)
832{ 837{
838 struct sk_buff **fragp;
839 struct sk_buff *frag;
833 int offset = skb_headlen(skb); 840 int offset = skb_headlen(skb);
834 int nfrags = skb_shinfo(skb)->nr_frags; 841 int nfrags = skb_shinfo(skb)->nr_frags;
835 int i; 842 int i;
843 int err;
844
845 if (skb_cloned(skb) &&
846 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
847 return err;
836 848
837 for (i = 0; i < nfrags; i++) { 849 for (i = 0; i < nfrags; i++) {
838 int end = offset + skb_shinfo(skb)->frags[i].size; 850 int end = offset + skb_shinfo(skb)->frags[i].size;
839 if (end > len) { 851
840 if (skb_cloned(skb)) { 852 if (end < len) {
841 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 853 offset = end;
842 return -ENOMEM; 854 continue;
843 }
844 if (len <= offset) {
845 put_page(skb_shinfo(skb)->frags[i].page);
846 skb_shinfo(skb)->nr_frags--;
847 } else {
848 skb_shinfo(skb)->frags[i].size = len - offset;
849 }
850 } 855 }
851 offset = end; 856
857 if (len > offset)
858 skb_shinfo(skb)->frags[i++].size = len - offset;
859
860 skb_shinfo(skb)->nr_frags = i;
861
862 for (; i < nfrags; i++)
863 put_page(skb_shinfo(skb)->frags[i].page);
864
865 if (skb_shinfo(skb)->frag_list)
866 skb_drop_fraglist(skb);
867 break;
852 } 868 }
853 869
854 if (offset < len) { 870 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
871 fragp = &frag->next) {
872 int end = offset + frag->len;
873
874 if (skb_shared(frag)) {
875 struct sk_buff *nfrag;
876
877 nfrag = skb_clone(frag, GFP_ATOMIC);
878 if (unlikely(!nfrag))
879 return -ENOMEM;
880
881 nfrag->next = frag->next;
882 frag = nfrag;
883 *fragp = frag;
884 }
885
886 if (end < len) {
887 offset = end;
888 continue;
889 }
890
891 if (end > len &&
892 unlikely((err = pskb_trim(frag, len - offset))))
893 return err;
894
895 if (frag->next)
896 skb_drop_list(&frag->next);
897 break;
898 }
899
900 if (len > skb_headlen(skb)) {
855 skb->data_len -= skb->len - len; 901 skb->data_len -= skb->len - len;
856 skb->len = len; 902 skb->len = len;
857 } else { 903 } else {
858 if (len <= skb_headlen(skb)) { 904 skb->len = len;
859 skb->len = len; 905 skb->data_len = 0;
860 skb->data_len = 0; 906 skb->tail = skb->data + len;
861 skb->tail = skb->data + len;
862 if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
863 skb_drop_fraglist(skb);
864 } else {
865 skb->data_len -= skb->len - len;
866 skb->len = len;
867 }
868 } 907 }
869 908
870 return 0; 909 return 0;
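The ___pskb_trim() rewrite above pulls the skb_cloned()/pskb_expand_head() handling out of the loop into one up-front check, then walks the page frags until it reaches the fragment that straddles the new length, shrinks that one, releases everything after it, and finally repeats the same walk over the frag_list skbs (cloning a shared list member before trimming it). A tiny standalone illustration of the frag-array part of that walk, with made-up fragment sizes:

#include <stdio.h>

int main(void)
{
	int frags[] = { 512, 512, 512, 512 };	/* hypothetical frag sizes */
	int nfrags = 4;
	int offset = 128;			/* skb_headlen() analogue */
	int len = 1000;				/* trim target */
	int i;

	for (i = 0; i < nfrags; i++) {
		int end = offset + frags[i];

		if (end < len) {		/* fragment kept untouched */
			offset = end;
			continue;
		}
		if (len > offset)		/* boundary fragment: shrink it */
			frags[i++] = len - offset;
		nfrags = i;			/* everything after it is dropped */
		break;
	}

	for (i = 0; i < nfrags; i++)
		printf("frag[%d] = %d\n", i, frags[i]);
	return 0;
}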
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index b7c98dbcdb81..248a6b666aff 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -29,6 +29,7 @@
29#include <linux/socket.h> 29#include <linux/socket.h>
30#include <linux/rtnetlink.h> /* for BUG_TRAP */ 30#include <linux/rtnetlink.h> /* for BUG_TRAP */
31#include <net/tcp.h> 31#include <net/tcp.h>
32#include <net/netdma.h>
32 33
33#define NET_DMA_DEFAULT_COPYBREAK 4096 34#define NET_DMA_DEFAULT_COPYBREAK 4096
34 35
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index 6048373c7186..b44c45504fb6 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -26,4 +26,6 @@ extern void dccp_feat_clean(struct dccp_minisock *dmsk);
26extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); 26extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk);
27extern int dccp_feat_init(struct dccp_minisock *dmsk); 27extern int dccp_feat_init(struct dccp_minisock *dmsk);
28 28
29extern int dccp_feat_default_sequence_window;
30
29#endif /* _DCCP_FEAT_H */ 31#endif /* _DCCP_FEAT_H */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index c3073e7e81d3..7f56f7e8f571 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -504,8 +504,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
504 ireq = inet_rsk(req); 504 ireq = inet_rsk(req);
505 ireq->loc_addr = daddr; 505 ireq->loc_addr = daddr;
506 ireq->rmt_addr = saddr; 506 ireq->rmt_addr = saddr;
507 req->rcv_wnd = 100; /* Fake, option parsing will get the 507 req->rcv_wnd = dccp_feat_default_sequence_window;
508 right value */
509 ireq->opt = NULL; 508 ireq->opt = NULL;
510 509
511 /* 510 /*
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ff42bc43263d..9f3d4d7cd0bf 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -31,6 +31,7 @@
31 31
32#include "dccp.h" 32#include "dccp.h"
33#include "ipv6.h" 33#include "ipv6.h"
34#include "feat.h"
34 35
35/* Socket used for sending RSTs and ACKs */ 36/* Socket used for sending RSTs and ACKs */
36static struct socket *dccp_v6_ctl_socket; 37static struct socket *dccp_v6_ctl_socket;
@@ -707,8 +708,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
707 ireq = inet_rsk(req); 708 ireq = inet_rsk(req);
708 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); 709 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
709 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); 710 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
710 req->rcv_wnd = 100; /* Fake, option parsing will get the 711 req->rcv_wnd = dccp_feat_default_sequence_window;
711 right value */
712 ireq6->pktopts = NULL; 712 ireq6->pktopts = NULL;
713 713
714 if (ipv6_opt_accepted(sk, skb) || 714 if (ipv6_opt_accepted(sk, skb) ||
diff --git a/net/dccp/options.c b/net/dccp/options.c
index c3cda1e39aa8..daf72bb671f0 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -29,6 +29,8 @@ int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO;
29int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; 29int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR;
30int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; 30int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT;
31 31
32EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window);
33
32void dccp_minisock_init(struct dccp_minisock *dmsk) 34void dccp_minisock_init(struct dccp_minisock *dmsk)
33{ 35{
34 dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window; 36 dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 98a25208440d..476455fbdb03 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -413,11 +413,7 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
413{ 413{
414 struct dn_ifaddr *ifa; 414 struct dn_ifaddr *ifa;
415 415
416 ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); 416 ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
417
418 if (ifa) {
419 memset(ifa, 0, sizeof(*ifa));
420 }
421 417
422 return ifa; 418 return ifa;
423} 419}
@@ -1105,10 +1101,9 @@ struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1105 return NULL; 1101 return NULL;
1106 1102
1107 *err = -ENOBUFS; 1103 *err = -ENOBUFS;
1108 if ((dn_db = kmalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) 1104 if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
1109 return NULL; 1105 return NULL;
1110 1106
1111 memset(dn_db, 0, sizeof(struct dn_dev));
1112 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1107 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
1113 smp_wmb(); 1108 smp_wmb();
1114 dev->dn_ptr = dn_db; 1109 dev->dn_ptr = dn_db;
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 0375077391b7..fa20e2efcfc1 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -283,11 +283,10 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
283 goto err_inval; 283 goto err_inval;
284 } 284 }
285 285
286 fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); 286 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
287 err = -ENOBUFS; 287 err = -ENOBUFS;
288 if (fi == NULL) 288 if (fi == NULL)
289 goto failure; 289 goto failure;
290 memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct dn_fib_nh));
291 290
292 fi->fib_protocol = r->rtm_protocol; 291 fi->fib_protocol = r->rtm_protocol;
293 fi->fib_nhs = nhs; 292 fi->fib_nhs = nhs;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 5ce9c9e0565c..ff0ebe99137d 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -580,12 +580,11 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
580{ 580{
581 struct seq_file *seq; 581 struct seq_file *seq;
582 int rc = -ENOMEM; 582 int rc = -ENOMEM;
583 struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 583 struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
584 584
585 if (!s) 585 if (!s)
586 goto out; 586 goto out;
587 587
588 memset(s, 0, sizeof(*s));
589 rc = seq_open(file, &dn_neigh_seq_ops); 588 rc = seq_open(file, &dn_neigh_seq_ops);
590 if (rc) 589 if (rc)
591 goto out_kfree; 590 goto out_kfree;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 22f321d9bf9d..6986be754ef2 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -151,10 +151,9 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
151 } 151 }
152 } 152 }
153 153
154 new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); 154 new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
155 if (!new_r) 155 if (!new_r)
156 return -ENOMEM; 156 return -ENOMEM;
157 memset(new_r, 0, sizeof(*new_r));
158 157
159 if (rta[RTA_SRC-1]) 158 if (rta[RTA_SRC-1])
160 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2); 159 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 37d9d0a1ac8c..e926c952e363 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -158,12 +158,10 @@ static void dn_rehash_zone(struct dn_zone *dz)
158 break; 158 break;
159 } 159 }
160 160
161 ht = kmalloc(new_divisor*sizeof(struct dn_fib_node*), GFP_KERNEL); 161 ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
162
163 if (ht == NULL) 162 if (ht == NULL)
164 return; 163 return;
165 164
166 memset(ht, 0, new_divisor*sizeof(struct dn_fib_node *));
167 write_lock_bh(&dn_fib_tables_lock); 165 write_lock_bh(&dn_fib_tables_lock);
168 old_ht = dz->dz_hash; 166 old_ht = dz->dz_hash;
169 dz->dz_hash = ht; 167 dz->dz_hash = ht;
@@ -184,11 +182,10 @@ static void dn_free_node(struct dn_fib_node *f)
184static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) 182static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
185{ 183{
186 int i; 184 int i;
187 struct dn_zone *dz = kmalloc(sizeof(struct dn_zone), GFP_KERNEL); 185 struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
188 if (!dz) 186 if (!dz)
189 return NULL; 187 return NULL;
190 188
191 memset(dz, 0, sizeof(struct dn_zone));
192 if (z) { 189 if (z) {
193 dz->dz_divisor = 16; 190 dz->dz_divisor = 16;
194 dz->dz_hashmask = 0x0F; 191 dz->dz_hashmask = 0x0F;
@@ -197,14 +194,12 @@ static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
197 dz->dz_hashmask = 0; 194 dz->dz_hashmask = 0;
198 } 195 }
199 196
200 dz->dz_hash = kmalloc(dz->dz_divisor*sizeof(struct dn_fib_node *), GFP_KERNEL); 197 dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
201
202 if (!dz->dz_hash) { 198 if (!dz->dz_hash) {
203 kfree(dz); 199 kfree(dz);
204 return NULL; 200 return NULL;
205 } 201 }
206 202
207 memset(dz->dz_hash, 0, dz->dz_divisor*sizeof(struct dn_fib_node*));
208 dz->dz_order = z; 203 dz->dz_order = z;
209 dz->dz_mask = dnet_make_mask(z); 204 dz->dz_mask = dnet_make_mask(z);
210 205
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 309ae4c6549a..4d66aac13483 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -673,12 +673,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
673 edev = dev->ec_ptr; 673 edev = dev->ec_ptr;
674 if (edev == NULL) { 674 if (edev == NULL) {
675 /* Magic up a new one. */ 675 /* Magic up a new one. */
676 edev = kmalloc(sizeof(struct ec_device), GFP_KERNEL); 676 edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
677 if (edev == NULL) { 677 if (edev == NULL) {
678 err = -ENOMEM; 678 err = -ENOMEM;
679 break; 679 break;
680 } 680 }
681 memset(edev, 0, sizeof(struct ec_device));
682 dev->ec_ptr = edev; 681 dev->ec_ptr = edev;
683 } else 682 } else
684 net2dev_map[edev->net] = NULL; 683 net2dev_map[edev->net] = NULL;
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
index dbb08528ddf5..f7e84e9d13ad 100644
--- a/net/ieee80211/Kconfig
+++ b/net/ieee80211/Kconfig
@@ -58,6 +58,7 @@ config IEEE80211_CRYPT_TKIP
58 depends on IEEE80211 && NET_RADIO 58 depends on IEEE80211 && NET_RADIO
59 select CRYPTO 59 select CRYPTO
60 select CRYPTO_MICHAEL_MIC 60 select CRYPTO_MICHAEL_MIC
61 select CRC32
61 ---help--- 62 ---help---
62 Include software based cipher suites in support of IEEE 802.11i 63 Include software based cipher suites in support of IEEE 802.11i
63 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled 64 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
index cb71d794a7d1..5ed0a98b2d76 100644
--- a/net/ieee80211/ieee80211_crypt.c
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -110,11 +110,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
110 unsigned long flags; 110 unsigned long flags;
111 struct ieee80211_crypto_alg *alg; 111 struct ieee80211_crypto_alg *alg;
112 112
113 alg = kmalloc(sizeof(*alg), GFP_KERNEL); 113 alg = kzalloc(sizeof(*alg), GFP_KERNEL);
114 if (alg == NULL) 114 if (alg == NULL)
115 return -ENOMEM; 115 return -ENOMEM;
116 116
117 memset(alg, 0, sizeof(*alg));
118 alg->ops = ops; 117 alg->ops = ops;
119 118
120 spin_lock_irqsave(&ieee80211_crypto_lock, flags); 119 spin_lock_irqsave(&ieee80211_crypto_lock, flags);
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 492647382ad0..ed90a8af1444 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -76,10 +76,9 @@ static void *ieee80211_ccmp_init(int key_idx)
76{ 76{
77 struct ieee80211_ccmp_data *priv; 77 struct ieee80211_ccmp_data *priv;
78 78
79 priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 79 priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
80 if (priv == NULL) 80 if (priv == NULL)
81 goto fail; 81 goto fail;
82 memset(priv, 0, sizeof(*priv));
83 priv->key_idx = key_idx; 82 priv->key_idx = key_idx;
84 83
85 priv->tfm = crypto_alloc_tfm("aes", 0); 84 priv->tfm = crypto_alloc_tfm("aes", 0);
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index c5a87724aabe..0ebf235f6939 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -39,10 +39,9 @@ static void *prism2_wep_init(int keyidx)
39{ 39{
40 struct prism2_wep_data *priv; 40 struct prism2_wep_data *priv;
41 41
42 priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 42 priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
43 if (priv == NULL) 43 if (priv == NULL)
44 goto fail; 44 goto fail;
45 memset(priv, 0, sizeof(*priv));
46 priv->key_idx = keyidx; 45 priv->key_idx = keyidx;
47 46
48 priv->tfm = crypto_alloc_tfm("arc4", 0); 47 priv->tfm = crypto_alloc_tfm("arc4", 0);
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index a78c4f845f66..5cb9cfd35397 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -369,11 +369,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
369 struct ieee80211_crypt_data *new_crypt; 369 struct ieee80211_crypt_data *new_crypt;
370 370
371 /* take WEP into use */ 371 /* take WEP into use */
372 new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data), 372 new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
373 GFP_KERNEL); 373 GFP_KERNEL);
374 if (new_crypt == NULL) 374 if (new_crypt == NULL)
375 return -ENOMEM; 375 return -ENOMEM;
376 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
377 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 376 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
378 if (!new_crypt->ops) { 377 if (!new_crypt->ops) {
379 request_module("ieee80211_crypt_wep"); 378 request_module("ieee80211_crypt_wep");
@@ -616,13 +615,11 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
616 615
617 ieee80211_crypt_delayed_deinit(ieee, crypt); 616 ieee80211_crypt_delayed_deinit(ieee, crypt);
618 617
619 new_crypt = (struct ieee80211_crypt_data *) 618 new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
620 kmalloc(sizeof(*new_crypt), GFP_KERNEL);
621 if (new_crypt == NULL) { 619 if (new_crypt == NULL) {
622 ret = -ENOMEM; 620 ret = -ENOMEM;
623 goto done; 621 goto done;
624 } 622 }
625 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
626 new_crypt->ops = ops; 623 new_crypt->ops = ops;
627 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 624 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
628 new_crypt->priv = new_crypt->ops->init(idx); 625 new_crypt->priv = new_crypt->ops->init(idx);
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index ebc33ca6e692..4cef39e171d0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -116,6 +116,16 @@ ieee80211softmac_auth_queue(void *data)
116 kfree(auth); 116 kfree(auth);
117} 117}
118 118
119/* Sends a response to an auth challenge (for shared key auth). */
120static void
121ieee80211softmac_auth_challenge_response(void *_aq)
122{
123 struct ieee80211softmac_auth_queue_item *aq = _aq;
124
125 /* Send our response */
126 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
127}
128
119/* Handle the auth response from the AP 129/* Handle the auth response from the AP
120 * This should be registered with ieee80211 as handle_auth 130 * This should be registered with ieee80211 as handle_auth
121 */ 131 */
@@ -197,24 +207,30 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
197 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE: 207 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE:
198 /* Check to make sure we have a challenge IE */ 208 /* Check to make sure we have a challenge IE */
199 data = (u8 *)auth->info_element; 209 data = (u8 *)auth->info_element;
200 if(*data++ != MFIE_TYPE_CHALLENGE){ 210 if (*data++ != MFIE_TYPE_CHALLENGE) {
201 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n"); 211 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n");
202 break; 212 break;
203 } 213 }
204 /* Save the challenge */ 214 /* Save the challenge */
205 spin_lock_irqsave(&mac->lock, flags); 215 spin_lock_irqsave(&mac->lock, flags);
206 net->challenge_len = *data++; 216 net->challenge_len = *data++;
207 if(net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) 217 if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
208 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; 218 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
209 if(net->challenge != NULL) 219 if (net->challenge != NULL)
210 kfree(net->challenge); 220 kfree(net->challenge);
211 net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC); 221 net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
212 memcpy(net->challenge, data, net->challenge_len); 222 memcpy(net->challenge, data, net->challenge_len);
213 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; 223 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
214 spin_unlock_irqrestore(&mac->lock, flags);
215 224
216 /* Send our response */ 225 /* We reuse the work struct from the auth request here.
217 ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); 226 * It is safe to do so as each one is per-request, and
227 * at this point (dealing with authentication response)
228 * we have obviously already sent the initial auth
229 * request. */
230 cancel_delayed_work(&aq->work);
231 INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
232 schedule_work(&aq->work);
233 spin_unlock_irqrestore(&mac->lock, flags);
218 return 0; 234 return 0;
219 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: 235 case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
220 kfree(net->challenge); 236 kfree(net->challenge);
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index 8cc8b20f5cda..6ae5a1dc7956 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -96,8 +96,7 @@ ieee80211softmac_alloc_mgt(u32 size)
96 if(size > IEEE80211_DATA_LEN) 96 if(size > IEEE80211_DATA_LEN)
97 return NULL; 97 return NULL;
98 /* Allocate the frame */ 98 /* Allocate the frame */
99 data = kmalloc(size, GFP_ATOMIC); 99 data = kzalloc(size, GFP_ATOMIC);
100 memset(data, 0, size);
101 return data; 100 return data;
102} 101}
103 102
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 8e748be36c5a..1366bc6ce6a5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -215,12 +215,10 @@ static int ah_init_state(struct xfrm_state *x)
215 if (x->encap) 215 if (x->encap)
216 goto error; 216 goto error;
217 217
218 ahp = kmalloc(sizeof(*ahp), GFP_KERNEL); 218 ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
219 if (ahp == NULL) 219 if (ahp == NULL)
220 return -ENOMEM; 220 return -ENOMEM;
221 221
222 memset(ahp, 0, sizeof(*ahp));
223
224 ahp->key = x->aalg->alg_key; 222 ahp->key = x->aalg->alg_key;
225 ahp->key_len = (x->aalg->alg_key_len+7)/8; 223 ahp->key_len = (x->aalg->alg_key_len+7)/8;
226 ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 224 ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 7b51b3bdb548..c8a3723bc001 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1372,12 +1372,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
1372{ 1372{
1373 struct seq_file *seq; 1373 struct seq_file *seq;
1374 int rc = -ENOMEM; 1374 int rc = -ENOMEM;
1375 struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1375 struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1376 1376
1377 if (!s) 1377 if (!s)
1378 goto out; 1378 goto out;
1379 1379
1380 memset(s, 0, sizeof(*s));
1381 rc = seq_open(file, &arp_seq_ops); 1380 rc = seq_open(file, &arp_seq_ops);
1382 if (rc) 1381 if (rc)
1383 goto out_kfree; 1382 goto out_kfree;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a7c65e9e5ec9..a6cc31d911eb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -93,10 +93,9 @@ static void devinet_sysctl_unregister(struct ipv4_devconf *p);
93 93
94static struct in_ifaddr *inet_alloc_ifa(void) 94static struct in_ifaddr *inet_alloc_ifa(void)
95{ 95{
96 struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); 96 struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
97 97
98 if (ifa) { 98 if (ifa) {
99 memset(ifa, 0, sizeof(*ifa));
100 INIT_RCU_HEAD(&ifa->rcu_head); 99 INIT_RCU_HEAD(&ifa->rcu_head);
101 } 100 }
102 101
@@ -140,10 +139,9 @@ struct in_device *inetdev_init(struct net_device *dev)
140 139
141 ASSERT_RTNL(); 140 ASSERT_RTNL();
142 141
143 in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL); 142 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
144 if (!in_dev) 143 if (!in_dev)
145 goto out; 144 goto out;
146 memset(in_dev, 0, sizeof(*in_dev));
147 INIT_RCU_HEAD(&in_dev->rcu_head); 145 INIT_RCU_HEAD(&in_dev->rcu_head);
148 memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf)); 146 memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf));
149 in_dev->cnf.sysctl = NULL; 147 in_dev->cnf.sysctl = NULL;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4e112738b3fa..fc2f8ce441de 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -316,12 +316,10 @@ static int esp_init_state(struct xfrm_state *x)
316 if (x->ealg == NULL) 316 if (x->ealg == NULL)
317 goto error; 317 goto error;
318 318
319 esp = kmalloc(sizeof(*esp), GFP_KERNEL); 319 esp = kzalloc(sizeof(*esp), GFP_KERNEL);
320 if (esp == NULL) 320 if (esp == NULL)
321 return -ENOMEM; 321 return -ENOMEM;
322 322
323 memset(esp, 0, sizeof(*esp));
324
325 if (x->aalg) { 323 if (x->aalg) {
326 struct xfrm_algo_desc *aalg_desc; 324 struct xfrm_algo_desc *aalg_desc;
327 325
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 3c1d32ad35f2..72c633b357cf 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -204,11 +204,10 @@ static struct fn_zone *
204fn_new_zone(struct fn_hash *table, int z) 204fn_new_zone(struct fn_hash *table, int z)
205{ 205{
206 int i; 206 int i;
207 struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL); 207 struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
208 if (!fz) 208 if (!fz)
209 return NULL; 209 return NULL;
210 210
211 memset(fz, 0, sizeof(struct fn_zone));
212 if (z) { 211 if (z) {
213 fz->fz_divisor = 16; 212 fz->fz_divisor = 16;
214 } else { 213 } else {
@@ -1046,7 +1045,7 @@ static int fib_seq_open(struct inode *inode, struct file *file)
1046{ 1045{
1047 struct seq_file *seq; 1046 struct seq_file *seq;
1048 int rc = -ENOMEM; 1047 int rc = -ENOMEM;
1049 struct fib_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1048 struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1050 1049
1051 if (!s) 1050 if (!s)
1052 goto out; 1051 goto out;
@@ -1057,7 +1056,6 @@ static int fib_seq_open(struct inode *inode, struct file *file)
1057 1056
1058 seq = file->private_data; 1057 seq = file->private_data;
1059 seq->private = s; 1058 seq->private = s;
1060 memset(s, 0, sizeof(*s));
1061out: 1059out:
1062 return rc; 1060 return rc;
1063out_kfree: 1061out_kfree:
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 773b12ba4e3c..79b04718bdfd 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -196,10 +196,9 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
196 } 196 }
197 } 197 }
198 198
199 new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); 199 new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
200 if (!new_r) 200 if (!new_r)
201 return -ENOMEM; 201 return -ENOMEM;
202 memset(new_r, 0, sizeof(*new_r));
203 202
204 if (rta[RTA_SRC-1]) 203 if (rta[RTA_SRC-1])
205 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4); 204 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5f87533684d5..9be53a8e72c3 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -709,11 +709,10 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
709 goto failure; 709 goto failure;
710 } 710 }
711 711
712 fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 712 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
713 if (fi == NULL) 713 if (fi == NULL)
714 goto failure; 714 goto failure;
715 fib_info_cnt++; 715 fib_info_cnt++;
716 memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh));
717 716
718 fi->fib_protocol = r->rtm_protocol; 717 fi->fib_protocol = r->rtm_protocol;
719 718
@@ -962,10 +961,6 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
962 rtm->rtm_protocol = fi->fib_protocol; 961 rtm->rtm_protocol = fi->fib_protocol;
963 if (fi->fib_priority) 962 if (fi->fib_priority)
964 RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority); 963 RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
965#ifdef CONFIG_NET_CLS_ROUTE
966 if (fi->fib_nh[0].nh_tclassid)
967 RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
968#endif
969 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 964 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
970 goto rtattr_failure; 965 goto rtattr_failure;
971 if (fi->fib_prefsrc) 966 if (fi->fib_prefsrc)
@@ -975,6 +970,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
975 RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw); 970 RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw);
976 if (fi->fib_nh->nh_oif) 971 if (fi->fib_nh->nh_oif)
977 RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif); 972 RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
973#ifdef CONFIG_NET_CLS_ROUTE
974 if (fi->fib_nh[0].nh_tclassid)
975 RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
976#endif
978 } 977 }
979#ifdef CONFIG_IP_ROUTE_MULTIPATH 978#ifdef CONFIG_IP_ROUTE_MULTIPATH
980 if (fi->fib_nhs > 1) { 979 if (fi->fib_nhs > 1) {
@@ -993,6 +992,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
993 nhp->rtnh_ifindex = nh->nh_oif; 992 nhp->rtnh_ifindex = nh->nh_oif;
994 if (nh->nh_gw) 993 if (nh->nh_gw)
995 RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw); 994 RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw);
995#ifdef CONFIG_NET_CLS_ROUTE
996 if (nh->nh_tclassid)
997 RTA_PUT(skb, RTA_FLOW, 4, &nh->nh_tclassid);
998#endif
996 nhp->rtnh_len = skb->tail - (unsigned char*)nhp; 999 nhp->rtnh_len = skb->tail - (unsigned char*)nhp;
997 } endfor_nexthops(fi); 1000 } endfor_nexthops(fi);
998 mp_head->rta_type = RTA_MULTIPATH; 1001 mp_head->rta_type = RTA_MULTIPATH;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d299c8e547d6..9f4b752f5a33 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1028,10 +1028,9 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1028 * for deleted items allows change reports to use common code with 1028 * for deleted items allows change reports to use common code with
1029 * non-deleted or query-response MCA's. 1029 * non-deleted or query-response MCA's.
1030 */ 1030 */
1031 pmc = kmalloc(sizeof(*pmc), GFP_KERNEL); 1031 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
1032 if (!pmc) 1032 if (!pmc)
1033 return; 1033 return;
1034 memset(pmc, 0, sizeof(*pmc));
1035 spin_lock_bh(&im->lock); 1034 spin_lock_bh(&im->lock);
1036 pmc->interface = im->interface; 1035 pmc->interface = im->interface;
1037 in_dev_hold(in_dev); 1036 in_dev_hold(in_dev);
@@ -1529,10 +1528,9 @@ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
1529 psf_prev = psf; 1528 psf_prev = psf;
1530 } 1529 }
1531 if (!psf) { 1530 if (!psf) {
1532 psf = kmalloc(sizeof(*psf), GFP_ATOMIC); 1531 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
1533 if (!psf) 1532 if (!psf)
1534 return -ENOBUFS; 1533 return -ENOBUFS;
1535 memset(psf, 0, sizeof(*psf));
1536 psf->sf_inaddr = *psfsrc; 1534 psf->sf_inaddr = *psfsrc;
1537 if (psf_prev) { 1535 if (psf_prev) {
1538 psf_prev->sf_next = psf; 1536 psf_prev->sf_next = psf;
@@ -2380,7 +2378,7 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
2380{ 2378{
2381 struct seq_file *seq; 2379 struct seq_file *seq;
2382 int rc = -ENOMEM; 2380 int rc = -ENOMEM;
2383 struct igmp_mc_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 2381 struct igmp_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
2384 2382
2385 if (!s) 2383 if (!s)
2386 goto out; 2384 goto out;
@@ -2390,7 +2388,6 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
2390 2388
2391 seq = file->private_data; 2389 seq = file->private_data;
2392 seq->private = s; 2390 seq->private = s;
2393 memset(s, 0, sizeof(*s));
2394out: 2391out:
2395 return rc; 2392 return rc;
2396out_kfree: 2393out_kfree:
@@ -2555,7 +2552,7 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
2555{ 2552{
2556 struct seq_file *seq; 2553 struct seq_file *seq;
2557 int rc = -ENOMEM; 2554 int rc = -ENOMEM;
2558 struct igmp_mcf_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 2555 struct igmp_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
2559 2556
2560 if (!s) 2557 if (!s)
2561 goto out; 2558 goto out;
@@ -2565,7 +2562,6 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
2565 2562
2566 seq = file->private_data; 2563 seq = file->private_data;
2567 seq->private = s; 2564 seq->private = s;
2568 memset(s, 0, sizeof(*s));
2569out: 2565out:
2570 return rc; 2566 return rc;
2571out_kfree: 2567out_kfree:
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8e7e41b66c79..492858e6faf0 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -909,11 +909,10 @@ static int __init inet_diag_init(void)
909 sizeof(struct inet_diag_handler *)); 909 sizeof(struct inet_diag_handler *));
910 int err = -ENOMEM; 910 int err = -ENOMEM;
911 911
912 inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL); 912 inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
913 if (!inet_diag_table) 913 if (!inet_diag_table)
914 goto out; 914 goto out;
915 915
916 memset(inet_diag_table, 0, inet_diag_table_size);
917 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv, 916 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
918 THIS_MODULE); 917 THIS_MODULE);
919 if (idiagnl == NULL) 918 if (idiagnl == NULL)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6ff9b10d9563..0f9b3a31997b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -617,7 +617,6 @@ static int ipgre_rcv(struct sk_buff *skb)
617 skb->mac.raw = skb->nh.raw; 617 skb->mac.raw = skb->nh.raw;
618 skb->nh.raw = __pskb_pull(skb, offset); 618 skb->nh.raw = __pskb_pull(skb, offset);
619 skb_postpull_rcsum(skb, skb->h.raw, offset); 619 skb_postpull_rcsum(skb, skb->h.raw, offset);
620 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
621 skb->pkt_type = PACKET_HOST; 620 skb->pkt_type = PACKET_HOST;
622#ifdef CONFIG_NET_IPGRE_BROADCAST 621#ifdef CONFIG_NET_IPGRE_BROADCAST
623 if (MULTICAST(iph->daddr)) { 622 if (MULTICAST(iph->daddr)) {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e1a7dba2fa8a..212734ca238f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -428,6 +428,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
428 goto drop; 428 goto drop;
429 } 429 }
430 430
431 /* Remove any debris in the socket control block */
432 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
433
431 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, 434 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
432 ip_rcv_finish); 435 ip_rcv_finish);
433 436
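The memset added to ip_rcv() above clears the IP control block once at the entry to IPv4 processing; that single clear is what allows the per-path memset(&IPCB(skb)->opt, ...) calls to be removed from ip_gre.c earlier and from ip_options.c, ipip.c and ipmr.c below. A small user-space analogue of the design choice, clearing scratch state once at the entry point rather than in every handler (the struct and names here are illustrative only):

#include <stdio.h>
#include <string.h>

struct pkt { char cb[48]; };		/* per-packet scratch, like skb->cb */

static void rx_entry(struct pkt *p)
{
	memset(p->cb, 0, sizeof(p->cb));
	/* ... dispatch to handlers that may now assume cb is zeroed ... */
}

int main(void)
{
	struct pkt p;

	memset(p.cb, 'x', sizeof(p.cb));	/* simulate stale debris */
	rx_entry(&p);
	printf("cb[0] after entry: %d\n", p.cb[0]);	/* prints 0 */
	return 0;
}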
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index cbcae6544622..406056edc02b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -256,7 +256,6 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
256 256
257 if (!opt) { 257 if (!opt) {
258 opt = &(IPCB(skb)->opt); 258 opt = &(IPCB(skb)->opt);
259 memset(opt, 0, sizeof(struct ip_options));
260 iph = skb->nh.raw; 259 iph = skb->nh.raw;
261 opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); 260 opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr);
262 optptr = iph + sizeof(struct iphdr); 261 optptr = iph + sizeof(struct iphdr);
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 8a8b5cf2f7fe..a0c28b2b756e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -410,11 +410,10 @@ static int ipcomp_init_state(struct xfrm_state *x)
410 goto out; 410 goto out;
411 411
412 err = -ENOMEM; 412 err = -ENOMEM;
413 ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL); 413 ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
414 if (!ipcd) 414 if (!ipcd)
415 goto out; 415 goto out;
416 416
417 memset(ipcd, 0, sizeof(*ipcd));
418 x->props.header_len = 0; 417 x->props.header_len = 0;
419 if (x->props.mode) 418 if (x->props.mode)
420 x->props.header_len += sizeof(struct iphdr); 419 x->props.header_len += sizeof(struct iphdr);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 3291d5192aad..76ab50b0d6ef 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -487,7 +487,6 @@ static int ipip_rcv(struct sk_buff *skb)
487 487
488 skb->mac.raw = skb->nh.raw; 488 skb->mac.raw = skb->nh.raw;
489 skb->nh.raw = skb->data; 489 skb->nh.raw = skb->data;
490 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
491 skb->protocol = htons(ETH_P_IP); 490 skb->protocol = htons(ETH_P_IP);
492 skb->pkt_type = PACKET_HOST; 491 skb->pkt_type = PACKET_HOST;
493 492
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ba33f8621c67..85893eef6b16 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1461,7 +1461,6 @@ int pim_rcv_v1(struct sk_buff * skb)
1461 skb_pull(skb, (u8*)encap - skb->data); 1461 skb_pull(skb, (u8*)encap - skb->data);
1462 skb->nh.iph = (struct iphdr *)skb->data; 1462 skb->nh.iph = (struct iphdr *)skb->data;
1463 skb->dev = reg_dev; 1463 skb->dev = reg_dev;
1464 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1465 skb->protocol = htons(ETH_P_IP); 1464 skb->protocol = htons(ETH_P_IP);
1466 skb->ip_summed = 0; 1465 skb->ip_summed = 0;
1467 skb->pkt_type = PACKET_HOST; 1466 skb->pkt_type = PACKET_HOST;
@@ -1517,7 +1516,6 @@ static int pim_rcv(struct sk_buff * skb)
1517 skb_pull(skb, (u8*)encap - skb->data); 1516 skb_pull(skb, (u8*)encap - skb->data);
1518 skb->nh.iph = (struct iphdr *)skb->data; 1517 skb->nh.iph = (struct iphdr *)skb->data;
1519 skb->dev = reg_dev; 1518 skb->dev = reg_dev;
1520 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1521 skb->protocol = htons(ETH_P_IP); 1519 skb->protocol = htons(ETH_P_IP);
1522 skb->ip_summed = 0; 1520 skb->ip_summed = 0;
1523 skb->pkt_type = PACKET_HOST; 1521 skb->pkt_type = PACKET_HOST;
@@ -1580,6 +1578,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1580 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); 1578 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
1581 1579
1582 if (cache==NULL) { 1580 if (cache==NULL) {
1581 struct sk_buff *skb2;
1583 struct net_device *dev; 1582 struct net_device *dev;
1584 int vif; 1583 int vif;
1585 1584
@@ -1593,12 +1592,18 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1593 read_unlock(&mrt_lock); 1592 read_unlock(&mrt_lock);
1594 return -ENODEV; 1593 return -ENODEV;
1595 } 1594 }
1596 skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); 1595 skb2 = skb_clone(skb, GFP_ATOMIC);
1597 skb->nh.iph->ihl = sizeof(struct iphdr)>>2; 1596 if (!skb2) {
1598 skb->nh.iph->saddr = rt->rt_src; 1597 read_unlock(&mrt_lock);
1599 skb->nh.iph->daddr = rt->rt_dst; 1598 return -ENOMEM;
1600 skb->nh.iph->version = 0; 1599 }
1601 err = ipmr_cache_unresolved(vif, skb); 1600
1601 skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr));
1602 skb2->nh.iph->ihl = sizeof(struct iphdr)>>2;
1603 skb2->nh.iph->saddr = rt->rt_src;
1604 skb2->nh.iph->daddr = rt->rt_dst;
1605 skb2->nh.iph->version = 0;
1606 err = ipmr_cache_unresolved(vif, skb2);
1602 read_unlock(&mrt_lock); 1607 read_unlock(&mrt_lock);
1603 return err; 1608 return err;
1604 } 1609 }
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f28ec6882162..6a28fafe910c 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -735,12 +735,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
735 if (atype != RTN_LOCAL && atype != RTN_UNICAST) 735 if (atype != RTN_LOCAL && atype != RTN_UNICAST)
736 return -EINVAL; 736 return -EINVAL;
737 737
738 dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 738 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
739 if (dest == NULL) { 739 if (dest == NULL) {
740 IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n"); 740 IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n");
741 return -ENOMEM; 741 return -ENOMEM;
742 } 742 }
743 memset(dest, 0, sizeof(struct ip_vs_dest));
744 743
745 dest->protocol = svc->protocol; 744 dest->protocol = svc->protocol;
746 dest->vaddr = svc->addr; 745 dest->vaddr = svc->addr;
@@ -1050,14 +1049,12 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1050 goto out_mod_dec; 1049 goto out_mod_dec;
1051 } 1050 }
1052 1051
1053 svc = (struct ip_vs_service *) 1052 svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
1054 kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
1055 if (svc == NULL) { 1053 if (svc == NULL) {
1056 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); 1054 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
1057 ret = -ENOMEM; 1055 ret = -ENOMEM;
1058 goto out_err; 1056 goto out_err;
1059 } 1057 }
1060 memset(svc, 0, sizeof(struct ip_vs_service));
1061 1058
1062 /* I'm the first user of the service */ 1059 /* I'm the first user of the service */
1063 atomic_set(&svc->usecnt, 1); 1060 atomic_set(&svc->usecnt, 1);
@@ -1797,7 +1794,7 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
1797{ 1794{
1798 struct seq_file *seq; 1795 struct seq_file *seq;
1799 int rc = -ENOMEM; 1796 int rc = -ENOMEM;
1800 struct ip_vs_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); 1797 struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL);
1801 1798
1802 if (!s) 1799 if (!s)
1803 goto out; 1800 goto out;
@@ -1808,7 +1805,6 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
1808 1805
1809 seq = file->private_data; 1806 seq = file->private_data;
1810 seq->private = s; 1807 seq->private = s;
1811 memset(s, 0, sizeof(*s));
1812out: 1808out:
1813 return rc; 1809 return rc;
1814out_kfree: 1810out_kfree:
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 4c1940381ba0..7d68b80c4c19 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -123,11 +123,10 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
123{ 123{
124 struct ip_vs_estimator *est; 124 struct ip_vs_estimator *est;
125 125
126 est = kmalloc(sizeof(*est), GFP_KERNEL); 126 est = kzalloc(sizeof(*est), GFP_KERNEL);
127 if (est == NULL) 127 if (est == NULL)
128 return -ENOMEM; 128 return -ENOMEM;
129 129
130 memset(est, 0, sizeof(*est));
131 est->stats = stats; 130 est->stats = stats;
132 est->last_conns = stats->conns; 131 est->last_conns = stats->conns;
133 est->cps = stats->cps<<10; 132 est->cps = stats->cps<<10;
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index af35235672d5..9a39e2969712 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
1200 tuple.dst.protonum = IPPROTO_TCP; 1200 tuple.dst.protonum = IPPROTO_TCP;
1201 1201
1202 exp = __ip_conntrack_expect_find(&tuple); 1202 exp = __ip_conntrack_expect_find(&tuple);
1203 if (exp->master == ct) 1203 if (exp && exp->master == ct)
1204 return exp; 1204 return exp;
1205 return NULL; 1205 return NULL;
1206} 1206}
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 7bd3c22003a2..7a9fa04a467a 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -534,6 +534,8 @@ static struct nf_hook_ops ip_conntrack_ops[] = {
534 534
535/* Sysctl support */ 535/* Sysctl support */
536 536
537int ip_conntrack_checksum = 1;
538
537#ifdef CONFIG_SYSCTL 539#ifdef CONFIG_SYSCTL
538 540
539/* From ip_conntrack_core.c */ 541/* From ip_conntrack_core.c */
@@ -568,8 +570,6 @@ extern unsigned int ip_ct_generic_timeout;
568static int log_invalid_proto_min = 0; 570static int log_invalid_proto_min = 0;
569static int log_invalid_proto_max = 255; 571static int log_invalid_proto_max = 255;
570 572
571int ip_conntrack_checksum = 1;
572
573static struct ctl_table_header *ip_ct_sysctl_header; 573static struct ctl_table_header *ip_ct_sysctl_header;
574 574
575static ctl_table ip_ct_sysctl_table[] = { 575static ctl_table ip_ct_sysctl_table[] = {
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index 0b1b416759cc..18b7fbdccb61 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -1255,9 +1255,9 @@ static int help(struct sk_buff **pskb,
1255 struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); 1255 struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
1256 1256
1257 /* SNMP replies and originating SNMP traps get mangled */ 1257 /* SNMP replies and originating SNMP traps get mangled */
1258 if (udph->source == ntohs(SNMP_PORT) && dir != IP_CT_DIR_REPLY) 1258 if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
1259 return NF_ACCEPT; 1259 return NF_ACCEPT;
1260 if (udph->dest == ntohs(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) 1260 if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
1261 return NF_ACCEPT; 1261 return NF_ACCEPT;
1262 1262
1263 /* No NAT? */ 1263 /* No NAT? */
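
udph->source and udph->dest are big-endian on-the-wire values, so the constant is the thing that gets converted: htons(SNMP_PORT), not ntohs() applied on the wrong side. Numerically htons() and ntohs() perform the same byte swap, so behaviour does not change; the fix makes the endianness of each operand explicit and keeps sparse's __be16 checking honest. A userspace illustration of comparing like with like:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define SNMP_PORT 161                   /* host-order constant */

int main(void)
{
        uint16_t wire_port = htons(SNMP_PORT);  /* as it sits in a UDP header */

        /* Convert the host-order constant, not the network-order field. */
        if (wire_port == htons(SNMP_PORT))
                puts("SNMP reply port");
        return 0;
}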
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index cbffeae3f565..d994c5f5744c 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -172,11 +172,10 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
172 struct clusterip_config *c; 172 struct clusterip_config *c;
173 char buffer[16]; 173 char buffer[16];
174 174
175 c = kmalloc(sizeof(*c), GFP_ATOMIC); 175 c = kzalloc(sizeof(*c), GFP_ATOMIC);
176 if (!c) 176 if (!c)
177 return NULL; 177 return NULL;
178 178
179 memset(c, 0, sizeof(*c));
180 c->dev = dev; 179 c->dev = dev;
181 c->clusterip = ip; 180 c->clusterip = ip;
182 memcpy(&c->clustermac, &i->clustermac, ETH_ALEN); 181 memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bd221ec3f81e..62b2762a2420 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -609,6 +609,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
609 if (sin) { 609 if (sin) {
610 sin->sin_family = AF_INET; 610 sin->sin_family = AF_INET;
611 sin->sin_addr.s_addr = skb->nh.iph->saddr; 611 sin->sin_addr.s_addr = skb->nh.iph->saddr;
612 sin->sin_port = 0;
612 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 613 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
613 } 614 }
614 if (inet->cmsg_flags) 615 if (inet->cmsg_flags)
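
The added sin->sin_port = 0 closes a small information leak: the sockaddr_in returned by recvmsg() on a raw socket is built on the kernel stack, and raw sockets have no port, so the field was previously handed back uninitialized. Every member is now written explicitly, roughly as in this sketch:

#include <netinet/in.h>
#include <stdint.h>
#include <string.h>

/* Fill a sender address for a port-less protocol: write every field so
 * no stale stack bytes reach the caller. */
static void fill_raw_sender(struct sockaddr_in *sin, uint32_t saddr_be)
{
        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = saddr_be;
        sin->sin_port = 0;                      /* raw sockets carry no port */
        memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}

The rawv6_recvmsg() hunk below does the same for sin6_port.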
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a891133f00e4..f6f39e814291 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1640,10 +1640,9 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
1640 if (unlikely(afinfo == NULL)) 1640 if (unlikely(afinfo == NULL))
1641 return -EINVAL; 1641 return -EINVAL;
1642 1642
1643 s = kmalloc(sizeof(*s), GFP_KERNEL); 1643 s = kzalloc(sizeof(*s), GFP_KERNEL);
1644 if (!s) 1644 if (!s)
1645 return -ENOMEM; 1645 return -ENOMEM;
1646 memset(s, 0, sizeof(*s));
1647 s->family = afinfo->family; 1646 s->family = afinfo->family;
1648 s->seq_ops.start = tcp_seq_start; 1647 s->seq_ops.start = tcp_seq_start;
1649 s->seq_ops.next = tcp_seq_next; 1648 s->seq_ops.next = tcp_seq_next;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9bfcddad695b..f136cec96d95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1468,11 +1468,10 @@ static int udp_seq_open(struct inode *inode, struct file *file)
1468 struct udp_seq_afinfo *afinfo = PDE(inode)->data; 1468 struct udp_seq_afinfo *afinfo = PDE(inode)->data;
1469 struct seq_file *seq; 1469 struct seq_file *seq;
1470 int rc = -ENOMEM; 1470 int rc = -ENOMEM;
1471 struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1471 struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1472 1472
1473 if (!s) 1473 if (!s)
1474 goto out; 1474 goto out;
1475 memset(s, 0, sizeof(*s));
1476 s->family = afinfo->family; 1475 s->family = afinfo->family;
1477 s->seq_ops.start = udp_seq_start; 1476 s->seq_ops.start = udp_seq_start;
1478 s->seq_ops.next = udp_seq_next; 1477 s->seq_ops.next = udp_seq_next;
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index f8d880beb12f..13cafbe56ce3 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,6 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 skb->mac.raw = memmove(skb->data - skb->mac_len, 92 skb->mac.raw = memmove(skb->data - skb->mac_len,
93 skb->mac.raw, skb->mac_len); 93 skb->mac.raw, skb->mac_len);
94 skb->nh.raw = skb->data; 94 skb->nh.raw = skb->data;
95 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
96 err = 0; 95 err = 0;
97 96
98out: 97out:
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index df8f051c0fce..25c2a9e03895 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -71,6 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
71 goto out; 71 goto out;
72 } 72 }
73 73
74 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
75
74 /* 76 /*
75 * Store incoming device index. When the packet will 77 * Store incoming device index. When the packet will
76 * be queued, we cannot refer to skb->dev anymore. 78 * be queued, we cannot refer to skb->dev anymore.
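
This addition centralizes the clearing of the IPv6 per-packet control block: ipv6_rcv() now wipes IP6CB(skb) once for every incoming packet, which appears to be why the control-block memsets in the tunnel input paths (xfrm4_mode_tunnel.c above, sit.c below) could be dropped; decapsulated packets re-enter the stack through a receive path that starts from a clean control block. The idea, with the skb and cb types reduced to stand-ins:

#include <string.h>

#define SKB_CB_SIZE 48                   /* illustrative size only */

struct skb_sketch {
        unsigned char cb[SKB_CB_SIZE];   /* per-layer scratch space */
};

struct ip6cb_sketch {                    /* simplified inet6_skb_parm */
        int iif;
        unsigned short flags;
};

#define IP6CB(skb) ((struct ip6cb_sketch *)((skb)->cb))

/* Clear the IPv6 control block once, at the single entry point, so every
 * later layer starts from known-zero state. */
static void ipv6_rcv_sketch(struct skb_sketch *skb)
{
        memset(IP6CB(skb), 0, sizeof(struct ip6cb_sketch));
        /* ... record incoming interface, parse headers, deliver ... */
}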
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index bc77c0e1a943..84d7ebdb9d21 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -567,10 +567,9 @@ static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
567 567
568 int opt_len = sizeof(*opt) + 8; 568 int opt_len = sizeof(*opt) + 8;
569 569
570 if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) { 570 if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) {
571 return NULL; 571 return NULL;
572 } 572 }
573 memset(opt, 0, opt_len);
574 opt->tot_len = opt_len; 573 opt->tot_len = opt_len;
575 opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1); 574 opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1);
576 opt->opt_nflen = 8; 575 opt->opt_nflen = 8;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fa1ce0ae123e..d57e61ce4a7d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -411,6 +411,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
411 /* Copy the address. */ 411 /* Copy the address. */
412 if (sin6) { 412 if (sin6) {
413 sin6->sin6_family = AF_INET6; 413 sin6->sin6_family = AF_INET6;
414 sin6->sin6_port = 0;
414 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); 415 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
415 sin6->sin6_flowinfo = 0; 416 sin6->sin6_flowinfo = 0;
416 sin6->sin6_scope_id = 0; 417 sin6->sin6_scope_id = 0;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c56aeece2bf5..836eecd7e62b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -380,7 +380,6 @@ static int ipip6_rcv(struct sk_buff *skb)
380 secpath_reset(skb); 380 secpath_reset(skb);
381 skb->mac.raw = skb->nh.raw; 381 skb->mac.raw = skb->nh.raw;
382 skb->nh.raw = skb->data; 382 skb->nh.raw = skb->data;
383 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
384 IPCB(skb)->flags = 0; 383 IPCB(skb)->flags = 0;
385 skb->protocol = htons(ETH_P_IPV6); 384 skb->protocol = htons(ETH_P_IPV6);
386 skb->pkt_type = PACKET_HOST; 385 skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 6b44fe8516c3..c8f9369c2a87 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -31,27 +31,6 @@
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33 33
34#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
35# define X6TDEBUG 3
36#else
37# define X6TDEBUG 1
38#endif
39
40#define X6TPRINTK(fmt, args...) printk(fmt, ## args)
41#define X6TNOPRINTK(fmt, args...) do { ; } while(0)
42
43#if X6TDEBUG >= 1
44# define X6TPRINTK1 X6TPRINTK
45#else
46# define X6TPRINTK1 X6TNOPRINTK
47#endif
48
49#if X6TDEBUG >= 3
50# define X6TPRINTK3 X6TPRINTK
51#else
52# define X6TPRINTK3 X6TNOPRINTK
53#endif
54
55/* 34/*
56 * xfrm_tunnel_spi things are for allocating unique id ("spi") 35 * xfrm_tunnel_spi things are for allocating unique id ("spi")
57 * per xfrm_address_t. 36 * per xfrm_address_t.
@@ -62,15 +41,8 @@ struct xfrm6_tunnel_spi {
62 xfrm_address_t addr; 41 xfrm_address_t addr;
63 u32 spi; 42 u32 spi;
64 atomic_t refcnt; 43 atomic_t refcnt;
65#ifdef XFRM6_TUNNEL_SPI_MAGIC
66 u32 magic;
67#endif
68}; 44};
69 45
70#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
71# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
72#endif
73
74static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); 46static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);
75 47
76static u32 xfrm6_tunnel_spi; 48static u32 xfrm6_tunnel_spi;
@@ -86,43 +58,15 @@ static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
86static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; 58static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
87static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; 59static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
88 60
89#ifdef XFRM6_TUNNEL_SPI_MAGIC
90static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
91 const char *name)
92{
93 if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) {
94 X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
95 "at %p has corrupted magic %08x "
96 "(should be %08x)\n",
97 name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC);
98 return -1;
99 }
100 return 0;
101}
102#else
103static int inline x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
104 const char *name)
105{
106 return 0;
107}
108#endif
109
110#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__)
111
112
113static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 61static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
114{ 62{
115 unsigned h; 63 unsigned h;
116 64
117 X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr);
118
119 h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]; 65 h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
120 h ^= h >> 16; 66 h ^= h >> 16;
121 h ^= h >> 8; 67 h ^= h >> 8;
122 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; 68 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
123 69
124 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h);
125
126 return h; 70 return h;
127} 71}
128 72
@@ -136,19 +80,13 @@ static int xfrm6_tunnel_spi_init(void)
136{ 80{
137 int i; 81 int i;
138 82
139 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
140
141 xfrm6_tunnel_spi = 0; 83 xfrm6_tunnel_spi = 0;
142 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", 84 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
143 sizeof(struct xfrm6_tunnel_spi), 85 sizeof(struct xfrm6_tunnel_spi),
144 0, SLAB_HWCACHE_ALIGN, 86 0, SLAB_HWCACHE_ALIGN,
145 NULL, NULL); 87 NULL, NULL);
146 if (!xfrm6_tunnel_spi_kmem) { 88 if (!xfrm6_tunnel_spi_kmem)
147 X6TPRINTK1(KERN_ERR
148 "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n",
149 __FUNCTION__);
150 return -ENOMEM; 89 return -ENOMEM;
151 }
152 90
153 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) 91 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
154 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]); 92 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
@@ -161,22 +99,16 @@ static void xfrm6_tunnel_spi_fini(void)
161{ 99{
162 int i; 100 int i;
163 101
164 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
165
166 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) { 102 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
167 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i])) 103 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
168 goto err; 104 return;
169 } 105 }
170 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) { 106 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
171 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) 107 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
172 goto err; 108 return;
173 } 109 }
174 kmem_cache_destroy(xfrm6_tunnel_spi_kmem); 110 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
175 xfrm6_tunnel_spi_kmem = NULL; 111 xfrm6_tunnel_spi_kmem = NULL;
176 return;
177err:
178 X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__);
179 return;
180} 112}
181 113
182static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) 114static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
@@ -184,19 +116,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
184 struct xfrm6_tunnel_spi *x6spi; 116 struct xfrm6_tunnel_spi *x6spi;
185 struct hlist_node *pos; 117 struct hlist_node *pos;
186 118
187 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
188
189 hlist_for_each_entry(x6spi, pos, 119 hlist_for_each_entry(x6spi, pos,
190 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 120 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
191 list_byaddr) { 121 list_byaddr) {
192 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 122 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
193 X6SPI_CHECK_MAGIC(x6spi);
194 X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi);
195 return x6spi; 123 return x6spi;
196 }
197 } 124 }
198 125
199 X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__);
200 return NULL; 126 return NULL;
201} 127}
202 128
@@ -205,8 +131,6 @@ u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
205 struct xfrm6_tunnel_spi *x6spi; 131 struct xfrm6_tunnel_spi *x6spi;
206 u32 spi; 132 u32 spi;
207 133
208 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
209
210 read_lock_bh(&xfrm6_tunnel_spi_lock); 134 read_lock_bh(&xfrm6_tunnel_spi_lock);
211 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 135 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
212 spi = x6spi ? x6spi->spi : 0; 136 spi = x6spi ? x6spi->spi : 0;
@@ -223,8 +147,6 @@ static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
223 struct hlist_node *pos; 147 struct hlist_node *pos;
224 unsigned index; 148 unsigned index;
225 149
226 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
227
228 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || 150 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
229 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) 151 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
230 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; 152 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
@@ -258,18 +180,10 @@ try_next_2:;
258 spi = 0; 180 spi = 0;
259 goto out; 181 goto out;
260alloc_spi: 182alloc_spi:
261 X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n",
262 __FUNCTION__,
263 NIP6(*(struct in6_addr *)saddr));
264 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); 183 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
265 if (!x6spi) { 184 if (!x6spi)
266 X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n",
267 __FUNCTION__);
268 goto out; 185 goto out;
269 } 186
270#ifdef XFRM6_TUNNEL_SPI_MAGIC
271 x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC;
272#endif
273 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); 187 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
274 x6spi->spi = spi; 188 x6spi->spi = spi;
275 atomic_set(&x6spi->refcnt, 1); 189 atomic_set(&x6spi->refcnt, 1);
@@ -278,9 +192,7 @@ alloc_spi:
278 192
279 index = xfrm6_tunnel_spi_hash_byaddr(saddr); 193 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
280 hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); 194 hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
281 X6SPI_CHECK_MAGIC(x6spi);
282out: 195out:
283 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
284 return spi; 196 return spi;
285} 197}
286 198
@@ -289,8 +201,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
289 struct xfrm6_tunnel_spi *x6spi; 201 struct xfrm6_tunnel_spi *x6spi;
290 u32 spi; 202 u32 spi;
291 203
292 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
293
294 write_lock_bh(&xfrm6_tunnel_spi_lock); 204 write_lock_bh(&xfrm6_tunnel_spi_lock);
295 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 205 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
296 if (x6spi) { 206 if (x6spi) {
@@ -300,8 +210,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
300 spi = __xfrm6_tunnel_alloc_spi(saddr); 210 spi = __xfrm6_tunnel_alloc_spi(saddr);
301 write_unlock_bh(&xfrm6_tunnel_spi_lock); 211 write_unlock_bh(&xfrm6_tunnel_spi_lock);
302 212
303 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
304
305 return spi; 213 return spi;
306} 214}
307 215
@@ -312,8 +220,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
312 struct xfrm6_tunnel_spi *x6spi; 220 struct xfrm6_tunnel_spi *x6spi;
313 struct hlist_node *pos, *n; 221 struct hlist_node *pos, *n;
314 222
315 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
316
317 write_lock_bh(&xfrm6_tunnel_spi_lock); 223 write_lock_bh(&xfrm6_tunnel_spi_lock);
318 224
319 hlist_for_each_entry_safe(x6spi, pos, n, 225 hlist_for_each_entry_safe(x6spi, pos, n,
@@ -321,12 +227,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
321 list_byaddr) 227 list_byaddr)
322 { 228 {
323 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 229 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
324 X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT
325 " found at %p\n",
326 __FUNCTION__,
327 NIP6(*(struct in6_addr *)saddr),
328 x6spi);
329 X6SPI_CHECK_MAGIC(x6spi);
330 if (atomic_dec_and_test(&x6spi->refcnt)) { 230 if (atomic_dec_and_test(&x6spi->refcnt)) {
331 hlist_del(&x6spi->list_byaddr); 231 hlist_del(&x6spi->list_byaddr);
332 hlist_del(&x6spi->list_byspi); 232 hlist_del(&x6spi->list_byspi);
@@ -377,20 +277,14 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
377 case ICMPV6_ADDR_UNREACH: 277 case ICMPV6_ADDR_UNREACH:
378 case ICMPV6_PORT_UNREACH: 278 case ICMPV6_PORT_UNREACH:
379 default: 279 default:
380 X6TPRINTK3(KERN_DEBUG
381 "xfrm6_tunnel: Destination Unreach.\n");
382 break; 280 break;
383 } 281 }
384 break; 282 break;
385 case ICMPV6_PKT_TOOBIG: 283 case ICMPV6_PKT_TOOBIG:
386 X6TPRINTK3(KERN_DEBUG
387 "xfrm6_tunnel: Packet Too Big.\n");
388 break; 284 break;
389 case ICMPV6_TIME_EXCEED: 285 case ICMPV6_TIME_EXCEED:
390 switch (code) { 286 switch (code) {
391 case ICMPV6_EXC_HOPLIMIT: 287 case ICMPV6_EXC_HOPLIMIT:
392 X6TPRINTK3(KERN_DEBUG
393 "xfrm6_tunnel: Too small Hoplimit.\n");
394 break; 288 break;
395 case ICMPV6_EXC_FRAGTIME: 289 case ICMPV6_EXC_FRAGTIME:
396 default: 290 default:
@@ -447,22 +341,14 @@ static struct xfrm6_tunnel xfrm6_tunnel_handler = {
447 341
448static int __init xfrm6_tunnel_init(void) 342static int __init xfrm6_tunnel_init(void)
449{ 343{
450 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); 344 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
451
452 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) {
453 X6TPRINTK1(KERN_ERR
454 "xfrm6_tunnel init: can't add xfrm type\n");
455 return -EAGAIN; 345 return -EAGAIN;
456 } 346
457 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) { 347 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) {
458 X6TPRINTK1(KERN_ERR
459 "xfrm6_tunnel init(): can't add handler\n");
460 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 348 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
461 return -EAGAIN; 349 return -EAGAIN;
462 } 350 }
463 if (xfrm6_tunnel_spi_init() < 0) { 351 if (xfrm6_tunnel_spi_init() < 0) {
464 X6TPRINTK1(KERN_ERR
465 "xfrm6_tunnel init: failed to initialize spi\n");
466 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler); 352 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
467 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 353 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
468 return -EAGAIN; 354 return -EAGAIN;
@@ -472,15 +358,9 @@ static int __init xfrm6_tunnel_init(void)
472 358
473static void __exit xfrm6_tunnel_fini(void) 359static void __exit xfrm6_tunnel_fini(void)
474{ 360{
475 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
476
477 xfrm6_tunnel_spi_fini(); 361 xfrm6_tunnel_spi_fini();
478 if (xfrm6_tunnel_deregister(&xfrm6_tunnel_handler)) 362 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
479 X6TPRINTK1(KERN_ERR 363 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
480 "xfrm6_tunnel close: can't remove handler\n");
481 if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0)
482 X6TPRINTK1(KERN_ERR
483 "xfrm6_tunnel close: can't remove xfrm type\n");
484} 364}
485 365
486module_init(xfrm6_tunnel_init); 366module_init(xfrm6_tunnel_init);
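
With the X6TPRINTK/X6TDEBUG scaffolding and the magic-number checks gone, xfrm6_tunnel.c is left with plain register-and-unwind logic: each init step that fails undoes the steps that already succeeded, and the exit path tears everything down without second-guessing return values. The shape in isolation, with hypothetical register_a/b/c standing in for the xfrm type, the tunnel handler and the SPI table:

#include <errno.h>

/* Stubs for illustration only. */
static int  register_a(void)   { return 0; }
static int  register_b(void)   { return 0; }
static int  register_c(void)   { return 0; }
static void unregister_a(void) { }
static void unregister_b(void) { }

static int init_sketch(void)
{
        if (register_a() < 0)
                return -EAGAIN;
        if (register_b() < 0) {
                unregister_a();
                return -EAGAIN;
        }
        if (register_c() < 0) {
                unregister_b();
                unregister_a();
                return -EAGAIN;
        }
        return 0;
}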
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 7fae48a53bff..17699eeb64d7 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -308,7 +308,7 @@ static void irda_connect_response(struct irda_sock *self)
308 308
309 IRDA_ASSERT(self != NULL, return;); 309 IRDA_ASSERT(self != NULL, return;);
310 310
311 skb = dev_alloc_skb(64); 311 skb = alloc_skb(64, GFP_ATOMIC);
312 if (skb == NULL) { 312 if (skb == NULL) {
313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", 313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n",
314 __FUNCTION__); 314 __FUNCTION__);
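
Here and throughout the IrDA hunks that follow, dev_alloc_skb() gives way to alloc_skb() with an explicit GFP flag. dev_alloc_skb() is a receive-side convenience: it hardwires GFP_ATOMIC and reserves extra headroom for the driver, whereas these protocol transmit paths want to state the allocation context themselves and manage headroom explicitly. A sketch of the pattern:

#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Allocate a small transmit frame with a caller-chosen GFP flag.
 * (Sketch only; real callers go on to skb_reserve()/skb_put().) */
static struct sk_buff *small_tx_frame(unsigned int len, gfp_t gfp)
{
        struct sk_buff *skb = alloc_skb(len, gfp);

        if (!skb)
                return NULL;
        return skb;
}

Every conversion shown in this patch passes GFP_ATOMIC, matching dev_alloc_skb()'s old behaviour.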
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 9c4a902a9dba..ad6b6af3dd97 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -115,12 +115,10 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line)
115 115
116 IRDA_ASSERT(ircomm != NULL, return NULL;); 116 IRDA_ASSERT(ircomm != NULL, return NULL;);
117 117
118 self = kmalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); 118 self = kzalloc(sizeof(struct ircomm_cb), GFP_ATOMIC);
119 if (self == NULL) 119 if (self == NULL)
120 return NULL; 120 return NULL;
121 121
122 memset(self, 0, sizeof(struct ircomm_cb));
123
124 self->notify = *notify; 122 self->notify = *notify;
125 self->magic = IRCOMM_MAGIC; 123 self->magic = IRCOMM_MAGIC;
126 124
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index d9097207aed3..959874b6451f 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -81,7 +81,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
81 81
82 /* Any userdata supplied? */ 82 /* Any userdata supplied? */
83 if (userdata == NULL) { 83 if (userdata == NULL) {
84 tx_skb = dev_alloc_skb(64); 84 tx_skb = alloc_skb(64, GFP_ATOMIC);
85 if (!tx_skb) 85 if (!tx_skb)
86 return -ENOMEM; 86 return -ENOMEM;
87 87
@@ -115,7 +115,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
116 116
117 if (!userdata) { 117 if (!userdata) {
118 tx_skb = dev_alloc_skb(64); 118 tx_skb = alloc_skb(64, GFP_ATOMIC);
119 if (!tx_skb) 119 if (!tx_skb)
120 return -ENOMEM; 120 return -ENOMEM;
121 121
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 6009bab05091..a39f5735a90b 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -121,7 +121,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
121 121
122 skb = self->ctrl_skb; 122 skb = self->ctrl_skb;
123 if (!skb) { 123 if (!skb) {
124 skb = dev_alloc_skb(256); 124 skb = alloc_skb(256, GFP_ATOMIC);
125 if (!skb) { 125 if (!skb) {
126 spin_unlock_irqrestore(&self->spinlock, flags); 126 spin_unlock_irqrestore(&self->spinlock, flags);
127 return -ENOMEM; 127 return -ENOMEM;
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index b400f27851fc..3bcdb467efc5 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -379,12 +379,11 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
379 self = hashbin_lock_find(ircomm_tty, line, NULL); 379 self = hashbin_lock_find(ircomm_tty, line, NULL);
380 if (!self) { 380 if (!self) {
381 /* No, so make new instance */ 381 /* No, so make new instance */
382 self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); 382 self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
383 if (self == NULL) { 383 if (self == NULL) {
384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__); 384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__);
385 return -ENOMEM; 385 return -ENOMEM;
386 } 386 }
387 memset(self, 0, sizeof(struct ircomm_tty_cb));
388 387
389 self->magic = IRCOMM_TTY_MAGIC; 388 self->magic = IRCOMM_TTY_MAGIC;
390 self->flow = FLOW_STOP; 389 self->flow = FLOW_STOP;
@@ -759,8 +758,9 @@ static int ircomm_tty_write(struct tty_struct *tty,
759 } 758 }
760 } else { 759 } else {
761 /* Prepare a full sized frame */ 760 /* Prepare a full sized frame */
762 skb = dev_alloc_skb(self->max_data_size+ 761 skb = alloc_skb(self->max_data_size+
763 self->max_header_size); 762 self->max_header_size,
763 GFP_ATOMIC);
764 if (!skb) { 764 if (!skb) {
765 spin_unlock_irqrestore(&self->spinlock, flags); 765 spin_unlock_irqrestore(&self->spinlock, flags);
766 return -ENOBUFS; 766 return -ENOBUFS;
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index ba40e5495f58..7e7a31798d8d 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -401,12 +401,10 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
401 } 401 }
402 402
403 /* Allocate dongle info for this instance */ 403 /* Allocate dongle info for this instance */
404 dongle = kmalloc(sizeof(dongle_t), GFP_KERNEL); 404 dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL);
405 if (!dongle) 405 if (!dongle)
406 goto out; 406 goto out;
407 407
408 memset(dongle, 0, sizeof(dongle_t));
409
410 /* Bind the registration info to this particular instance */ 408 /* Bind the registration info to this particular instance */
411 dongle->issue = reg; 409 dongle->issue = reg;
412 dongle->dev = dev; 410 dongle->dev = dev;
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index a0472652a44e..61128aa05b40 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -345,7 +345,7 @@ static void iriap_disconnect_request(struct iriap_cb *self)
345 IRDA_ASSERT(self != NULL, return;); 345 IRDA_ASSERT(self != NULL, return;);
346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
347 347
348 tx_skb = dev_alloc_skb(64); 348 tx_skb = alloc_skb(64, GFP_ATOMIC);
349 if (tx_skb == NULL) { 349 if (tx_skb == NULL) {
350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", 350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n",
351 __FUNCTION__, 64); 351 __FUNCTION__, 64);
@@ -396,7 +396,7 @@ int iriap_getvaluebyclass_request(struct iriap_cb *self,
396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */ 396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */
397 397
398 skb_len = self->max_header_size+2+name_len+1+attr_len+4; 398 skb_len = self->max_header_size+2+name_len+1+attr_len+4;
399 tx_skb = dev_alloc_skb(skb_len); 399 tx_skb = alloc_skb(skb_len, GFP_ATOMIC);
400 if (!tx_skb) 400 if (!tx_skb)
401 return -ENOMEM; 401 return -ENOMEM;
402 402
@@ -562,7 +562,8 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
562 * value. We add 32 bytes because of the 6 bytes for the frame and 562 * value. We add 32 bytes because of the 6 bytes for the frame and
563 * max 5 bytes for the value coding. 563 * max 5 bytes for the value coding.
564 */ 564 */
565 tx_skb = dev_alloc_skb(value->len + self->max_header_size + 32); 565 tx_skb = alloc_skb(value->len + self->max_header_size + 32,
566 GFP_ATOMIC);
566 if (!tx_skb) 567 if (!tx_skb)
567 return; 568 return;
568 569
@@ -700,7 +701,7 @@ void iriap_send_ack(struct iriap_cb *self)
700 IRDA_ASSERT(self != NULL, return;); 701 IRDA_ASSERT(self != NULL, return;);
701 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 702 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
702 703
703 tx_skb = dev_alloc_skb(64); 704 tx_skb = alloc_skb(64, GFP_ATOMIC);
704 if (!tx_skb) 705 if (!tx_skb)
705 return; 706 return;
706 707
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index a73607450de1..da17395df05a 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -365,7 +365,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
365 365
366 switch (event) { 366 switch (event) {
367 case IAP_LM_CONNECT_INDICATION: 367 case IAP_LM_CONNECT_INDICATION:
368 tx_skb = dev_alloc_skb(64); 368 tx_skb = alloc_skb(64, GFP_ATOMIC);
369 if (tx_skb == NULL) { 369 if (tx_skb == NULL) {
370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); 370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__);
371 return; 371 return;
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 82e665c79991..a154b1d71c0f 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -82,13 +82,12 @@ struct ias_object *irias_new_object( char *name, int id)
82 82
83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
84 84
85 obj = kmalloc(sizeof(struct ias_object), GFP_ATOMIC); 85 obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
86 if (obj == NULL) { 86 if (obj == NULL) {
87 IRDA_WARNING("%s(), Unable to allocate object!\n", 87 IRDA_WARNING("%s(), Unable to allocate object!\n",
88 __FUNCTION__); 88 __FUNCTION__);
89 return NULL; 89 return NULL;
90 } 90 }
91 memset(obj, 0, sizeof( struct ias_object));
92 91
93 obj->magic = IAS_OBJECT_MAGIC; 92 obj->magic = IAS_OBJECT_MAGIC;
94 obj->name = strndup(name, IAS_MAX_CLASSNAME); 93 obj->name = strndup(name, IAS_MAX_CLASSNAME);
@@ -346,13 +345,12 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
346 IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); 345 IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
347 IRDA_ASSERT(name != NULL, return;); 346 IRDA_ASSERT(name != NULL, return;);
348 347
349 attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 348 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
350 if (attrib == NULL) { 349 if (attrib == NULL) {
351 IRDA_WARNING("%s: Unable to allocate attribute!\n", 350 IRDA_WARNING("%s: Unable to allocate attribute!\n",
352 __FUNCTION__); 351 __FUNCTION__);
353 return; 352 return;
354 } 353 }
355 memset(attrib, 0, sizeof( struct ias_attrib));
356 354
357 attrib->magic = IAS_ATTRIB_MAGIC; 355 attrib->magic = IAS_ATTRIB_MAGIC;
358 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 356 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -382,13 +380,12 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
382 IRDA_ASSERT(name != NULL, return;); 380 IRDA_ASSERT(name != NULL, return;);
383 IRDA_ASSERT(octets != NULL, return;); 381 IRDA_ASSERT(octets != NULL, return;);
384 382
385 attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 383 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
386 if (attrib == NULL) { 384 if (attrib == NULL) {
387 IRDA_WARNING("%s: Unable to allocate attribute!\n", 385 IRDA_WARNING("%s: Unable to allocate attribute!\n",
388 __FUNCTION__); 386 __FUNCTION__);
389 return; 387 return;
390 } 388 }
391 memset(attrib, 0, sizeof( struct ias_attrib));
392 389
393 attrib->magic = IAS_ATTRIB_MAGIC; 390 attrib->magic = IAS_ATTRIB_MAGIC;
394 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 391 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -416,13 +413,12 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
416 IRDA_ASSERT(name != NULL, return;); 413 IRDA_ASSERT(name != NULL, return;);
417 IRDA_ASSERT(value != NULL, return;); 414 IRDA_ASSERT(value != NULL, return;);
418 415
419 attrib = kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC); 416 attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
420 if (attrib == NULL) { 417 if (attrib == NULL) {
421 IRDA_WARNING("%s: Unable to allocate attribute!\n", 418 IRDA_WARNING("%s: Unable to allocate attribute!\n",
422 __FUNCTION__); 419 __FUNCTION__);
423 return; 420 return;
424 } 421 }
425 memset(attrib, 0, sizeof( struct ias_attrib));
426 422
427 attrib->magic = IAS_ATTRIB_MAGIC; 423 attrib->magic = IAS_ATTRIB_MAGIC;
428 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 424 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -443,12 +439,11 @@ struct ias_value *irias_new_integer_value(int integer)
443{ 439{
444 struct ias_value *value; 440 struct ias_value *value;
445 441
446 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 442 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
447 if (value == NULL) { 443 if (value == NULL) {
448 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 444 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
449 return NULL; 445 return NULL;
450 } 446 }
451 memset(value, 0, sizeof(struct ias_value));
452 447
453 value->type = IAS_INTEGER; 448 value->type = IAS_INTEGER;
454 value->len = 4; 449 value->len = 4;
@@ -469,12 +464,11 @@ struct ias_value *irias_new_string_value(char *string)
469{ 464{
470 struct ias_value *value; 465 struct ias_value *value;
471 466
472 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 467 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
473 if (value == NULL) { 468 if (value == NULL) {
474 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 469 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
475 return NULL; 470 return NULL;
476 } 471 }
477 memset( value, 0, sizeof( struct ias_value));
478 472
479 value->type = IAS_STRING; 473 value->type = IAS_STRING;
480 value->charset = CS_ASCII; 474 value->charset = CS_ASCII;
@@ -495,12 +489,11 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
495{ 489{
496 struct ias_value *value; 490 struct ias_value *value;
497 491
498 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 492 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
499 if (value == NULL) { 493 if (value == NULL) {
500 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 494 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
501 return NULL; 495 return NULL;
502 } 496 }
503 memset(value, 0, sizeof(struct ias_value));
504 497
505 value->type = IAS_OCT_SEQ; 498 value->type = IAS_OCT_SEQ;
506 /* Check length */ 499 /* Check length */
@@ -522,12 +515,11 @@ struct ias_value *irias_new_missing_value(void)
522{ 515{
523 struct ias_value *value; 516 struct ias_value *value;
524 517
525 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 518 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
526 if (value == NULL) { 519 if (value == NULL) {
527 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 520 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
528 return NULL; 521 return NULL;
529 } 522 }
530 memset(value, 0, sizeof(struct ias_value));
531 523
532 value->type = IAS_MISSING; 524 value->type = IAS_MISSING;
533 value->len = 0; 525 value->len = 0;
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index bd659dd545ac..7dd0a2fe1d20 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -636,7 +636,7 @@ void irlan_get_provider_info(struct irlan_cb *self)
636 IRDA_ASSERT(self != NULL, return;); 636 IRDA_ASSERT(self != NULL, return;);
637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
638 638
639 skb = dev_alloc_skb(64); 639 skb = alloc_skb(64, GFP_ATOMIC);
640 if (!skb) 640 if (!skb)
641 return; 641 return;
642 642
@@ -668,7 +668,7 @@ void irlan_open_data_channel(struct irlan_cb *self)
668 IRDA_ASSERT(self != NULL, return;); 668 IRDA_ASSERT(self != NULL, return;);
669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
670 670
671 skb = dev_alloc_skb(64); 671 skb = alloc_skb(64, GFP_ATOMIC);
672 if (!skb) 672 if (!skb)
673 return; 673 return;
674 674
@@ -704,7 +704,7 @@ void irlan_close_data_channel(struct irlan_cb *self)
704 if (self->client.tsap_ctrl == NULL) 704 if (self->client.tsap_ctrl == NULL)
705 return; 705 return;
706 706
707 skb = dev_alloc_skb(64); 707 skb = alloc_skb(64, GFP_ATOMIC);
708 if (!skb) 708 if (!skb)
709 return; 709 return;
710 710
@@ -739,7 +739,7 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
739 IRDA_ASSERT(self != NULL, return;); 739 IRDA_ASSERT(self != NULL, return;);
740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
741 741
742 skb = dev_alloc_skb(128); 742 skb = alloc_skb(128, GFP_ATOMIC);
743 if (!skb) 743 if (!skb)
744 return; 744 return;
745 745
@@ -777,7 +777,7 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
777 IRDA_ASSERT(self != NULL, return;); 777 IRDA_ASSERT(self != NULL, return;);
778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
779 779
780 skb = dev_alloc_skb(128); 780 skb = alloc_skb(128, GFP_ATOMIC);
781 if (!skb) 781 if (!skb)
782 return; 782 return;
783 783
@@ -816,7 +816,7 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
816 IRDA_ASSERT(self != NULL, return;); 816 IRDA_ASSERT(self != NULL, return;);
817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
818 818
819 skb = dev_alloc_skb(128); 819 skb = alloc_skb(128, GFP_ATOMIC);
820 if (!skb) 820 if (!skb)
821 return; 821 return;
822 822
@@ -856,7 +856,7 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
856 IRDA_ASSERT(self != NULL, return;); 856 IRDA_ASSERT(self != NULL, return;);
857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
858 858
859 skb = dev_alloc_skb(128); 859 skb = alloc_skb(128, GFP_ATOMIC);
860 if (!skb) 860 if (!skb)
861 return; 861 return;
862 862
@@ -891,7 +891,7 @@ void irlan_get_media_char(struct irlan_cb *self)
891 IRDA_ASSERT(self != NULL, return;); 891 IRDA_ASSERT(self != NULL, return;);
892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
893 893
894 skb = dev_alloc_skb(64); 894 skb = alloc_skb(64, GFP_ATOMIC);
895 if (!skb) 895 if (!skb)
896 return; 896 return;
897 897
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 39c202d1c374..9c0df86044d7 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -296,7 +296,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
296 IRDA_ASSERT(self != NULL, return;); 296 IRDA_ASSERT(self != NULL, return;);
297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
298 298
299 skb = dev_alloc_skb(128); 299 skb = alloc_skb(128, GFP_ATOMIC);
300 if (!skb) 300 if (!skb)
301 return; 301 return;
302 302
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index cade355ac8af..e7852a07495e 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -116,11 +116,10 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
117 117
118 /* Initialize the irlap structure. */ 118 /* Initialize the irlap structure. */
119 self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL); 119 self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
120 if (self == NULL) 120 if (self == NULL)
121 return NULL; 121 return NULL;
122 122
123 memset(self, 0, sizeof(struct irlap_cb));
124 self->magic = LAP_MAGIC; 123 self->magic = LAP_MAGIC;
125 124
126 /* Make a binding between the layers */ 125 /* Make a binding between the layers */
@@ -882,7 +881,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
882 /* Change speed now, or just piggyback speed on frames */ 881 /* Change speed now, or just piggyback speed on frames */
883 if (now) { 882 if (now) {
884 /* Send down empty frame to trigger speed change */ 883 /* Send down empty frame to trigger speed change */
885 skb = dev_alloc_skb(0); 884 skb = alloc_skb(0, GFP_ATOMIC);
886 if (skb) 885 if (skb)
887 irlap_queue_xmit(self, skb); 886 irlap_queue_xmit(self, skb);
888 } 887 }
@@ -1222,7 +1221,7 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
1222{ 1221{
1223 struct seq_file *seq; 1222 struct seq_file *seq;
1224 int rc = -ENOMEM; 1223 int rc = -ENOMEM;
1225 struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1224 struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1226 1225
1227 if (!s) 1226 if (!s)
1228 goto out; 1227 goto out;
@@ -1238,7 +1237,6 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
1238 1237
1239 seq = file->private_data; 1238 seq = file->private_data;
1240 seq->private = s; 1239 seq->private = s;
1241 memset(s, 0, sizeof(*s));
1242out: 1240out:
1243 return rc; 1241 return rc;
1244out_kfree: 1242out_kfree:
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 3e9a06abbdd0..ccb983bf0f4a 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -117,7 +117,7 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
118 118
119 /* Allocate frame */ 119 /* Allocate frame */
120 tx_skb = dev_alloc_skb(64); 120 tx_skb = alloc_skb(64, GFP_ATOMIC);
121 if (!tx_skb) 121 if (!tx_skb)
122 return; 122 return;
123 123
@@ -210,7 +210,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
211 211
212 /* Allocate frame */ 212 /* Allocate frame */
213 tx_skb = dev_alloc_skb(64); 213 tx_skb = alloc_skb(64, GFP_ATOMIC);
214 if (!tx_skb) 214 if (!tx_skb)
215 return; 215 return;
216 216
@@ -250,7 +250,7 @@ void irlap_send_dm_frame( struct irlap_cb *self)
250 IRDA_ASSERT(self != NULL, return;); 250 IRDA_ASSERT(self != NULL, return;);
251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
252 252
253 tx_skb = dev_alloc_skb(32); 253 tx_skb = alloc_skb(32, GFP_ATOMIC);
254 if (!tx_skb) 254 if (!tx_skb)
255 return; 255 return;
256 256
@@ -282,7 +282,7 @@ void irlap_send_disc_frame(struct irlap_cb *self)
282 IRDA_ASSERT(self != NULL, return;); 282 IRDA_ASSERT(self != NULL, return;);
283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
284 284
285 tx_skb = dev_alloc_skb(16); 285 tx_skb = alloc_skb(16, GFP_ATOMIC);
286 if (!tx_skb) 286 if (!tx_skb)
287 return; 287 return;
288 288
@@ -315,7 +315,7 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
316 IRDA_ASSERT(discovery != NULL, return;); 316 IRDA_ASSERT(discovery != NULL, return;);
317 317
318 tx_skb = dev_alloc_skb(64); 318 tx_skb = alloc_skb(64, GFP_ATOMIC);
319 if (!tx_skb) 319 if (!tx_skb)
320 return; 320 return;
321 321
@@ -422,11 +422,10 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
422 return; 422 return;
423 } 423 }
424 424
425 if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { 425 if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
426 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__); 426 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__);
427 return; 427 return;
428 } 428 }
429 memset(discovery, 0, sizeof(discovery_t));
430 429
431 discovery->data.daddr = info->daddr; 430 discovery->data.daddr = info->daddr;
432 discovery->data.saddr = self->saddr; 431 discovery->data.saddr = self->saddr;
@@ -576,7 +575,7 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command)
576 struct sk_buff *tx_skb; 575 struct sk_buff *tx_skb;
577 __u8 *frame; 576 __u8 *frame;
578 577
579 tx_skb = dev_alloc_skb(16); 578 tx_skb = alloc_skb(16, GFP_ATOMIC);
580 if (!tx_skb) 579 if (!tx_skb)
581 return; 580 return;
582 581
@@ -601,7 +600,7 @@ void irlap_send_rd_frame(struct irlap_cb *self)
601 struct sk_buff *tx_skb; 600 struct sk_buff *tx_skb;
602 __u8 *frame; 601 __u8 *frame;
603 602
604 tx_skb = dev_alloc_skb(16); 603 tx_skb = alloc_skb(16, GFP_ATOMIC);
605 if (!tx_skb) 604 if (!tx_skb)
606 return; 605 return;
607 606
@@ -1215,7 +1214,7 @@ void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
1215 struct test_frame *frame; 1214 struct test_frame *frame;
1216 __u8 *info; 1215 __u8 *info;
1217 1216
1218 tx_skb = dev_alloc_skb(cmd->len+sizeof(struct test_frame)); 1217 tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC);
1219 if (!tx_skb) 1218 if (!tx_skb)
1220 return; 1219 return;
1221 1220
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 129ad64c15bb..c440913dee14 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -78,10 +78,9 @@ int __init irlmp_init(void)
78{ 78{
79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
80 /* Initialize the irlmp structure. */ 80 /* Initialize the irlmp structure. */
81 irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL); 81 irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
82 if (irlmp == NULL) 82 if (irlmp == NULL)
83 return -ENOMEM; 83 return -ENOMEM;
84 memset(irlmp, 0, sizeof(struct irlmp_cb));
85 84
86 irlmp->magic = LMP_MAGIC; 85 irlmp->magic = LMP_MAGIC;
87 86
@@ -160,12 +159,11 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
160 return NULL; 159 return NULL;
161 160
162 /* Allocate new instance of a LSAP connection */ 161 /* Allocate new instance of a LSAP connection */
163 self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC); 162 self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
164 if (self == NULL) { 163 if (self == NULL) {
165 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__); 164 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__);
166 return NULL; 165 return NULL;
167 } 166 }
168 memset(self, 0, sizeof(struct lsap_cb));
169 167
170 self->magic = LMP_LSAP_MAGIC; 168 self->magic = LMP_LSAP_MAGIC;
171 self->slsap_sel = slsap_sel; 169 self->slsap_sel = slsap_sel;
@@ -288,12 +286,11 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
288 /* 286 /*
289 * Allocate new instance of a LSAP connection 287 * Allocate new instance of a LSAP connection
290 */ 288 */
291 lap = kmalloc(sizeof(struct lap_cb), GFP_KERNEL); 289 lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL);
292 if (lap == NULL) { 290 if (lap == NULL) {
293 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__); 291 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__);
294 return; 292 return;
295 } 293 }
296 memset(lap, 0, sizeof(struct lap_cb));
297 294
298 lap->irlap = irlap; 295 lap->irlap = irlap;
299 lap->magic = LMP_LAP_MAGIC; 296 lap->magic = LMP_LAP_MAGIC;
@@ -395,7 +392,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
395 392
396 /* Any userdata? */ 393 /* Any userdata? */
397 if (tx_skb == NULL) { 394 if (tx_skb == NULL) {
398 tx_skb = dev_alloc_skb(64); 395 tx_skb = alloc_skb(64, GFP_ATOMIC);
399 if (!tx_skb) 396 if (!tx_skb)
400 return -ENOMEM; 397 return -ENOMEM;
401 398
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index e53bf9e0053e..a1e502ff9070 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -476,11 +476,10 @@ dev_irnet_open(struct inode * inode,
476#endif /* SECURE_DEVIRNET */ 476#endif /* SECURE_DEVIRNET */
477 477
478 /* Allocate a private structure for this IrNET instance */ 478 /* Allocate a private structure for this IrNET instance */
479 ap = kmalloc(sizeof(*ap), GFP_KERNEL); 479 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
480 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); 480 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n");
481 481
482 /* initialize the irnet structure */ 482 /* initialize the irnet structure */
483 memset(ap, 0, sizeof(*ap));
484 ap->file = file; 483 ap->file = file;
485 484
486 /* PPP channel setup */ 485 /* PPP channel setup */
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 49c51c5f1a86..42acf1cde737 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -85,10 +85,9 @@ static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
85 */ 85 */
86int __init irttp_init(void) 86int __init irttp_init(void)
87{ 87{
88 irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); 88 irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
89 if (irttp == NULL) 89 if (irttp == NULL)
90 return -ENOMEM; 90 return -ENOMEM;
91 memset(irttp, 0, sizeof(struct irttp_cb));
92 91
93 irttp->magic = TTP_MAGIC; 92 irttp->magic = TTP_MAGIC;
94 93
@@ -306,7 +305,8 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
306 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); 305 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__);
307 306
308 /* Make new segment */ 307 /* Make new segment */
309 frag = dev_alloc_skb(self->max_seg_size+self->max_header_size); 308 frag = alloc_skb(self->max_seg_size+self->max_header_size,
309 GFP_ATOMIC);
310 if (!frag) 310 if (!frag)
311 return; 311 return;
312 312
@@ -389,12 +389,11 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
389 return NULL; 389 return NULL;
390 } 390 }
391 391
392 self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 392 self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
393 if (self == NULL) { 393 if (self == NULL) {
394 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); 394 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__);
395 return NULL; 395 return NULL;
396 } 396 }
397 memset(self, 0, sizeof(struct tsap_cb));
398 spin_lock_init(&self->lock); 397 spin_lock_init(&self->lock);
399 398
400 /* Initialise todo timer */ 399 /* Initialise todo timer */
@@ -805,7 +804,7 @@ static inline void irttp_give_credit(struct tsap_cb *self)
805 self->send_credit, self->avail_credit, self->remote_credit); 804 self->send_credit, self->avail_credit, self->remote_credit);
806 805
807 /* Give credit to peer */ 806 /* Give credit to peer */
808 tx_skb = dev_alloc_skb(64); 807 tx_skb = alloc_skb(64, GFP_ATOMIC);
809 if (!tx_skb) 808 if (!tx_skb)
810 return; 809 return;
811 810
@@ -1094,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
1094 1093
1095 /* Any userdata supplied? */ 1094 /* Any userdata supplied? */
1096 if (userdata == NULL) { 1095 if (userdata == NULL) {
1097 tx_skb = dev_alloc_skb(64); 1096 tx_skb = alloc_skb(64, GFP_ATOMIC);
1098 if (!tx_skb) 1097 if (!tx_skb)
1099 return -ENOMEM; 1098 return -ENOMEM;
1100 1099
@@ -1342,7 +1341,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
1342 1341
1343 /* Any userdata supplied? */ 1342 /* Any userdata supplied? */
1344 if (userdata == NULL) { 1343 if (userdata == NULL) {
1345 tx_skb = dev_alloc_skb(64); 1344 tx_skb = alloc_skb(64, GFP_ATOMIC);
1346 if (!tx_skb) 1345 if (!tx_skb)
1347 return -ENOMEM; 1346 return -ENOMEM;
1348 1347
@@ -1541,7 +1540,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1541 1540
1542 if (!userdata) { 1541 if (!userdata) {
1543 struct sk_buff *tx_skb; 1542 struct sk_buff *tx_skb;
1544 tx_skb = dev_alloc_skb(64); 1543 tx_skb = alloc_skb(64, GFP_ATOMIC);
1545 if (!tx_skb) 1544 if (!tx_skb)
1546 return -ENOMEM; 1545 return -ENOMEM;
1547 1546
@@ -1876,7 +1875,7 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
1876 int rc = -ENOMEM; 1875 int rc = -ENOMEM;
1877 struct irttp_iter_state *s; 1876 struct irttp_iter_state *s;
1878 1877
1879 s = kmalloc(sizeof(*s), GFP_KERNEL); 1878 s = kzalloc(sizeof(*s), GFP_KERNEL);
1880 if (!s) 1879 if (!s)
1881 goto out; 1880 goto out;
1882 1881
@@ -1886,7 +1885,6 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
1886 1885
1887 seq = file->private_data; 1886 seq = file->private_data;
1888 seq->private = s; 1887 seq->private = s;
1889 memset(s, 0, sizeof(*s));
1890out: 1888out:
1891 return rc; 1889 return rc;
1892out_kfree: 1890out_kfree:
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index aea6616cea3d..d504eed416f6 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -115,14 +115,12 @@ static struct lapb_cb *lapb_devtostruct(struct net_device *dev)
115 */ 115 */
116static struct lapb_cb *lapb_create_cb(void) 116static struct lapb_cb *lapb_create_cb(void)
117{ 117{
118 struct lapb_cb *lapb = kmalloc(sizeof(*lapb), GFP_ATOMIC); 118 struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC);
119 119
120 120
121 if (!lapb) 121 if (!lapb)
122 goto out; 122 goto out;
123 123
124 memset(lapb, 0x00, sizeof(*lapb));
125
126 skb_queue_head_init(&lapb->write_queue); 124 skb_queue_head_init(&lapb->write_queue);
127 skb_queue_head_init(&lapb->ack_queue); 125 skb_queue_head_init(&lapb->ack_queue);
128 126
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index bd242a49514a..d12413cff5bd 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -33,10 +33,9 @@ unsigned char llc_station_mac_sa[ETH_ALEN];
33 */ 33 */
34static struct llc_sap *llc_sap_alloc(void) 34static struct llc_sap *llc_sap_alloc(void)
35{ 35{
36 struct llc_sap *sap = kmalloc(sizeof(*sap), GFP_ATOMIC); 36 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
37 37
38 if (sap) { 38 if (sap) {
39 memset(sap, 0, sizeof(*sap));
40 sap->state = LLC_SAP_STATE_ACTIVE; 39 sap->state = LLC_SAP_STATE_ACTIVE;
41 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN); 40 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN);
42 rwlock_init(&sap->sk_list.lock); 41 rwlock_init(&sap->sk_list.lock);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 42a178aa30f9..a9894ddfd72a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -386,8 +386,8 @@ config NETFILTER_XT_MATCH_REALM
386 <file:Documentation/modules.txt>. If unsure, say `N'. 386 <file:Documentation/modules.txt>. If unsure, say `N'.
387 387
388config NETFILTER_XT_MATCH_SCTP 388config NETFILTER_XT_MATCH_SCTP
389 tristate '"sctp" protocol match support' 389 tristate '"sctp" protocol match support (EXPERIMENTAL)'
390 depends on NETFILTER_XTABLES 390 depends on NETFILTER_XTABLES && EXPERIMENTAL
391 help 391 help
392 With this option enabled, you will be able to use the 392 With this option enabled, you will be able to use the
393 `sctp' match in order to match on SCTP source/destination ports 393 `sctp' match in order to match on SCTP source/destination ports
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5fcab2ef231f..4ef836699962 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -428,6 +428,8 @@ static struct file_operations ct_cpu_seq_fops = {
428 428
429/* Sysctl support */ 429/* Sysctl support */
430 430
431int nf_conntrack_checksum = 1;
432
431#ifdef CONFIG_SYSCTL 433#ifdef CONFIG_SYSCTL
432 434
433/* From nf_conntrack_core.c */ 435/* From nf_conntrack_core.c */
@@ -459,8 +461,6 @@ extern unsigned int nf_ct_generic_timeout;
459static int log_invalid_proto_min = 0; 461static int log_invalid_proto_min = 0;
460static int log_invalid_proto_max = 255; 462static int log_invalid_proto_max = 255;
461 463
462int nf_conntrack_checksum = 1;
463
464static struct ctl_table_header *nf_ct_sysctl_header; 464static struct ctl_table_header *nf_ct_sysctl_header;
465 465
466static ctl_table nf_ct_sysctl_table[] = { 466static ctl_table nf_ct_sysctl_table[] = {
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bb6fcee452ca..662a869593bf 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -219,21 +219,20 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
219 219
220 switch (verdict & NF_VERDICT_MASK) { 220 switch (verdict & NF_VERDICT_MASK) {
221 case NF_ACCEPT: 221 case NF_ACCEPT:
222 case NF_STOP:
222 info->okfn(skb); 223 info->okfn(skb);
224 case NF_STOLEN:
223 break; 225 break;
224
225 case NF_QUEUE: 226 case NF_QUEUE:
226 if (!nf_queue(&skb, elem, info->pf, info->hook, 227 if (!nf_queue(&skb, elem, info->pf, info->hook,
227 info->indev, info->outdev, info->okfn, 228 info->indev, info->outdev, info->okfn,
228 verdict >> NF_VERDICT_BITS)) 229 verdict >> NF_VERDICT_BITS))
229 goto next_hook; 230 goto next_hook;
230 break; 231 break;
232 default:
233 kfree_skb(skb);
231 } 234 }
232 rcu_read_unlock(); 235 rcu_read_unlock();
233
234 if (verdict == NF_DROP)
235 kfree_skb(skb);
236
237 kfree(info); 236 kfree(info);
238 return; 237 return;
239} 238}
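
The rewritten nf_reinject() switch folds the old trailing "if (verdict == NF_DROP) kfree_skb(skb)" into a default arm and adds NF_STOP beside NF_ACCEPT: both call the okfn and then deliberately fall through to the NF_STOLEN break, NF_STOLEN itself leaves the skb alone because someone else owns it, and any unrecognised verdict frees the packet instead of leaking it. The control flow in a stand-alone sketch (verdict values and helpers are stand-ins for the netfilter ones):

enum verdict { V_ACCEPT, V_STOP, V_STOLEN, V_QUEUE, V_DROP };

struct pkt { int id; };

static void deliver(struct pkt *p)  { (void)p; }  /* okfn() stand-in */
static void requeue(struct pkt *p)  { (void)p; }  /* nf_queue() stand-in */
static void drop_pkt(struct pkt *p) { (void)p; }  /* kfree_skb() stand-in */

static void reinject_sketch(enum verdict v, struct pkt *p)
{
        switch (v) {
        case V_ACCEPT:
        case V_STOP:
                deliver(p);
                /* deliberate fall-through: nothing left to do with it */
        case V_STOLEN:
                break;                  /* ownership passed elsewhere */
        case V_QUEUE:
                requeue(p);             /* real code retries next hook on failure */
                break;
        default:
                drop_pkt(p);            /* NF_DROP and anything unknown */
        }
}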
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 5fe4c9df17f5..a9f4f6f3c628 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -113,6 +113,21 @@ checkentry(const char *tablename,
113 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || 113 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
114 info->bitmask & ~XT_PHYSDEV_OP_MASK) 114 info->bitmask & ~XT_PHYSDEV_OP_MASK)
115 return 0; 115 return 0;
116 if (brnf_deferred_hooks == 0 &&
117 info->bitmask & XT_PHYSDEV_OP_OUT &&
118 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
119 info->invert & XT_PHYSDEV_OP_BRIDGED) &&
120 hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
121 (1 << NF_IP_POST_ROUTING))) {
122 printk(KERN_WARNING "physdev match: using --physdev-out in the "
123 "OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
124 "traffic is deprecated and breaks other things, it will "
125 "be removed in January 2007. See Documentation/"
126 "feature-removal-schedule.txt for details. This doesn't "
127 "affect you in case you're using it for purely bridged "
128 "traffic.\n");
129 brnf_deferred_hooks = 1;
130 }
116 return 1; 131 return 1;
117} 132}
118 133
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c
index 3ac703b5cb8f..d2f5320a80bf 100644
--- a/net/netfilter/xt_pkttype.c
+++ b/net/netfilter/xt_pkttype.c
@@ -9,6 +9,8 @@
9#include <linux/skbuff.h> 9#include <linux/skbuff.h>
10#include <linux/if_ether.h> 10#include <linux/if_ether.h>
11#include <linux/if_packet.h> 11#include <linux/if_packet.h>
12#include <linux/in.h>
13#include <linux/ip.h>
12 14
13#include <linux/netfilter/xt_pkttype.h> 15#include <linux/netfilter/xt_pkttype.h>
14#include <linux/netfilter/x_tables.h> 16#include <linux/netfilter/x_tables.h>
@@ -28,9 +30,17 @@ static int match(const struct sk_buff *skb,
28 unsigned int protoff, 30 unsigned int protoff,
29 int *hotdrop) 31 int *hotdrop)
30{ 32{
33 u_int8_t type;
31 const struct xt_pkttype_info *info = matchinfo; 34 const struct xt_pkttype_info *info = matchinfo;
32 35
33 return (skb->pkt_type == info->pkttype) ^ info->invert; 36 if (skb->pkt_type == PACKET_LOOPBACK)
37 type = (MULTICAST(skb->nh.iph->daddr)
38 ? PACKET_MULTICAST
39 : PACKET_BROADCAST);
40 else
41 type = skb->pkt_type;
42
43 return (type == info->pkttype) ^ info->invert;
34} 44}
35 45
36static struct xt_match pkttype_match = { 46static struct xt_match pkttype_match = {
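Locally looped-back copies always carry PACKET_LOOPBACK, so the match above now derives multicast/broadcast from the destination IP instead of trusting skb->pkt_type. A userspace sketch of that classification; IN_MULTICAST() and the PACKET_* constants come from standard Linux headers, while classify() itself is only an illustration:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>    /* PACKET_LOOPBACK, PACKET_MULTICAST, PACKET_BROADCAST */

static unsigned char classify(unsigned char pkt_type, uint32_t daddr_host)
{
        if (pkt_type != PACKET_LOOPBACK)
                return pkt_type;
        /* the patch maps non-multicast loopback traffic to "broadcast" */
        return IN_MULTICAST(daddr_host) ? PACKET_MULTICAST : PACKET_BROADCAST;
}

int main(void)
{
        uint32_t mcast = ntohl(inet_addr("224.0.0.1"));
        uint32_t ucast = ntohl(inet_addr("192.0.2.1"));

        printf("%d %d\n", classify(PACKET_LOOPBACK, mcast),
                          classify(PACKET_LOOPBACK, ucast));
        return 0;
}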
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 55c0adc8f115..b85c1f9f1288 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -562,10 +562,9 @@ static int netlink_alloc_groups(struct sock *sk)
562 if (err) 562 if (err)
563 return err; 563 return err;
564 564
565 nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL); 565 nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
566 if (nlk->groups == NULL) 566 if (nlk->groups == NULL)
567 return -ENOMEM; 567 return -ENOMEM;
568 memset(nlk->groups, 0, NLGRPSZ(groups));
569 nlk->ngroups = groups; 568 nlk->ngroups = groups;
570 return 0; 569 return 0;
571} 570}
@@ -1393,11 +1392,10 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1393 struct sock *sk; 1392 struct sock *sk;
1394 struct netlink_sock *nlk; 1393 struct netlink_sock *nlk;
1395 1394
1396 cb = kmalloc(sizeof(*cb), GFP_KERNEL); 1395 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1397 if (cb == NULL) 1396 if (cb == NULL)
1398 return -ENOBUFS; 1397 return -ENOBUFS;
1399 1398
1400 memset(cb, 0, sizeof(*cb));
1401 cb->dump = dump; 1399 cb->dump = dump;
1402 cb->done = done; 1400 cb->done = done;
1403 cb->nlh = nlh; 1401 cb->nlh = nlh;
@@ -1668,7 +1666,7 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
1668 struct nl_seq_iter *iter; 1666 struct nl_seq_iter *iter;
1669 int err; 1667 int err;
1670 1668
1671 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 1669 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1672 if (!iter) 1670 if (!iter)
1673 return -ENOMEM; 1671 return -ENOMEM;
1674 1672
@@ -1678,7 +1676,6 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
1678 return err; 1676 return err;
1679 } 1677 }
1680 1678
1681 memset(iter, 0, sizeof(*iter));
1682 seq = file->private_data; 1679 seq = file->private_data;
1683 seq->private = iter; 1680 seq->private = iter;
1684 return 0; 1681 return 0;
@@ -1747,15 +1744,13 @@ static int __init netlink_proto_init(void)
1747 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) 1744 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
1748 netlink_skb_parms_too_large(); 1745 netlink_skb_parms_too_large();
1749 1746
1750 nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL); 1747 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
1751 if (!nl_table) { 1748 if (!nl_table) {
1752enomem: 1749enomem:
1753 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n"); 1750 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
1754 return -ENOMEM; 1751 return -ENOMEM;
1755 } 1752 }
1756 1753
1757 memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);
1758
1759 if (num_physpages >= (128 * 1024)) 1754 if (num_physpages >= (128 * 1024))
1760 max = num_physpages >> (21 - PAGE_SHIFT); 1755 max = num_physpages >> (21 - PAGE_SHIFT);
1761 else 1756 else
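This file and most of the ones that follow make the same mechanical change: kmalloc() followed by memset(..., 0, ...) becomes kzalloc(), and an array-sized allocation plus memset() becomes kcalloc(), which also checks the count-times-size multiplication for overflow. A userspace sketch of the equivalence using calloc(); struct nl_tbl is a made-up stand-in:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct nl_tbl { void *hash; unsigned int entries; };

/* old pattern: allocate, then zero by hand */
static struct nl_tbl *table_alloc_old(size_t n)
{
        struct nl_tbl *t = malloc(n * sizeof(*t));
        if (!t)
                return NULL;
        memset(t, 0, n * sizeof(*t));
        return t;
}

/* new pattern: calloc() zeroes the memory and checks n * size for overflow,
 * just as kcalloc()/kzalloc() do in the hunks above */
static struct nl_tbl *table_alloc_new(size_t n)
{
        return calloc(n, sizeof(struct nl_tbl));
}

int main(void)
{
        struct nl_tbl *a = table_alloc_old(4);
        struct nl_tbl *b = table_alloc_new(4);

        printf("%u %u\n", a ? a[3].entries : 0, b ? b[3].entries : 0);
        free(a);
        free(b);
        return 0;
}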
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 573b572f8f91..93d2c55ad2d5 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -58,13 +58,12 @@ static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
58 _enter("%p",peer); 58 _enter("%p",peer);
59 59
60 /* allocate and initialise a connection record */ 60 /* allocate and initialise a connection record */
61 conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); 61 conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
62 if (!conn) { 62 if (!conn) {
63 _leave(" = -ENOMEM"); 63 _leave(" = -ENOMEM");
64 return -ENOMEM; 64 return -ENOMEM;
65 } 65 }
66 66
67 memset(conn, 0, sizeof(struct rxrpc_connection));
68 atomic_set(&conn->usage, 1); 67 atomic_set(&conn->usage, 1);
69 68
70 INIT_LIST_HEAD(&conn->link); 69 INIT_LIST_HEAD(&conn->link);
@@ -535,13 +534,12 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
535 return -EINVAL; 534 return -EINVAL;
536 } 535 }
537 536
538 msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags); 537 msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags);
539 if (!msg) { 538 if (!msg) {
540 _leave(" = -ENOMEM"); 539 _leave(" = -ENOMEM");
541 return -ENOMEM; 540 return -ENOMEM;
542 } 541 }
543 542
544 memset(msg, 0, sizeof(*msg));
545 atomic_set(&msg->usage, 1); 543 atomic_set(&msg->usage, 1);
546 544
547 INIT_LIST_HEAD(&msg->link); 545 INIT_LIST_HEAD(&msg->link);
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c
index ed38f5b17c1b..8a275157a3bb 100644
--- a/net/rxrpc/peer.c
+++ b/net/rxrpc/peer.c
@@ -58,13 +58,12 @@ static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
58 _enter("%p,%08x", trans, ntohl(addr)); 58 _enter("%p,%08x", trans, ntohl(addr));
59 59
60 /* allocate and initialise a peer record */ 60 /* allocate and initialise a peer record */
61 peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); 61 peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
62 if (!peer) { 62 if (!peer) {
63 _leave(" = -ENOMEM"); 63 _leave(" = -ENOMEM");
64 return -ENOMEM; 64 return -ENOMEM;
65 } 65 }
66 66
67 memset(peer, 0, sizeof(struct rxrpc_peer));
68 atomic_set(&peer->usage, 1); 67 atomic_set(&peer->usage, 1);
69 68
70 INIT_LIST_HEAD(&peer->link); 69 INIT_LIST_HEAD(&peer->link);
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index dbe6105e83a5..465efc86fccf 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -68,11 +68,10 @@ int rxrpc_create_transport(unsigned short port,
68 68
69 _enter("%hu", port); 69 _enter("%hu", port);
70 70
71 trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); 71 trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
72 if (!trans) 72 if (!trans)
73 return -ENOMEM; 73 return -ENOMEM;
74 74
75 memset(trans, 0, sizeof(struct rxrpc_transport));
76 atomic_set(&trans->usage, 1); 75 atomic_set(&trans->usage, 1);
77 INIT_LIST_HEAD(&trans->services); 76 INIT_LIST_HEAD(&trans->services);
78 INIT_LIST_HEAD(&trans->link); 77 INIT_LIST_HEAD(&trans->link);
@@ -312,13 +311,12 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
312 311
313 _enter(""); 312 _enter("");
314 313
315 msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL); 314 msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
316 if (!msg) { 315 if (!msg) {
317 _leave(" = -ENOMEM"); 316 _leave(" = -ENOMEM");
318 return -ENOMEM; 317 return -ENOMEM;
319 } 318 }
320 319
321 memset(msg, 0, sizeof(*msg));
322 atomic_set(&msg->usage, 1); 320 atomic_set(&msg->usage, 1);
323 list_add_tail(&msg->link,msgq); 321 list_add_tail(&msg->link,msgq);
324 322
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9affeeedf107..a2587b52e531 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -312,10 +312,9 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
312 } 312 }
313 313
314 *err = -ENOMEM; 314 *err = -ENOMEM;
315 a = kmalloc(sizeof(*a), GFP_KERNEL); 315 a = kzalloc(sizeof(*a), GFP_KERNEL);
316 if (a == NULL) 316 if (a == NULL)
317 goto err_mod; 317 goto err_mod;
318 memset(a, 0, sizeof(*a));
319 318
320 /* backward compatibility for policer */ 319 /* backward compatibility for policer */
321 if (name == NULL) 320 if (name == NULL)
@@ -492,10 +491,9 @@ tcf_action_get_1(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int *err)
492 index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]); 491 index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]);
493 492
494 *err = -ENOMEM; 493 *err = -ENOMEM;
495 a = kmalloc(sizeof(struct tc_action), GFP_KERNEL); 494 a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
496 if (a == NULL) 495 if (a == NULL)
497 return NULL; 496 return NULL;
498 memset(a, 0, sizeof(struct tc_action));
499 497
500 *err = -EINVAL; 498 *err = -EINVAL;
501 a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]); 499 a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]);
@@ -531,12 +529,11 @@ static struct tc_action *create_a(int i)
531{ 529{
532 struct tc_action *act; 530 struct tc_action *act;
533 531
534 act = kmalloc(sizeof(*act), GFP_KERNEL); 532 act = kzalloc(sizeof(*act), GFP_KERNEL);
535 if (act == NULL) { 533 if (act == NULL) {
536 printk("create_a: failed to alloc!\n"); 534 printk("create_a: failed to alloc!\n");
537 return NULL; 535 return NULL;
538 } 536 }
539 memset(act, 0, sizeof(*act));
540 act->order = i; 537 act->order = i;
541 return act; 538 return act;
542} 539}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 58b3a8652042..f257475e0e0c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -209,10 +209,9 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key); 209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key);
210 210
211 /* netlink spinlocks held above us - must use ATOMIC */ 211 /* netlink spinlocks held above us - must use ATOMIC */
212 opt = kmalloc(s, GFP_ATOMIC); 212 opt = kzalloc(s, GFP_ATOMIC);
213 if (opt == NULL) 213 if (opt == NULL)
214 return -ENOBUFS; 214 return -ENOBUFS;
215 memset(opt, 0, s);
216 215
217 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key)); 216 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key));
218 opt->index = p->index; 217 opt->index = p->index;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 47e00bd9625e..da905d7b4b40 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -196,10 +196,9 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
196 return ret; 196 return ret;
197 } 197 }
198 198
199 p = kmalloc(sizeof(*p), GFP_KERNEL); 199 p = kzalloc(sizeof(*p), GFP_KERNEL);
200 if (p == NULL) 200 if (p == NULL)
201 return -ENOMEM; 201 return -ENOMEM;
202 memset(p, 0, sizeof(*p));
203 202
204 ret = ACT_P_CREATED; 203 ret = ACT_P_CREATED;
205 p->refcnt = 1; 204 p->refcnt = 1;
@@ -429,11 +428,10 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
429 return p; 428 return p;
430 } 429 }
431 430
432 p = kmalloc(sizeof(*p), GFP_KERNEL); 431 p = kzalloc(sizeof(*p), GFP_KERNEL);
433 if (p == NULL) 432 if (p == NULL)
434 return NULL; 433 return NULL;
435 434
436 memset(p, 0, sizeof(*p));
437 p->refcnt = 1; 435 p->refcnt = 1;
438 spin_lock_init(&p->lock); 436 spin_lock_init(&p->lock);
439 p->stats_lock = &p->lock; 437 p->stats_lock = &p->lock;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 61507f006b11..86cac49a0531 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -178,19 +178,17 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
178 178
179 err = -ENOBUFS; 179 err = -ENOBUFS;
180 if (head == NULL) { 180 if (head == NULL) {
181 head = kmalloc(sizeof(*head), GFP_KERNEL); 181 head = kzalloc(sizeof(*head), GFP_KERNEL);
182 if (head == NULL) 182 if (head == NULL)
183 goto errout; 183 goto errout;
184 184
185 memset(head, 0, sizeof(*head));
186 INIT_LIST_HEAD(&head->flist); 185 INIT_LIST_HEAD(&head->flist);
187 tp->root = head; 186 tp->root = head;
188 } 187 }
189 188
190 f = kmalloc(sizeof(*f), GFP_KERNEL); 189 f = kzalloc(sizeof(*f), GFP_KERNEL);
191 if (f == NULL) 190 if (f == NULL)
192 goto errout; 191 goto errout;
193 memset(f, 0, sizeof(*f));
194 192
195 err = -EINVAL; 193 err = -EINVAL;
196 if (handle) 194 if (handle)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index d41de91fc4f6..e6973d9b686d 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -267,20 +267,18 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
267 return -EINVAL; 267 return -EINVAL;
268 268
269 if (head == NULL) { 269 if (head == NULL) {
270 head = kmalloc(sizeof(struct fw_head), GFP_KERNEL); 270 head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
271 if (head == NULL) 271 if (head == NULL)
272 return -ENOBUFS; 272 return -ENOBUFS;
273 memset(head, 0, sizeof(*head));
274 273
275 tcf_tree_lock(tp); 274 tcf_tree_lock(tp);
276 tp->root = head; 275 tp->root = head;
277 tcf_tree_unlock(tp); 276 tcf_tree_unlock(tp);
278 } 277 }
279 278
280 f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL); 279 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
281 if (f == NULL) 280 if (f == NULL)
282 return -ENOBUFS; 281 return -ENOBUFS;
283 memset(f, 0, sizeof(*f));
284 282
285 f->id = handle; 283 f->id = handle;
286 284
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index c2e71900f7bd..d3aea730d4c8 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -396,10 +396,9 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
396 h1 = to_hash(nhandle); 396 h1 = to_hash(nhandle);
397 if ((b = head->table[h1]) == NULL) { 397 if ((b = head->table[h1]) == NULL) {
398 err = -ENOBUFS; 398 err = -ENOBUFS;
399 b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL); 399 b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
400 if (b == NULL) 400 if (b == NULL)
401 goto errout; 401 goto errout;
402 memset(b, 0, sizeof(*b));
403 402
404 tcf_tree_lock(tp); 403 tcf_tree_lock(tp);
405 head->table[h1] = b; 404 head->table[h1] = b;
@@ -475,20 +474,18 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
475 474
476 err = -ENOBUFS; 475 err = -ENOBUFS;
477 if (head == NULL) { 476 if (head == NULL) {
478 head = kmalloc(sizeof(struct route4_head), GFP_KERNEL); 477 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
479 if (head == NULL) 478 if (head == NULL)
480 goto errout; 479 goto errout;
481 memset(head, 0, sizeof(struct route4_head));
482 480
483 tcf_tree_lock(tp); 481 tcf_tree_lock(tp);
484 tp->root = head; 482 tp->root = head;
485 tcf_tree_unlock(tp); 483 tcf_tree_unlock(tp);
486 } 484 }
487 485
488 f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL); 486 f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
489 if (f == NULL) 487 if (f == NULL)
490 goto errout; 488 goto errout;
491 memset(f, 0, sizeof(*f));
492 489
493 err = route4_set_parms(tp, base, f, handle, head, tb, 490 err = route4_set_parms(tp, base, f, handle, head, tb,
494 tca[TCA_RATE-1], 1); 491 tca[TCA_RATE-1], 1);
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index ba8741971629..6e230ecfba05 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -240,9 +240,8 @@ static int rsvp_init(struct tcf_proto *tp)
240{ 240{
241 struct rsvp_head *data; 241 struct rsvp_head *data;
242 242
243 data = kmalloc(sizeof(struct rsvp_head), GFP_KERNEL); 243 data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
244 if (data) { 244 if (data) {
245 memset(data, 0, sizeof(struct rsvp_head));
246 tp->root = data; 245 tp->root = data;
247 return 0; 246 return 0;
248 } 247 }
@@ -446,11 +445,10 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
446 goto errout2; 445 goto errout2;
447 446
448 err = -ENOBUFS; 447 err = -ENOBUFS;
449 f = kmalloc(sizeof(struct rsvp_filter), GFP_KERNEL); 448 f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
450 if (f == NULL) 449 if (f == NULL)
451 goto errout2; 450 goto errout2;
452 451
453 memset(f, 0, sizeof(*f));
454 h2 = 16; 452 h2 = 16;
455 if (tb[TCA_RSVP_SRC-1]) { 453 if (tb[TCA_RSVP_SRC-1]) {
456 err = -EINVAL; 454 err = -EINVAL;
@@ -532,10 +530,9 @@ insert:
532 /* No session found. Create new one. */ 530 /* No session found. Create new one. */
533 531
534 err = -ENOBUFS; 532 err = -ENOBUFS;
535 s = kmalloc(sizeof(struct rsvp_session), GFP_KERNEL); 533 s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
536 if (s == NULL) 534 if (s == NULL)
537 goto errout; 535 goto errout;
538 memset(s, 0, sizeof(*s));
539 memcpy(s->dst, dst, sizeof(s->dst)); 536 memcpy(s->dst, dst, sizeof(s->dst));
540 537
541 if (pinfo) { 538 if (pinfo) {
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 7870e7bb0bac..5af8a59e1503 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -148,11 +148,10 @@ static int tcindex_init(struct tcf_proto *tp)
148 struct tcindex_data *p; 148 struct tcindex_data *p;
149 149
150 DPRINTK("tcindex_init(tp %p)\n",tp); 150 DPRINTK("tcindex_init(tp %p)\n",tp);
151 p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL); 151 p = kzalloc(sizeof(struct tcindex_data),GFP_KERNEL);
152 if (!p) 152 if (!p)
153 return -ENOMEM; 153 return -ENOMEM;
154 154
155 memset(p, 0, sizeof(*p));
156 p->mask = 0xffff; 155 p->mask = 0xffff;
157 p->hash = DEFAULT_HASH_SIZE; 156 p->hash = DEFAULT_HASH_SIZE;
158 p->fall_through = 1; 157 p->fall_through = 1;
@@ -296,16 +295,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
296 err = -ENOMEM; 295 err = -ENOMEM;
297 if (!cp.perfect && !cp.h) { 296 if (!cp.perfect && !cp.h) {
298 if (valid_perfect_hash(&cp)) { 297 if (valid_perfect_hash(&cp)) {
299 cp.perfect = kmalloc(cp.hash * sizeof(*r), GFP_KERNEL); 298 cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
300 if (!cp.perfect) 299 if (!cp.perfect)
301 goto errout; 300 goto errout;
302 memset(cp.perfect, 0, cp.hash * sizeof(*r));
303 balloc = 1; 301 balloc = 1;
304 } else { 302 } else {
305 cp.h = kmalloc(cp.hash * sizeof(f), GFP_KERNEL); 303 cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
306 if (!cp.h) 304 if (!cp.h)
307 goto errout; 305 goto errout;
308 memset(cp.h, 0, cp.hash * sizeof(f));
309 balloc = 2; 306 balloc = 2;
310 } 307 }
311 } 308 }
@@ -316,10 +313,9 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
316 r = tcindex_lookup(&cp, handle) ? : &new_filter_result; 313 r = tcindex_lookup(&cp, handle) ? : &new_filter_result;
317 314
318 if (r == &new_filter_result) { 315 if (r == &new_filter_result) {
319 f = kmalloc(sizeof(*f), GFP_KERNEL); 316 f = kzalloc(sizeof(*f), GFP_KERNEL);
320 if (!f) 317 if (!f)
321 goto errout_alloc; 318 goto errout_alloc;
322 memset(f, 0, sizeof(*f));
323 } 319 }
324 320
325 if (tb[TCA_TCINDEX_CLASSID-1]) { 321 if (tb[TCA_TCINDEX_CLASSID-1]) {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d712edcd1bcf..eea366966740 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -307,23 +307,21 @@ static int u32_init(struct tcf_proto *tp)
307 if (tp_c->q == tp->q) 307 if (tp_c->q == tp->q)
308 break; 308 break;
309 309
310 root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL); 310 root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
311 if (root_ht == NULL) 311 if (root_ht == NULL)
312 return -ENOBUFS; 312 return -ENOBUFS;
313 313
314 memset(root_ht, 0, sizeof(*root_ht));
315 root_ht->divisor = 0; 314 root_ht->divisor = 0;
316 root_ht->refcnt++; 315 root_ht->refcnt++;
317 root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000; 316 root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
318 root_ht->prio = tp->prio; 317 root_ht->prio = tp->prio;
319 318
320 if (tp_c == NULL) { 319 if (tp_c == NULL) {
321 tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL); 320 tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
322 if (tp_c == NULL) { 321 if (tp_c == NULL) {
323 kfree(root_ht); 322 kfree(root_ht);
324 return -ENOBUFS; 323 return -ENOBUFS;
325 } 324 }
326 memset(tp_c, 0, sizeof(*tp_c));
327 tp_c->q = tp->q; 325 tp_c->q = tp->q;
328 tp_c->next = u32_list; 326 tp_c->next = u32_list;
329 u32_list = tp_c; 327 u32_list = tp_c;
@@ -571,10 +569,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
571 if (handle == 0) 569 if (handle == 0)
572 return -ENOMEM; 570 return -ENOMEM;
573 } 571 }
574 ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); 572 ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
575 if (ht == NULL) 573 if (ht == NULL)
576 return -ENOBUFS; 574 return -ENOBUFS;
577 memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*));
578 ht->tp_c = tp_c; 575 ht->tp_c = tp_c;
579 ht->refcnt = 0; 576 ht->refcnt = 0;
580 ht->divisor = divisor; 577 ht->divisor = divisor;
@@ -617,18 +614,16 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
617 614
618 s = RTA_DATA(tb[TCA_U32_SEL-1]); 615 s = RTA_DATA(tb[TCA_U32_SEL-1]);
619 616
620 n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); 617 n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
621 if (n == NULL) 618 if (n == NULL)
622 return -ENOBUFS; 619 return -ENOBUFS;
623 620
624 memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
625#ifdef CONFIG_CLS_U32_PERF 621#ifdef CONFIG_CLS_U32_PERF
626 n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); 622 n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
627 if (n->pf == NULL) { 623 if (n->pf == NULL) {
628 kfree(n); 624 kfree(n);
629 return -ENOBUFS; 625 return -ENOBUFS;
630 } 626 }
631 memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64));
632#endif 627#endif
633 628
634 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); 629 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 698372954f4d..61e3b740ab1a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -773,10 +773,9 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
773 TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX) 773 TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
774 goto errout; 774 goto errout;
775 775
776 meta = kmalloc(sizeof(*meta), GFP_KERNEL); 776 meta = kzalloc(sizeof(*meta), GFP_KERNEL);
777 if (meta == NULL) 777 if (meta == NULL)
778 goto errout; 778 goto errout;
779 memset(meta, 0, sizeof(*meta));
780 779
781 memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left)); 780 memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
782 memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right)); 781 memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 2405a86093a2..0fd0768a17c6 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -321,10 +321,9 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct rtattr *rta,
321 list_len = RTA_PAYLOAD(rt_list); 321 list_len = RTA_PAYLOAD(rt_list);
322 matches_len = tree_hdr->nmatches * sizeof(*em); 322 matches_len = tree_hdr->nmatches * sizeof(*em);
323 323
324 tree->matches = kmalloc(matches_len, GFP_KERNEL); 324 tree->matches = kzalloc(matches_len, GFP_KERNEL);
325 if (tree->matches == NULL) 325 if (tree->matches == NULL)
326 goto errout; 326 goto errout;
327 memset(tree->matches, 0, matches_len);
328 327
329 /* We do not use rtattr_parse_nested here because the maximum 328 /* We do not use rtattr_parse_nested here because the maximum
330 * number of attributes is unknown. This saves us the allocation 329 * number of attributes is unknown. This saves us the allocation
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
index 5d3ae03e22a7..0ebc98e9be2d 100644
--- a/net/sched/estimator.c
+++ b/net/sched/estimator.c
@@ -139,11 +139,10 @@ int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct r
139 if (parm->interval < -2 || parm->interval > 3) 139 if (parm->interval < -2 || parm->interval > 3)
140 return -EINVAL; 140 return -EINVAL;
141 141
142 est = kmalloc(sizeof(*est), GFP_KERNEL); 142 est = kzalloc(sizeof(*est), GFP_KERNEL);
143 if (est == NULL) 143 if (est == NULL)
144 return -ENOBUFS; 144 return -ENOBUFS;
145 145
146 memset(est, 0, sizeof(*est));
147 est->interval = parm->interval + 2; 146 est->interval = parm->interval + 2;
148 est->stats = stats; 147 est->stats = stats;
149 est->stats_lock = stats_lock; 148 est->stats_lock = stats_lock;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 80b7f6a8d008..bac881bfe362 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1926,10 +1926,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1926 } 1926 }
1927 1927
1928 err = -ENOBUFS; 1928 err = -ENOBUFS;
1929 cl = kmalloc(sizeof(*cl), GFP_KERNEL); 1929 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1930 if (cl == NULL) 1930 if (cl == NULL)
1931 goto failure; 1931 goto failure;
1932 memset(cl, 0, sizeof(*cl));
1933 cl->R_tab = rtab; 1932 cl->R_tab = rtab;
1934 rtab = NULL; 1933 rtab = NULL;
1935 cl->refcnt = 1; 1934 cl->refcnt = 1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d735f51686a1..0834c2ee9174 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -432,10 +432,9 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
432 size = QDISC_ALIGN(sizeof(*sch)); 432 size = QDISC_ALIGN(sizeof(*sch));
433 size += ops->priv_size + (QDISC_ALIGNTO - 1); 433 size += ops->priv_size + (QDISC_ALIGNTO - 1);
434 434
435 p = kmalloc(size, GFP_KERNEL); 435 p = kzalloc(size, GFP_KERNEL);
436 if (!p) 436 if (!p)
437 goto errout; 437 goto errout;
438 memset(p, 0, size);
439 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 438 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
440 sch->padded = (char *) sch - (char *) p; 439 sch->padded = (char *) sch - (char *) p;
441 440
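qdisc_alloc() over-allocates by QDISC_ALIGNTO - 1 bytes, rounds the returned pointer up, and stores the offset in sch->padded so the original allocation can be recovered at free time. A userspace sketch of that padding trick; the 32-byte alignment and the struct layout are assumptions for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define ALIGNTO         32UL                    /* stand-in for QDISC_ALIGNTO */
#define ALIGN_UP(x)     (((x) + ALIGNTO - 1) & ~(ALIGNTO - 1))

struct qd {
        size_t padded;          /* bytes between the raw and the aligned pointer */
        char   priv[64];        /* placeholder for ops->priv_size */
};

static struct qd *qd_alloc(void)
{
        size_t size = ALIGN_UP(sizeof(struct qd)) + ALIGNTO - 1;
        char *p = calloc(1, size);              /* zeroed, like the kzalloc above */
        struct qd *q;

        if (!p)
                return NULL;
        q = (struct qd *)ALIGN_UP((uintptr_t)p);
        q->padded = (size_t)((char *)q - p);    /* remember how far we skipped */
        return q;
}

static void qd_free(struct qd *q)
{
        if (q)
                free((char *)q - q->padded);
}

int main(void)
{
        struct qd *q = qd_alloc();

        if (q)
                printf("aligned to %lu: %s\n", ALIGNTO,
                       ((uintptr_t)q % ALIGNTO) == 0 ? "yes" : "no");
        qd_free(q);
        return 0;
}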
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0cafdd5feb1b..18e81a8ffb01 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -406,10 +406,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
406 struct gred_sched_data *q; 406 struct gred_sched_data *q;
407 407
408 if (table->tab[dp] == NULL) { 408 if (table->tab[dp] == NULL) {
409 table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL); 409 table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
410 if (table->tab[dp] == NULL) 410 if (table->tab[dp] == NULL)
411 return -ENOMEM; 411 return -ENOMEM;
412 memset(table->tab[dp], 0, sizeof(*q));
413 } 412 }
414 413
415 q = table->tab[dp]; 414 q = table->tab[dp];
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6b1b4a981e88..6a6735a2ed35 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1123,10 +1123,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1123 if (rsc == NULL && fsc == NULL) 1123 if (rsc == NULL && fsc == NULL)
1124 return -EINVAL; 1124 return -EINVAL;
1125 1125
1126 cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL); 1126 cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
1127 if (cl == NULL) 1127 if (cl == NULL)
1128 return -ENOBUFS; 1128 return -ENOBUFS;
1129 memset(cl, 0, sizeof(struct hfsc_class));
1130 1129
1131 if (rsc != NULL) 1130 if (rsc != NULL)
1132 hfsc_change_rsc(cl, rsc, 0); 1131 hfsc_change_rsc(cl, rsc, 0);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 34afe41fa2f3..880a3394a51f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -196,7 +196,7 @@ struct htb_class
196 struct qdisc_rate_table *rate; /* rate table of the class itself */ 196 struct qdisc_rate_table *rate; /* rate table of the class itself */
197 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ 197 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
198 long buffer,cbuffer; /* token bucket depth/rate */ 198 long buffer,cbuffer; /* token bucket depth/rate */
199 long mbuffer; /* max wait time */ 199 psched_tdiff_t mbuffer; /* max wait time */
200 long tokens,ctokens; /* current number of tokens */ 200 long tokens,ctokens; /* current number of tokens */
201 psched_time_t t_c; /* checkpoint time */ 201 psched_time_t t_c; /* checkpoint time */
202}; 202};
@@ -1559,10 +1559,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1559 goto failure; 1559 goto failure;
1560 } 1560 }
1561 err = -ENOBUFS; 1561 err = -ENOBUFS;
1562 if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1562 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1563 goto failure; 1563 goto failure;
1564 1564
1565 memset(cl, 0, sizeof(*cl));
1566 cl->refcnt = 1; 1565 cl->refcnt = 1;
1567 INIT_LIST_HEAD(&cl->sibling); 1566 INIT_LIST_HEAD(&cl->sibling);
1568 INIT_LIST_HEAD(&cl->hlist); 1567 INIT_LIST_HEAD(&cl->hlist);
@@ -1601,7 +1600,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1601 /* set class to be in HTB_CAN_SEND state */ 1600 /* set class to be in HTB_CAN_SEND state */
1602 cl->tokens = hopt->buffer; 1601 cl->tokens = hopt->buffer;
1603 cl->ctokens = hopt->cbuffer; 1602 cl->ctokens = hopt->cbuffer;
1604 cl->mbuffer = 60000000; /* 1min */ 1603 cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */
1605 PSCHED_GET_TIME(cl->t_c); 1604 PSCHED_GET_TIME(cl->t_c);
1606 cl->cmode = HTB_CAN_SEND; 1605 cl->cmode = HTB_CAN_SEND;
1607 1606
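The magic 60000000 being replaced was one minute expressed in microseconds; psched_tdiff_t plus PSCHED_JIFFIE2US(HZ*60) keeps mbuffer correct for whatever packet-scheduler clock the kernel is configured with. A rough sketch of the jiffies-based conversion only; HZ and the macro body below are assumptions, and the real macro depends on the chosen PSCHED clock source:

#include <stdio.h>

#define HZ              250                     /* assumed tick rate */
#define JIFFIE2US(j)    ((j) * (1000000L / HZ)) /* microseconds per jiffy */

int main(void)
{
        /* one minute of jiffies in scheduler microseconds, roughly the
         * 60000000 constant the patch replaces */
        printf("%ld\n", JIFFIE2US(60L * HZ));
        return 0;
}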
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c5bd8064e6d8..a08ec4c7c55d 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -148,7 +148,8 @@ static long tabledist(unsigned long mu, long sigma,
148static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) 148static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
149{ 149{
150 struct netem_sched_data *q = qdisc_priv(sch); 150 struct netem_sched_data *q = qdisc_priv(sch);
151 struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; 151 /* We don't fill cb now as skb_unshare() may invalidate it */
152 struct netem_skb_cb *cb;
152 struct sk_buff *skb2; 153 struct sk_buff *skb2;
153 int ret; 154 int ret;
154 int count = 1; 155 int count = 1;
@@ -200,6 +201,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
200 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 201 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
201 } 202 }
202 203
204 cb = (struct netem_skb_cb *)skb->cb;
203 if (q->gap == 0 /* not doing reordering */ 205 if (q->gap == 0 /* not doing reordering */
204 || q->counter < q->gap /* inside last reordering gap */ 206 || q->counter < q->gap /* inside last reordering gap */
205 || q->reorder < get_crandom(&q->reorder_cor)) { 207 || q->reorder < get_crandom(&q->reorder_cor)) {
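The cb pointer is now taken only after the skb_unshare()/skb_clone() calls, because those can hand back a different skb and leave a pre-computed skb->cb pointer aimed at the old buffer. A userspace analogue of that stale-pointer bug; struct buf and unshare() are stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char cb[16]; };

/* stand-in for skb_unshare(): returns a private copy when shared */
static struct buf *unshare(struct buf *b, int shared)
{
        struct buf *copy;

        if (!shared)
                return b;
        copy = malloc(sizeof(*copy));
        memcpy(copy, b, sizeof(*copy));
        return copy;
}

int main(void)
{
        struct buf *b = calloc(1, sizeof(*b));
        char *cb = b->cb;               /* buggy pattern: cached before unshare */

        b = unshare(b, 1);              /* b may now point at a new buffer */
        strcpy(cb, "stale");            /* writes land in the old buffer */
        strcpy(b->cb, "fresh");         /* the fix: take b->cb only after unshare */
        printf("%s\n", b->cb);          /* prints "fresh" */
        return 0;
}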
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9d05e13e92f6..27329ce9c311 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -441,7 +441,8 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
441 /* If the primary path is changing, assume that the 441 /* If the primary path is changing, assume that the
442 * user wants to use this new path. 442 * user wants to use this new path.
443 */ 443 */
444 if (transport->state != SCTP_INACTIVE) 444 if ((transport->state == SCTP_ACTIVE) ||
445 (transport->state == SCTP_UNKNOWN))
445 asoc->peer.active_path = transport; 446 asoc->peer.active_path = transport;
446 447
447 /* 448 /*
@@ -532,11 +533,11 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
532 port = addr->v4.sin_port; 533 port = addr->v4.sin_port;
533 534
534 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", 535 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
535 " port: %d state:%s\n", 536 " port: %d state:%d\n",
536 asoc, 537 asoc,
537 addr, 538 addr,
538 addr->v4.sin_port, 539 addr->v4.sin_port,
539 peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE"); 540 peer_state);
540 541
541 /* Set the port if it has not been set yet. */ 542 /* Set the port if it has not been set yet. */
542 if (0 == asoc->peer.port) 543 if (0 == asoc->peer.port)
@@ -545,9 +546,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
545 /* Check to see if this is a duplicate. */ 546 /* Check to see if this is a duplicate. */
546 peer = sctp_assoc_lookup_paddr(asoc, addr); 547 peer = sctp_assoc_lookup_paddr(asoc, addr);
547 if (peer) { 548 if (peer) {
548 if (peer_state == SCTP_ACTIVE && 549 if (peer->state == SCTP_UNKNOWN) {
549 peer->state == SCTP_UNKNOWN) 550 if (peer_state == SCTP_ACTIVE)
550 peer->state = SCTP_ACTIVE; 551 peer->state = SCTP_ACTIVE;
552 if (peer_state == SCTP_UNCONFIRMED)
553 peer->state = SCTP_UNCONFIRMED;
554 }
551 return peer; 555 return peer;
552 } 556 }
553 557
@@ -739,7 +743,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
739 list_for_each(pos, &asoc->peer.transport_addr_list) { 743 list_for_each(pos, &asoc->peer.transport_addr_list) {
740 t = list_entry(pos, struct sctp_transport, transports); 744 t = list_entry(pos, struct sctp_transport, transports);
741 745
742 if (t->state == SCTP_INACTIVE) 746 if ((t->state == SCTP_INACTIVE) ||
747 (t->state == SCTP_UNCONFIRMED))
743 continue; 748 continue;
744 if (!first || t->last_time_heard > first->last_time_heard) { 749 if (!first || t->last_time_heard > first->last_time_heard) {
745 second = first; 750 second = first;
@@ -759,7 +764,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
759 * [If the primary is active but not most recent, bump the most 764 * [If the primary is active but not most recent, bump the most
760 * recently used transport.] 765 * recently used transport.]
761 */ 766 */
762 if (asoc->peer.primary_path->state != SCTP_INACTIVE && 767 if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
768 (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
763 first != asoc->peer.primary_path) { 769 first != asoc->peer.primary_path) {
764 second = first; 770 second = first;
765 first = asoc->peer.primary_path; 771 first = asoc->peer.primary_path;
@@ -1054,7 +1060,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
1054 transports); 1060 transports);
1055 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) 1061 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1056 sctp_assoc_add_peer(asoc, &trans->ipaddr, 1062 sctp_assoc_add_peer(asoc, &trans->ipaddr,
1057 GFP_ATOMIC, SCTP_ACTIVE); 1063 GFP_ATOMIC, trans->state);
1058 } 1064 }
1059 1065
1060 asoc->ctsn_ack_point = asoc->next_tsn - 1; 1066 asoc->ctsn_ack_point = asoc->next_tsn - 1;
@@ -1094,7 +1100,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1094 1100
1095 /* Try to find an active transport. */ 1101 /* Try to find an active transport. */
1096 1102
1097 if (t->state != SCTP_INACTIVE) { 1103 if ((t->state == SCTP_ACTIVE) ||
1104 (t->state == SCTP_UNKNOWN)) {
1098 break; 1105 break;
1099 } else { 1106 } else {
1100 /* Keep track of the next transport in case 1107 /* Keep track of the next transport in case
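Most of the hunks in this file replace the old "state != SCTP_INACTIVE" tests because the new SCTP_UNCONFIRMED state (an address learned from the peer but not yet verified by a heartbeat) must be excluded from path selection even though it is not a failed path. A tiny model of that split; the enum values are illustrative, not the kernel's:

#include <stdio.h>

enum t_state { T_INACTIVE, T_UNCONFIRMED, T_UNKNOWN, T_ACTIVE };

/* may carry user data / be chosen as the primary or retransmit path */
static int transport_usable(enum t_state s)
{
        return s == T_ACTIVE || s == T_UNKNOWN;
}

/* skipped when hunting for a fallback destination */
static int transport_skipped(enum t_state s)
{
        return s == T_INACTIVE || s == T_UNCONFIRMED;
}

int main(void)
{
        printf("UNCONFIRMED usable=%d skipped=%d\n",
               transport_usable(T_UNCONFIRMED),
               transport_skipped(T_UNCONFIRMED));
        return 0;
}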
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 2b962627f631..2b9c12a170e5 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -146,7 +146,7 @@ void sctp_bind_addr_free(struct sctp_bind_addr *bp)
146 146
147/* Add an address to the bind address list in the SCTP_bind_addr structure. */ 147/* Add an address to the bind address list in the SCTP_bind_addr structure. */
148int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, 148int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
149 gfp_t gfp) 149 __u8 use_as_src, gfp_t gfp)
150{ 150{
151 struct sctp_sockaddr_entry *addr; 151 struct sctp_sockaddr_entry *addr;
152 152
@@ -163,6 +163,8 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
163 if (!addr->a.v4.sin_port) 163 if (!addr->a.v4.sin_port)
164 addr->a.v4.sin_port = bp->port; 164 addr->a.v4.sin_port = bp->port;
165 165
166 addr->use_as_src = use_as_src;
167
166 INIT_LIST_HEAD(&addr->list); 168 INIT_LIST_HEAD(&addr->list);
167 list_add_tail(&addr->list, &bp->address_list); 169 list_add_tail(&addr->list, &bp->address_list);
168 SCTP_DBG_OBJCNT_INC(addr); 170 SCTP_DBG_OBJCNT_INC(addr);
@@ -274,7 +276,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
274 } 276 }
275 277
276 af->from_addr_param(&addr, rawaddr, port, 0); 278 af->from_addr_param(&addr, rawaddr, port, 0);
277 retval = sctp_add_bind_addr(bp, &addr, gfp); 279 retval = sctp_add_bind_addr(bp, &addr, 1, gfp);
278 if (retval) { 280 if (retval) {
279 /* Can't finish building the list, clean up. */ 281 /* Can't finish building the list, clean up. */
280 sctp_bind_addr_clean(bp); 282 sctp_bind_addr_clean(bp);
@@ -367,7 +369,7 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
367 (((AF_INET6 == addr->sa.sa_family) && 369 (((AF_INET6 == addr->sa.sa_family) &&
368 (flags & SCTP_ADDR6_ALLOWED) && 370 (flags & SCTP_ADDR6_ALLOWED) &&
369 (flags & SCTP_ADDR6_PEERSUPP)))) 371 (flags & SCTP_ADDR6_PEERSUPP))))
370 error = sctp_add_bind_addr(dest, addr, gfp); 372 error = sctp_add_bind_addr(dest, addr, 1, gfp);
371 } 373 }
372 374
373 return error; 375 return error;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 67bd53070ee0..ffda1d680529 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -158,6 +158,12 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
158void sctp_endpoint_free(struct sctp_endpoint *ep) 158void sctp_endpoint_free(struct sctp_endpoint *ep)
159{ 159{
160 ep->base.dead = 1; 160 ep->base.dead = 1;
161
162 ep->base.sk->sk_state = SCTP_SS_CLOSED;
163
164 /* Unlink this endpoint, so we can't find it again! */
165 sctp_unhash_endpoint(ep);
166
161 sctp_endpoint_put(ep); 167 sctp_endpoint_put(ep);
162} 168}
163 169
@@ -166,11 +172,6 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
166{ 172{
167 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
168 174
169 ep->base.sk->sk_state = SCTP_SS_CLOSED;
170
171 /* Unlink this endpoint, so we can't find it again! */
172 sctp_unhash_endpoint(ep);
173
174 /* Free up the HMAC transform. */ 175 /* Free up the HMAC transform. */
175 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); 176 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
176 177
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 8ef08070c8b6..99c0cefc04e0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -290,7 +290,8 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
290 sctp_read_lock(addr_lock); 290 sctp_read_lock(addr_lock);
291 list_for_each(pos, &bp->address_list) { 291 list_for_each(pos, &bp->address_list) {
292 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 292 laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
293 if ((laddr->a.sa.sa_family == AF_INET6) && 293 if ((laddr->use_as_src) &&
294 (laddr->a.sa.sa_family == AF_INET6) &&
294 (scope <= sctp_scope(&laddr->a))) { 295 (scope <= sctp_scope(&laddr->a))) {
295 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 296 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
296 if (!baddr || (matchlen < bmatchlen)) { 297 if (!baddr || (matchlen < bmatchlen)) {
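Source-address selection here (and in the IPv4 path in protocol.c further down) now skips bind-list entries whose use_as_src flag is clear, i.e. addresses added via ASCONF that the peer has not yet acknowledged. A minimal sketch of filtering candidates on that flag, with the scope and prefix-length scoring of the real code left out:

#include <stdio.h>
#include <stddef.h>

struct laddr { const char *addr; int use_as_src; };

static const char *pick_saddr(const struct laddr *list, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (list[i].use_as_src)
                        return list[i].addr;
        return NULL;
}

int main(void)
{
        struct laddr bind_list[] = {
                { "2001:db8::1", 0 },   /* ASCONF-added, not yet confirmed */
                { "2001:db8::2", 1 },
        };

        printf("%s\n", pick_saddr(bind_list, 2));
        return 0;
}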
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e5faa351aaad..30b710c54e64 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -691,7 +691,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
691 691
692 if (!new_transport) { 692 if (!new_transport) {
693 new_transport = asoc->peer.active_path; 693 new_transport = asoc->peer.active_path;
694 } else if (new_transport->state == SCTP_INACTIVE) { 694 } else if ((new_transport->state == SCTP_INACTIVE) ||
695 (new_transport->state == SCTP_UNCONFIRMED)) {
695 /* If the chunk is Heartbeat or Heartbeat Ack, 696 /* If the chunk is Heartbeat or Heartbeat Ack,
696 * send it to chunk->transport, even if it's 697 * send it to chunk->transport, even if it's
697 * inactive. 698 * inactive.
@@ -848,7 +849,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
848 */ 849 */
849 new_transport = chunk->transport; 850 new_transport = chunk->transport;
850 if (!new_transport || 851 if (!new_transport ||
851 new_transport->state == SCTP_INACTIVE) 852 ((new_transport->state == SCTP_INACTIVE) ||
853 (new_transport->state == SCTP_UNCONFIRMED)))
852 new_transport = asoc->peer.active_path; 854 new_transport = asoc->peer.active_path;
853 855
854 /* Change packets if necessary. */ 856 /* Change packets if necessary. */
@@ -1464,7 +1466,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1464 /* Mark the destination transport address as 1466 /* Mark the destination transport address as
1465 * active if it is not so marked. 1467 * active if it is not so marked.
1466 */ 1468 */
1467 if (transport->state == SCTP_INACTIVE) { 1469 if ((transport->state == SCTP_INACTIVE) ||
1470 (transport->state == SCTP_UNCONFIRMED)) {
1468 sctp_assoc_control_transport( 1471 sctp_assoc_control_transport(
1469 transport->asoc, 1472 transport->asoc,
1470 transport, 1473 transport,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 816c033d7886..1ab03a27a76e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -240,7 +240,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
240 (((AF_INET6 == addr->a.sa.sa_family) && 240 (((AF_INET6 == addr->a.sa.sa_family) &&
241 (copy_flags & SCTP_ADDR6_ALLOWED) && 241 (copy_flags & SCTP_ADDR6_ALLOWED) &&
242 (copy_flags & SCTP_ADDR6_PEERSUPP)))) { 242 (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
243 error = sctp_add_bind_addr(bp, &addr->a, 243 error = sctp_add_bind_addr(bp, &addr->a, 1,
244 GFP_ATOMIC); 244 GFP_ATOMIC);
245 if (error) 245 if (error)
246 goto end_copy; 246 goto end_copy;
@@ -486,6 +486,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
486 list_for_each(pos, &bp->address_list) { 486 list_for_each(pos, &bp->address_list) {
487 laddr = list_entry(pos, struct sctp_sockaddr_entry, 487 laddr = list_entry(pos, struct sctp_sockaddr_entry,
488 list); 488 list);
489 if (!laddr->use_as_src)
490 continue;
489 sctp_v4_dst_saddr(&dst_saddr, dst, bp->port); 491 sctp_v4_dst_saddr(&dst_saddr, dst, bp->port);
490 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 492 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
491 goto out_unlock; 493 goto out_unlock;
@@ -506,7 +508,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
506 list_for_each(pos, &bp->address_list) { 508 list_for_each(pos, &bp->address_list) {
507 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 509 laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
508 510
509 if (AF_INET == laddr->a.sa.sa_family) { 511 if ((laddr->use_as_src) &&
512 (AF_INET == laddr->a.sa.sa_family)) {
510 fl.fl4_src = laddr->a.v4.sin_addr.s_addr; 513 fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
511 if (!ip_route_output_key(&rt, &fl)) { 514 if (!ip_route_output_key(&rt, &fl)) {
512 dst = &rt->u.dst; 515 dst = &rt->u.dst;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2a8773691695..4f11f5858209 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1493,7 +1493,7 @@ no_hmac:
1493 1493
1494 /* Also, add the destination address. */ 1494 /* Also, add the destination address. */
1495 if (list_empty(&retval->base.bind_addr.address_list)) { 1495 if (list_empty(&retval->base.bind_addr.address_list)) {
1496 sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1496 sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
1497 GFP_ATOMIC); 1497 GFP_ATOMIC);
1498 } 1498 }
1499 1499
@@ -2017,7 +2017,7 @@ static int sctp_process_param(struct sctp_association *asoc,
2017 af->from_addr_param(&addr, param.addr, asoc->peer.port, 0); 2017 af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);
2018 scope = sctp_scope(peer_addr); 2018 scope = sctp_scope(peer_addr);
2019 if (sctp_in_scope(&addr, scope)) 2019 if (sctp_in_scope(&addr, scope))
2020 if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE)) 2020 if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
2021 return 0; 2021 return 0;
2022 break; 2022 break;
2023 2023
@@ -2418,7 +2418,7 @@ static __u16 sctp_process_asconf_param(struct sctp_association *asoc,
2418 * Due to Resource Shortage'. 2418 * Due to Resource Shortage'.
2419 */ 2419 */
2420 2420
2421 peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE); 2421 peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
2422 if (!peer) 2422 if (!peer)
2423 return SCTP_ERROR_RSRC_LOW; 2423 return SCTP_ERROR_RSRC_LOW;
2424 2424
@@ -2565,6 +2565,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
2565 union sctp_addr_param *addr_param; 2565 union sctp_addr_param *addr_param;
2566 struct list_head *pos; 2566 struct list_head *pos;
2567 struct sctp_transport *transport; 2567 struct sctp_transport *transport;
2568 struct sctp_sockaddr_entry *saddr;
2568 int retval = 0; 2569 int retval = 0;
2569 2570
2570 addr_param = (union sctp_addr_param *) 2571 addr_param = (union sctp_addr_param *)
@@ -2578,7 +2579,11 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
2578 case SCTP_PARAM_ADD_IP: 2579 case SCTP_PARAM_ADD_IP:
2579 sctp_local_bh_disable(); 2580 sctp_local_bh_disable();
2580 sctp_write_lock(&asoc->base.addr_lock); 2581 sctp_write_lock(&asoc->base.addr_lock);
2581 retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC); 2582 list_for_each(pos, &bp->address_list) {
2583 saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
2584 if (sctp_cmp_addr_exact(&saddr->a, &addr))
2585 saddr->use_as_src = 1;
2586 }
2582 sctp_write_unlock(&asoc->base.addr_lock); 2587 sctp_write_unlock(&asoc->base.addr_lock);
2583 sctp_local_bh_enable(); 2588 sctp_local_bh_enable();
2584 break; 2589 break;
@@ -2591,6 +2596,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
2591 list_for_each(pos, &asoc->peer.transport_addr_list) { 2596 list_for_each(pos, &asoc->peer.transport_addr_list) {
2592 transport = list_entry(pos, struct sctp_transport, 2597 transport = list_entry(pos, struct sctp_transport,
2593 transports); 2598 transports);
2599 dst_release(transport->dst);
2594 sctp_transport_route(transport, NULL, 2600 sctp_transport_route(transport, NULL,
2595 sctp_sk(asoc->base.sk)); 2601 sctp_sk(asoc->base.sk));
2596 } 2602 }
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c5beb2ad7ef7..9c10bdec1afe 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -430,7 +430,11 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
430 /* The check for association's overall error counter exceeding the 430 /* The check for association's overall error counter exceeding the
431 * threshold is done in the state function. 431 * threshold is done in the state function.
432 */ 432 */
433 asoc->overall_error_count++; 433 /* When probing UNCONFIRMED addresses, the association overall
434 * error count is NOT incremented
435 */
436 if (transport->state != SCTP_UNCONFIRMED)
437 asoc->overall_error_count++;
434 438
435 if (transport->state != SCTP_INACTIVE && 439 if (transport->state != SCTP_INACTIVE &&
436 (transport->error_count++ >= transport->pathmaxrxt)) { 440 (transport->error_count++ >= transport->pathmaxrxt)) {
@@ -610,7 +614,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
610 /* Mark the destination transport address as active if it is not so 614 /* Mark the destination transport address as active if it is not so
611 * marked. 615 * marked.
612 */ 616 */
613 if (t->state == SCTP_INACTIVE) 617 if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
614 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, 618 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
615 SCTP_HEARTBEAT_SUCCESS); 619 SCTP_HEARTBEAT_SUCCESS);
616 620
@@ -620,6 +624,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
620 */ 624 */
621 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; 625 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
622 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); 626 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
627
628 /* Update the heartbeat timer. */
629 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
630 sctp_transport_hold(t);
623} 631}
624 632
625/* Helper function to do a transport reset at the expiry of the hearbeat 633/* Helper function to do a transport reset at the expiry of the hearbeat
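Besides not charging heartbeat probes of UNCONFIRMED addresses to the association error counter, the hunk re-arms the heartbeat timer on a successful ACK and takes an extra reference when mod_timer() reports the timer was not already pending, so the transport cannot go away while the timer is outstanding. A plain-C sketch of that timer-owns-a-reference convention; these helpers only model the rule and are not the kernel API:

#include <stdio.h>

struct transport { int refcnt; int timer_pending; };

static void hold(struct transport *t) { t->refcnt++; }

static void put(struct transport *t)
{
        if (--t->refcnt == 0)
                printf("last reference dropped, transport destroyed\n");
}

/* mod_timer() convention: returns nonzero if the timer was already pending */
static int mod_timer_sim(struct transport *t)
{
        int was_pending = t->timer_pending;

        t->timer_pending = 1;
        return was_pending;
}

static void rearm_heartbeat(struct transport *t)
{
        if (!mod_timer_sim(t))
                hold(t);        /* the newly pending timer owns one reference */
}

static void heartbeat_fires(struct transport *t)
{
        t->timer_pending = 0;
        /* ... send the heartbeat ... */
        put(t);                 /* drop the timer's reference */
}

int main(void)
{
        struct transport t = { 1, 0 };  /* one user reference, no timer */

        rearm_heartbeat(&t);
        heartbeat_fires(&t);
        put(&t);                        /* user drops its reference */
        return 0;
}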
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9e58144f4851..ead3f1b0ea3d 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -846,6 +846,7 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
846 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); 846 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
847 hbinfo.daddr = transport->ipaddr; 847 hbinfo.daddr = transport->ipaddr;
848 hbinfo.sent_at = jiffies; 848 hbinfo.sent_at = jiffies;
849 hbinfo.hb_nonce = transport->hb_nonce;
849 850
850 /* Send a heartbeat to our peer. */ 851 /* Send a heartbeat to our peer. */
851 paylen = sizeof(sctp_sender_hb_info_t); 852 paylen = sizeof(sctp_sender_hb_info_t);
@@ -1048,6 +1049,10 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1048 return SCTP_DISPOSITION_DISCARD; 1049 return SCTP_DISPOSITION_DISCARD;
1049 } 1050 }
1050 1051
1052 /* Validate the 64-bit random nonce. */
1053 if (hbinfo->hb_nonce != link->hb_nonce)
1054 return SCTP_DISPOSITION_DISCARD;
1055
1051 max_interval = link->hbinterval + link->rto; 1056 max_interval = link->hbinterval + link->rto;
1052 1057
1053 /* Check if the timestamp looks valid. */ 1058 /* Check if the timestamp looks valid. */
@@ -5278,7 +5283,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5278 datalen -= sizeof(sctp_data_chunk_t); 5283 datalen -= sizeof(sctp_data_chunk_t);
5279 5284
5280 deliver = SCTP_CMD_CHUNK_ULP; 5285 deliver = SCTP_CMD_CHUNK_ULP;
5281 chunk->data_accepted = 1;
5282 5286
5283 /* Think about partial delivery. */ 5287 /* Think about partial delivery. */
5284 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { 5288 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5357,6 +5361,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5357 if (SCTP_CMD_CHUNK_ULP == deliver) 5361 if (SCTP_CMD_CHUNK_ULP == deliver)
5358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); 5362 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
5359 5363
5364 chunk->data_accepted = 1;
5365
5360 /* Note: Some chunks may get overcounted (if we drop) or overcounted 5366 /* Note: Some chunks may get overcounted (if we drop) or overcounted
5361 * if we renege and the chunk arrives again. 5367 * if we renege and the chunk arrives again.
5362 */ 5368 */
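Together with the transport.c hunk near the end of this diff, each transport now carries a 64-bit random nonce that is sent in every HEARTBEAT and must be echoed unchanged in the HEARTBEAT-ACK; mismatching ACKs are discarded. A userspace sketch of that generate-and-compare check, with rand()/time() standing in for get_random_bytes():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

struct transport_link { uint64_t hb_nonce; };

static void link_init(struct transport_link *l)
{
        /* illustrative randomness only, not a cryptographic source */
        srand((unsigned)time(NULL));
        l->hb_nonce = ((uint64_t)rand() << 32) | (uint32_t)rand();
}

/* returns 1 if the HEARTBEAT-ACK should be processed, 0 if discarded */
static int backbeat_valid(const struct transport_link *l, uint64_t echoed)
{
        return echoed == l->hb_nonce;
}

int main(void)
{
        struct transport_link l;

        link_init(&l);
        printf("genuine=%d forged=%d\n",
               backbeat_valid(&l, l.hb_nonce),
               backbeat_valid(&l, l.hb_nonce ^ 1));
        return 0;
}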
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0a2c71d0d8aa..54722e622e6d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -369,7 +369,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
369 369
370 /* Use GFP_ATOMIC since BHs are disabled. */ 370 /* Use GFP_ATOMIC since BHs are disabled. */
371 addr->v4.sin_port = ntohs(addr->v4.sin_port); 371 addr->v4.sin_port = ntohs(addr->v4.sin_port);
372 ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC); 372 ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
373 addr->v4.sin_port = htons(addr->v4.sin_port); 373 addr->v4.sin_port = htons(addr->v4.sin_port);
374 sctp_write_unlock(&ep->base.addr_lock); 374 sctp_write_unlock(&ep->base.addr_lock);
375 sctp_local_bh_enable(); 375 sctp_local_bh_enable();
@@ -491,6 +491,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
491 struct sctp_chunk *chunk; 491 struct sctp_chunk *chunk;
492 struct sctp_sockaddr_entry *laddr; 492 struct sctp_sockaddr_entry *laddr;
493 union sctp_addr *addr; 493 union sctp_addr *addr;
494 union sctp_addr saveaddr;
494 void *addr_buf; 495 void *addr_buf;
495 struct sctp_af *af; 496 struct sctp_af *af;
496 struct list_head *pos; 497 struct list_head *pos;
@@ -558,14 +559,26 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
558 } 559 }
559 560
560 retval = sctp_send_asconf(asoc, chunk); 561 retval = sctp_send_asconf(asoc, chunk);
562 if (retval)
563 goto out;
561 564
562 /* FIXME: After sending the add address ASCONF chunk, we 565 /* Add the new addresses to the bind address list with
563 * cannot append the address to the association's binding 566 * use_as_src set to 0.
564 * address list, because the new address may be used as the
565 * source of a message sent to the peer before the ASCONF
566 * chunk is received by the peer. So we should wait until
567 * ASCONF_ACK is received.
568 */ 567 */
568 sctp_local_bh_disable();
569 sctp_write_lock(&asoc->base.addr_lock);
570 addr_buf = addrs;
571 for (i = 0; i < addrcnt; i++) {
572 addr = (union sctp_addr *)addr_buf;
573 af = sctp_get_af_specific(addr->v4.sin_family);
574 memcpy(&saveaddr, addr, af->sockaddr_len);
575 saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
576 retval = sctp_add_bind_addr(bp, &saveaddr, 0,
577 GFP_ATOMIC);
578 addr_buf += af->sockaddr_len;
579 }
580 sctp_write_unlock(&asoc->base.addr_lock);
581 sctp_local_bh_enable();
569 } 582 }
570 583
571out: 584out:
@@ -676,12 +689,15 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
676 struct sctp_sock *sp; 689 struct sctp_sock *sp;
677 struct sctp_endpoint *ep; 690 struct sctp_endpoint *ep;
678 struct sctp_association *asoc; 691 struct sctp_association *asoc;
692 struct sctp_transport *transport;
679 struct sctp_bind_addr *bp; 693 struct sctp_bind_addr *bp;
680 struct sctp_chunk *chunk; 694 struct sctp_chunk *chunk;
681 union sctp_addr *laddr; 695 union sctp_addr *laddr;
696 union sctp_addr saveaddr;
682 void *addr_buf; 697 void *addr_buf;
683 struct sctp_af *af; 698 struct sctp_af *af;
684 struct list_head *pos; 699 struct list_head *pos, *pos1;
700 struct sctp_sockaddr_entry *saddr;
685 int i; 701 int i;
686 int retval = 0; 702 int retval = 0;
687 703
@@ -748,14 +764,42 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
748 goto out; 764 goto out;
749 } 765 }
750 766
751 retval = sctp_send_asconf(asoc, chunk); 767 /* Reset use_as_src flag for the addresses in the bind address
768 * list that are to be deleted.
769 */
770 sctp_local_bh_disable();
771 sctp_write_lock(&asoc->base.addr_lock);
772 addr_buf = addrs;
773 for (i = 0; i < addrcnt; i++) {
774 laddr = (union sctp_addr *)addr_buf;
775 af = sctp_get_af_specific(laddr->v4.sin_family);
776 memcpy(&saveaddr, laddr, af->sockaddr_len);
777 saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
778 list_for_each(pos1, &bp->address_list) {
779 saddr = list_entry(pos1,
780 struct sctp_sockaddr_entry,
781 list);
782 if (sctp_cmp_addr_exact(&saddr->a, &saveaddr))
783 saddr->use_as_src = 0;
784 }
785 addr_buf += af->sockaddr_len;
786 }
787 sctp_write_unlock(&asoc->base.addr_lock);
788 sctp_local_bh_enable();
752 789
753 /* FIXME: After sending the delete address ASCONF chunk, we 790 /* Update the route and saddr entries for all the transports
754 * cannot remove the addresses from the association's bind 791 * as some of the addresses in the bind address list are
755 * address list, because there maybe some packet send to 792 * about to be deleted and cannot be used as source addresses.
756 * the delete addresses, so we should wait until ASCONF_ACK
757 * packet is received.
758 */ 793 */
794 list_for_each(pos1, &asoc->peer.transport_addr_list) {
795 transport = list_entry(pos1, struct sctp_transport,
796 transports);
797 dst_release(transport->dst);
798 sctp_transport_route(transport, NULL,
799 sctp_sk(asoc->base.sk));
800 }
801
802 retval = sctp_send_asconf(asoc, chunk);
759 } 803 }
760out: 804out:
761 return retval; 805 return retval;
@@ -4977,7 +5021,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
4977/* Caller must hold hashbucket lock for this tb with local BH disabled */ 5021/* Caller must hold hashbucket lock for this tb with local BH disabled */
4978static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 5022static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
4979{ 5023{
4980 if (hlist_empty(&pp->owner)) { 5024 if (pp && hlist_empty(&pp->owner)) {
4981 if (pp->next) 5025 if (pp->next)
4982 pp->next->pprev = pp->pprev; 5026 pp->next->pprev = pp->pprev;
4983 *(pp->pprev) = pp->next; 5027 *(pp->pprev) = pp->next;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 160f62ad1cc5..2763aa93de1a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -49,6 +49,7 @@
49 */ 49 */
50 50
51#include <linux/types.h> 51#include <linux/types.h>
52#include <linux/random.h>
52#include <net/sctp/sctp.h> 53#include <net/sctp/sctp.h>
53#include <net/sctp/sm.h> 54#include <net/sctp/sm.h>
54 55
@@ -85,7 +86,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
85 86
86 peer->init_sent_count = 0; 87 peer->init_sent_count = 0;
87 88
88 peer->state = SCTP_ACTIVE;
89 peer->param_flags = SPP_HB_DISABLE | 89 peer->param_flags = SPP_HB_DISABLE |
90 SPP_PMTUD_ENABLE | 90 SPP_PMTUD_ENABLE |
91 SPP_SACKDELAY_ENABLE; 91 SPP_SACKDELAY_ENABLE;
@@ -109,6 +109,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
109 peer->hb_timer.function = sctp_generate_heartbeat_event; 109 peer->hb_timer.function = sctp_generate_heartbeat_event;
110 peer->hb_timer.data = (unsigned long)peer; 110 peer->hb_timer.data = (unsigned long)peer;
111 111
112 /* Initialize the 64-bit random nonce sent with heartbeat. */
113 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
114
112 atomic_set(&peer->refcnt, 1); 115 atomic_set(&peer->refcnt, 1);
113 peer->dead = 0; 116 peer->dead = 0;
114 117
@@ -517,7 +520,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
517unsigned long sctp_transport_timeout(struct sctp_transport *t) 520unsigned long sctp_transport_timeout(struct sctp_transport *t)
518{ 521{
519 unsigned long timeout; 522 unsigned long timeout;
520 timeout = t->hbinterval + t->rto + sctp_jitter(t->rto); 523 timeout = t->rto + sctp_jitter(t->rto);
524 if (t->state != SCTP_UNCONFIRMED)
525 timeout += t->hbinterval;
521 timeout += jiffies; 526 timeout += jiffies;
522 return timeout; 527 return timeout;
523} 528}
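
The sctp_transport_timeout() change above stops charging the heartbeat interval against transports that are still unconfirmed, so they are probed on the RTO alone. A hedged sketch of the resulting calculation (simplified names, the jitter helper stubbed out, not the kernel code):

#include <stdlib.h>

/* Crude stand-in for sctp_jitter(): a non-negative offset below rto. */
static unsigned long jitter(unsigned long rto)
{
    return rto ? (unsigned long)rand() % rto : 0;
}

/* Heartbeat timer expiry: the heartbeat interval is only added once the
 * transport has been confirmed. */
static unsigned long transport_timeout(unsigned long now, unsigned long rto,
                                       unsigned long hbinterval, int confirmed)
{
    unsigned long timeout = rto + jitter(rto);

    if (confirmed)
        timeout += hbinterval;
    return timeout + now;   /* absolute expiry, like jiffies in the kernel */
}
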
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 519ebc17c028..4a9aa9393b97 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -225,9 +225,8 @@ gss_alloc_context(void)
225{ 225{
226 struct gss_cl_ctx *ctx; 226 struct gss_cl_ctx *ctx;
227 227
228 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 228 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
229 if (ctx != NULL) { 229 if (ctx != NULL) {
230 memset(ctx, 0, sizeof(*ctx));
231 ctx->gc_proc = RPC_GSS_PROC_DATA; 230 ctx->gc_proc = RPC_GSS_PROC_DATA;
232 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */ 231 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
233 spin_lock_init(&ctx->gc_seq_lock); 232 spin_lock_init(&ctx->gc_seq_lock);
@@ -391,9 +390,8 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
391{ 390{
392 struct gss_upcall_msg *gss_msg; 391 struct gss_upcall_msg *gss_msg;
393 392
394 gss_msg = kmalloc(sizeof(*gss_msg), GFP_KERNEL); 393 gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
395 if (gss_msg != NULL) { 394 if (gss_msg != NULL) {
396 memset(gss_msg, 0, sizeof(*gss_msg));
397 INIT_LIST_HEAD(&gss_msg->list); 395 INIT_LIST_HEAD(&gss_msg->list);
398 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); 396 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
399 init_waitqueue_head(&gss_msg->waitqueue); 397 init_waitqueue_head(&gss_msg->waitqueue);
@@ -776,10 +774,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
776 dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", 774 dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
777 acred->uid, auth->au_flavor); 775 acred->uid, auth->au_flavor);
778 776
779 if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) 777 if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
780 goto out_err; 778 goto out_err;
781 779
782 memset(cred, 0, sizeof(*cred));
783 atomic_set(&cred->gc_count, 1); 780 atomic_set(&cred->gc_count, 1);
784 cred->gc_uid = acred->uid; 781 cred->gc_uid = acred->uid;
785 /* 782 /*
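
The conversions in this file, and in most of the files that follow, all apply the same pattern: a kmalloc() followed by memset(..., 0, ...) becomes a single kzalloc(). A minimal userspace illustration of the same idea using malloc()/calloc() (hypothetical struct, not the kernel API):

#include <stdlib.h>
#include <string.h>

struct gss_ctx_demo { int proc; unsigned int seq; };   /* hypothetical */

/* Before: allocate, then zero by hand. */
static struct gss_ctx_demo *ctx_alloc_old(void)
{
    struct gss_ctx_demo *ctx = malloc(sizeof(*ctx));
    if (ctx)
        memset(ctx, 0, sizeof(*ctx));
    return ctx;
}

/* After: a single call that returns zeroed memory
 * (calloc() here, kzalloc() inside the kernel). */
static struct gss_ctx_demo *ctx_alloc_new(void)
{
    return calloc(1, sizeof(struct gss_ctx_demo));
}
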
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index b8714a87b34c..70e1e53a632b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -129,9 +129,8 @@ gss_import_sec_context_kerberos(const void *p,
129 const void *end = (const void *)((const char *)p + len); 129 const void *end = (const void *)((const char *)p + len);
130 struct krb5_ctx *ctx; 130 struct krb5_ctx *ctx;
131 131
132 if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) 132 if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
133 goto out_err; 133 goto out_err;
134 memset(ctx, 0, sizeof(*ctx));
135 134
136 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); 135 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
137 if (IS_ERR(p)) 136 if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index d88468d21c37..3db745379d06 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -237,9 +237,8 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
237 struct gss_api_mech *mech, 237 struct gss_api_mech *mech,
238 struct gss_ctx **ctx_id) 238 struct gss_ctx **ctx_id)
239{ 239{
240 if (!(*ctx_id = kmalloc(sizeof(**ctx_id), GFP_KERNEL))) 240 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
241 return GSS_S_FAILURE; 241 return GSS_S_FAILURE;
242 memset(*ctx_id, 0, sizeof(**ctx_id));
243 (*ctx_id)->mech_type = gss_mech_get(mech); 242 (*ctx_id)->mech_type = gss_mech_get(mech);
244 243
245 return mech->gm_ops 244 return mech->gm_ops
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 3d0432aa45c1..88dcb52d171b 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -152,9 +152,8 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
152 const void *end = (const void *)((const char *)p + len); 152 const void *end = (const void *)((const char *)p + len);
153 struct spkm3_ctx *ctx; 153 struct spkm3_ctx *ctx;
154 154
155 if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) 155 if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
156 goto out_err; 156 goto out_err;
157 memset(ctx, 0, sizeof(*ctx));
158 157
159 p = simple_get_netobj(p, end, &ctx->ctx_id); 158 p = simple_get_netobj(p, end, &ctx->ctx_id);
160 if (IS_ERR(p)) 159 if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index af0d7ce74686..854a983ccf26 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -90,10 +90,9 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
90int 90int
91decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen) 91decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
92{ 92{
93 if (!(out->data = kmalloc(explen,GFP_KERNEL))) 93 if (!(out->data = kzalloc(explen,GFP_KERNEL)))
94 return 0; 94 return 0;
95 out->len = explen; 95 out->len = explen;
96 memset(out->data, 0, explen);
97 memcpy(out->data, in, enclen); 96 memcpy(out->data, in, enclen);
98 return 1; 97 return 1;
99} 98}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index aa8965e9d307..4ba271f892c8 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -125,10 +125,9 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
125 goto out_err; 125 goto out_err;
126 126
127 err = -ENOMEM; 127 err = -ENOMEM;
128 clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); 128 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
129 if (!clnt) 129 if (!clnt)
130 goto out_err; 130 goto out_err;
131 memset(clnt, 0, sizeof(*clnt));
132 atomic_set(&clnt->cl_users, 0); 131 atomic_set(&clnt->cl_users, 0);
133 atomic_set(&clnt->cl_count, 1); 132 atomic_set(&clnt->cl_count, 1);
134 clnt->cl_parent = clnt; 133 clnt->cl_parent = clnt;
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 15c2db26767b..bd98124c3a64 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -114,13 +114,8 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
114 */ 114 */
115struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) 115struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
116{ 116{
117 unsigned int ops = clnt->cl_maxproc;
118 size_t size = ops * sizeof(struct rpc_iostats);
119 struct rpc_iostats *new; 117 struct rpc_iostats *new;
120 118 new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
121 new = kmalloc(size, GFP_KERNEL);
122 if (new)
123 memset(new, 0 , size);
124 return new; 119 return new;
125} 120}
126EXPORT_SYMBOL(rpc_alloc_iostats); 121EXPORT_SYMBOL(rpc_alloc_iostats);
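
rpc_alloc_iostats() above switches to kcalloc(), which zeroes the array and also guards the count-times-size multiplication against overflow. A hedged userspace analogue using calloc() (hypothetical struct name):

#include <stdlib.h>

struct iostats_demo { unsigned long ops; unsigned long bytes; };  /* hypothetical */

/* calloc(n, size) zeroes the block and fails cleanly if n * size would
 * overflow, the same reason kcalloc() is preferred over
 * kmalloc(n * size) plus memset() in the kernel. */
static struct iostats_demo *alloc_iostats(size_t nprocs)
{
    return calloc(nprocs, sizeof(struct iostats_demo));
}
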
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 01ba60a49572..b76a227dd3ad 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,9 +32,8 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
32 int vers; 32 int vers;
33 unsigned int xdrsize; 33 unsigned int xdrsize;
34 34
35 if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL))) 35 if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
36 return NULL; 36 return NULL;
37 memset(serv, 0, sizeof(*serv));
38 serv->sv_name = prog->pg_name; 37 serv->sv_name = prog->pg_name;
39 serv->sv_program = prog; 38 serv->sv_program = prog;
40 serv->sv_nrthreads = 1; 39 serv->sv_nrthreads = 1;
@@ -159,11 +158,10 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
159 struct svc_rqst *rqstp; 158 struct svc_rqst *rqstp;
160 int error = -ENOMEM; 159 int error = -ENOMEM;
161 160
162 rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL); 161 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
163 if (!rqstp) 162 if (!rqstp)
164 goto out; 163 goto out;
165 164
166 memset(rqstp, 0, sizeof(*rqstp));
167 init_waitqueue_head(&rqstp->rq_wait); 165 init_waitqueue_head(&rqstp->rq_wait);
168 166
169 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) 167 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a27905a0ad27..d9a95732df46 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1322,11 +1322,10 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1322 struct sock *inet; 1322 struct sock *inet;
1323 1323
1324 dprintk("svc: svc_setup_socket %p\n", sock); 1324 dprintk("svc: svc_setup_socket %p\n", sock);
1325 if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) { 1325 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
1326 *errp = -ENOMEM; 1326 *errp = -ENOMEM;
1327 return NULL; 1327 return NULL;
1328 } 1328 }
1329 memset(svsk, 0, sizeof(*svsk));
1330 1329
1331 inet = sock->sk; 1330 inet = sock->sk;
1332 1331
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 02060d0e7be8..313b68d892c6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -908,9 +908,8 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
908 struct rpc_xprt *xprt; 908 struct rpc_xprt *xprt;
909 struct rpc_rqst *req; 909 struct rpc_rqst *req;
910 910
911 if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) 911 if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
912 return ERR_PTR(-ENOMEM); 912 return ERR_PTR(-ENOMEM);
913 memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
914 913
915 xprt->addr = *ap; 914 xprt->addr = *ap;
916 915
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 21006b109101..ee678ed13b6f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1276,10 +1276,9 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1276 1276
1277 xprt->max_reqs = xprt_udp_slot_table_entries; 1277 xprt->max_reqs = xprt_udp_slot_table_entries;
1278 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1278 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
1279 xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1279 xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
1280 if (xprt->slot == NULL) 1280 if (xprt->slot == NULL)
1281 return -ENOMEM; 1281 return -ENOMEM;
1282 memset(xprt->slot, 0, slot_table_size);
1283 1282
1284 xprt->prot = IPPROTO_UDP; 1283 xprt->prot = IPPROTO_UDP;
1285 xprt->port = xs_get_random_port(); 1284 xprt->port = xs_get_random_port();
@@ -1318,10 +1317,9 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1318 1317
1319 xprt->max_reqs = xprt_tcp_slot_table_entries; 1318 xprt->max_reqs = xprt_tcp_slot_table_entries;
1320 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1319 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
1321 xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1320 xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
1322 if (xprt->slot == NULL) 1321 if (xprt->slot == NULL)
1323 return -ENOMEM; 1322 return -ENOMEM;
1324 memset(xprt->slot, 0, slot_table_size);
1325 1323
1326 xprt->prot = IPPROTO_TCP; 1324 xprt->prot = IPPROTO_TCP;
1327 xprt->port = xs_get_random_port(); 1325 xprt->port = xs_get_random_port();
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 7ef17a449cfd..75a5968c2139 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -665,11 +665,9 @@ int tipc_bearer_init(void)
665 int res; 665 int res;
666 666
667 write_lock_bh(&tipc_net_lock); 667 write_lock_bh(&tipc_net_lock);
668 tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC); 668 tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
669 media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC); 669 media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
670 if (tipc_bearers && media_list) { 670 if (tipc_bearers && media_list) {
671 memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer));
672 memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
673 res = TIPC_OK; 671 res = TIPC_OK;
674 } else { 672 } else {
675 kfree(tipc_bearers); 673 kfree(tipc_bearers);
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 1dcb6940e338..b46b5188a9fd 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -57,29 +57,25 @@ struct cluster *tipc_cltr_create(u32 addr)
57 struct _zone *z_ptr; 57 struct _zone *z_ptr;
58 struct cluster *c_ptr; 58 struct cluster *c_ptr;
59 int max_nodes; 59 int max_nodes;
60 int alloc;
61 60
62 c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC); 61 c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
63 if (c_ptr == NULL) { 62 if (c_ptr == NULL) {
64 warn("Cluster creation failure, no memory\n"); 63 warn("Cluster creation failure, no memory\n");
65 return NULL; 64 return NULL;
66 } 65 }
67 memset(c_ptr, 0, sizeof(*c_ptr));
68 66
69 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 67 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
70 if (in_own_cluster(addr)) 68 if (in_own_cluster(addr))
71 max_nodes = LOWEST_SLAVE + tipc_max_slaves; 69 max_nodes = LOWEST_SLAVE + tipc_max_slaves;
72 else 70 else
73 max_nodes = tipc_max_nodes + 1; 71 max_nodes = tipc_max_nodes + 1;
74 alloc = sizeof(void *) * (max_nodes + 1);
75 72
76 c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC); 73 c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC);
77 if (c_ptr->nodes == NULL) { 74 if (c_ptr->nodes == NULL) {
78 warn("Cluster creation failure, no memory for node area\n"); 75 warn("Cluster creation failure, no memory for node area\n");
79 kfree(c_ptr); 76 kfree(c_ptr);
80 return NULL; 77 return NULL;
81 } 78 }
82 memset(c_ptr->nodes, 0, alloc);
83 79
84 if (in_own_cluster(addr)) 80 if (in_own_cluster(addr))
85 tipc_local_nodes = c_ptr->nodes; 81 tipc_local_nodes = c_ptr->nodes;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 2b8441203120..ee94de92ae99 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -295,7 +295,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
295{ 295{
296 struct link_req *req; 296 struct link_req *req;
297 297
298 req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC); 298 req = kmalloc(sizeof(*req), GFP_ATOMIC);
299 if (!req) 299 if (!req)
300 return NULL; 300 return NULL;
301 301
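
Several of the TIPC hunks (discover.c above, and later name_table.c and port.c) only drop the explicit cast on kmalloc()'s return value; in C the void * result converts implicitly, so the cast adds nothing. A small userspace illustration of the same point (hypothetical names):

#include <stdlib.h>

struct link_req_demo { int probes; };   /* hypothetical */

static struct link_req_demo *req_alloc(void)
{
    /* malloc() returns void *, which converts to any object pointer type
     * implicitly in C, so the old (struct link_req *) cast added nothing.
     * (C++ would require the cast, which is one reason it lingers in C code.) */
    struct link_req_demo *req = malloc(sizeof(*req));
    return req;
}
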
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c10e18a49b96..693f02eca6d6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -417,12 +417,11 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
417 struct tipc_msg *msg; 417 struct tipc_msg *msg;
418 char *if_name; 418 char *if_name;
419 419
420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC); 420 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
421 if (!l_ptr) { 421 if (!l_ptr) {
422 warn("Link creation failed, no memory\n"); 422 warn("Link creation failed, no memory\n");
423 return NULL; 423 return NULL;
424 } 424 }
425 memset(l_ptr, 0, sizeof(*l_ptr));
426 425
427 l_ptr->addr = peer; 426 l_ptr->addr = peer;
428 if_name = strchr(b_ptr->publ.name, ':') + 1; 427 if_name = strchr(b_ptr->publ.name, ':') + 1;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index a6926ff07bcc..049242ea5c38 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -117,14 +117,12 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
117 u32 scope, u32 node, u32 port_ref, 117 u32 scope, u32 node, u32 port_ref,
118 u32 key) 118 u32 key)
119{ 119{
120 struct publication *publ = 120 struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
121 (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
122 if (publ == NULL) { 121 if (publ == NULL) {
123 warn("Publication creation failure, no memory\n"); 122 warn("Publication creation failure, no memory\n");
124 return NULL; 123 return NULL;
125 } 124 }
126 125
127 memset(publ, 0, sizeof(*publ));
128 publ->type = type; 126 publ->type = type;
129 publ->lower = lower; 127 publ->lower = lower;
130 publ->upper = upper; 128 publ->upper = upper;
@@ -144,11 +142,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
144 142
145static struct sub_seq *tipc_subseq_alloc(u32 cnt) 143static struct sub_seq *tipc_subseq_alloc(u32 cnt)
146{ 144{
147 u32 sz = cnt * sizeof(struct sub_seq); 145 struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
148 struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
149
150 if (sseq)
151 memset(sseq, 0, sz);
152 return sseq; 146 return sseq;
153} 147}
154 148
@@ -160,8 +154,7 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
160 154
161static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) 155static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
162{ 156{
163 struct name_seq *nseq = 157 struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
164 (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
165 struct sub_seq *sseq = tipc_subseq_alloc(1); 158 struct sub_seq *sseq = tipc_subseq_alloc(1);
166 159
167 if (!nseq || !sseq) { 160 if (!nseq || !sseq) {
@@ -171,7 +164,6 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
171 return NULL; 164 return NULL;
172 } 165 }
173 166
174 memset(nseq, 0, sizeof(*nseq));
175 spin_lock_init(&nseq->lock); 167 spin_lock_init(&nseq->lock);
176 nseq->type = type; 168 nseq->type = type;
177 nseq->sseqs = sseq; 169 nseq->sseqs = sseq;
@@ -1060,7 +1052,7 @@ int tipc_nametbl_init(void)
1060{ 1052{
1061 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1053 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
1062 1054
1063 table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC); 1055 table.types = kmalloc(array_size, GFP_ATOMIC);
1064 if (!table.types) 1056 if (!table.types)
1065 return -ENOMEM; 1057 return -ENOMEM;
1066 1058
diff --git a/net/tipc/net.c b/net/tipc/net.c
index e5a359ab4930..a991bf8a7f74 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -160,14 +160,11 @@ void tipc_net_send_external_routes(u32 dest)
160 160
161static int net_init(void) 161static int net_init(void)
162{ 162{
163 u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
164
165 memset(&tipc_net, 0, sizeof(tipc_net)); 163 memset(&tipc_net, 0, sizeof(tipc_net));
166 tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC); 164 tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
167 if (!tipc_net.zones) { 165 if (!tipc_net.zones) {
168 return -ENOMEM; 166 return -ENOMEM;
169 } 167 }
170 memset(tipc_net.zones, 0, sz);
171 return TIPC_OK; 168 return TIPC_OK;
172} 169}
173 170
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 3251c8d8e53c..b9c8c6b9e94f 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -226,12 +226,11 @@ u32 tipc_createport_raw(void *usr_handle,
226 struct tipc_msg *msg; 226 struct tipc_msg *msg;
227 u32 ref; 227 u32 ref;
228 228
229 p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC); 229 p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
230 if (!p_ptr) { 230 if (!p_ptr) {
231 warn("Port creation failed, no memory\n"); 231 warn("Port creation failed, no memory\n");
232 return 0; 232 return 0;
233 } 233 }
234 memset(p_ptr, 0, sizeof(*p_ptr));
235 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); 234 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
236 if (!ref) { 235 if (!ref) {
237 warn("Port creation failed, reference table exhausted\n"); 236 warn("Port creation failed, reference table exhausted\n");
@@ -1058,7 +1057,7 @@ int tipc_createport(u32 user_ref,
1058 struct port *p_ptr; 1057 struct port *p_ptr;
1059 u32 ref; 1058 u32 ref;
1060 1059
1061 up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1060 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
1062 if (!up_ptr) { 1061 if (!up_ptr) {
1063 warn("Port creation failed, no memory\n"); 1062 warn("Port creation failed, no memory\n");
1064 return -ENOMEM; 1063 return -ENOMEM;
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 596d3c8ff750..e6d6ae22ea49 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -79,7 +79,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
79 while (sz < requested_size) { 79 while (sz < requested_size) {
80 sz <<= 1; 80 sz <<= 1;
81 } 81 }
82 table = (struct reference *)vmalloc(sz * sizeof(struct reference)); 82 table = vmalloc(sz * sizeof(*table));
83 if (table == NULL) 83 if (table == NULL)
84 return -ENOMEM; 84 return -ENOMEM;
85 85
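
The ref.c hunk above also changes the element size from sizeof(struct reference) to sizeof(*table), tying the allocation to the variable's type rather than repeating the type name. A hedged userspace sketch of that idiom (hypothetical struct name):

#include <stdlib.h>

struct reference_demo { void *object; unsigned int lock; };   /* hypothetical */

static struct reference_demo *table_alloc(size_t entries)
{
    /* sizeof(*table) follows the variable's declared type, so the size
     * expression cannot go stale if the element type is ever changed. */
    struct reference_demo *table = malloc(entries * sizeof(*table));
    return table;
}
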
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index e19b4bcd67ec..c51600ba5f4a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -393,12 +393,11 @@ static void subscr_named_msg_event(void *usr_handle,
393 393
394 /* Create subscriber object */ 394 /* Create subscriber object */
395 395
396 subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC); 396 subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
397 if (subscriber == NULL) { 397 if (subscriber == NULL) {
398 warn("Subscriber rejected, no memory\n"); 398 warn("Subscriber rejected, no memory\n");
399 return; 399 return;
400 } 400 }
401 memset(subscriber, 0, sizeof(struct subscriber));
402 INIT_LIST_HEAD(&subscriber->subscription_list); 401 INIT_LIST_HEAD(&subscriber->subscription_list);
403 INIT_LIST_HEAD(&subscriber->subscriber_list); 402 INIT_LIST_HEAD(&subscriber->subscriber_list);
404 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock); 403 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 1e3ae57c7228..04d1b9be9c51 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -82,9 +82,8 @@ static int reg_init(void)
82 82
83 spin_lock_bh(&reg_lock); 83 spin_lock_bh(&reg_lock);
84 if (!users) { 84 if (!users) {
85 users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC); 85 users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
86 if (users) { 86 if (users) {
87 memset(users, 0, USER_LIST_SIZE);
88 for (i = 1; i <= MAX_USERID; i++) { 87 for (i = 1; i <= MAX_USERID; i++) {
89 users[i].next = i - 1; 88 users[i].next = i - 1;
90 } 89 }
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 316c4872ff5b..f5b00ea2d5ac 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -52,13 +52,12 @@ struct _zone *tipc_zone_create(u32 addr)
52 return NULL; 52 return NULL;
53 } 53 }
54 54
55 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); 55 z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC);
56 if (!z_ptr) { 56 if (!z_ptr) {
57 warn("Zone creation failed, insufficient memory\n"); 57 warn("Zone creation failed, insufficient memory\n");
58 return NULL; 58 return NULL;
59 } 59 }
60 60
61 memset(z_ptr, 0, sizeof(*z_ptr));
62 z_num = tipc_zone(addr); 61 z_num = tipc_zone(addr);
63 z_ptr->addr = tipc_addr(z_num, 0, 0); 62 z_ptr->addr = tipc_addr(z_num, 0, 0);
64 tipc_net.zones[z_num] = z_ptr; 63 tipc_net.zones[z_num] = z_ptr;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f70475bfb62a..6f2909279268 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -663,11 +663,10 @@ static int unix_autobind(struct socket *sock)
663 goto out; 663 goto out;
664 664
665 err = -ENOMEM; 665 err = -ENOMEM;
666 addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); 666 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
667 if (!addr) 667 if (!addr)
668 goto out; 668 goto out;
669 669
670 memset(addr, 0, sizeof(*addr) + sizeof(short) + 16);
671 addr->name->sun_family = AF_UNIX; 670 addr->name->sun_family = AF_UNIX;
672 atomic_set(&addr->refcnt, 1); 671 atomic_set(&addr->refcnt, 1);
673 672
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index a690cf773b6a..6f39faa15832 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -370,12 +370,11 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
370 * used by the ioctl call to read call information 370 * used by the ioctl call to read call information
371 * and to execute commands. 371 * and to execute commands.
372 */ 372 */
373 if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) { 373 if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) {
374 wanpipe_kill_sock_irq (newsk); 374 wanpipe_kill_sock_irq (newsk);
375 release_device(dev); 375 release_device(dev);
376 return -ENOMEM; 376 return -ENOMEM;
377 } 377 }
378 memset(mbox_ptr, 0, sizeof(mbox_cmd_t));
379 memcpy(mbox_ptr,skb->data,skb->len); 378 memcpy(mbox_ptr,skb->data,skb->len);
380 379
381 /* Register the lcn on which incoming call came 380 /* Register the lcn on which incoming call came
@@ -507,11 +506,10 @@ static struct sock *wanpipe_alloc_socket(void)
507 if ((sk = sk_alloc(PF_WANPIPE, GFP_ATOMIC, &wanpipe_proto, 1)) == NULL) 506 if ((sk = sk_alloc(PF_WANPIPE, GFP_ATOMIC, &wanpipe_proto, 1)) == NULL)
508 return NULL; 507 return NULL;
509 508
510 if ((wan_opt = kmalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) { 509 if ((wan_opt = kzalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) {
511 sk_free(sk); 510 sk_free(sk);
512 return NULL; 511 return NULL;
513 } 512 }
514 memset(wan_opt, 0x00, sizeof(struct wanpipe_opt));
515 513
516 wp_sk(sk) = wan_opt; 514 wp_sk(sk) = wan_opt;
517 515
@@ -2011,10 +2009,9 @@ static int set_ioctl_cmd (struct sock *sk, void *arg)
2011 2009
2012 dev_put(dev); 2010 dev_put(dev);
2013 2011
2014 if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) 2012 if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL)
2015 return -ENOMEM; 2013 return -ENOMEM;
2016 2014
2017 memset(mbox_ptr, 0, sizeof(mbox_cmd_t));
2018 wp_sk(sk)->mbox = mbox_ptr; 2015 wp_sk(sk)->mbox = mbox_ptr;
2019 2016
2020 wanpipe_link_driver(dev,sk); 2017 wanpipe_link_driver(dev,sk);
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index ad8e8a797790..9479659277ae 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -642,18 +642,16 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
642 642
643 if (cnf->config_id == WANCONFIG_MPPP) { 643 if (cnf->config_id == WANCONFIG_MPPP) {
644#ifdef CONFIG_WANPIPE_MULTPPP 644#ifdef CONFIG_WANPIPE_MULTPPP
645 pppdev = kmalloc(sizeof(struct ppp_device), GFP_KERNEL); 645 pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL);
646 err = -ENOBUFS; 646 err = -ENOBUFS;
647 if (pppdev == NULL) 647 if (pppdev == NULL)
648 goto out; 648 goto out;
649 memset(pppdev, 0, sizeof(struct ppp_device)); 649 pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
650 pppdev->dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
651 if (pppdev->dev == NULL) { 650 if (pppdev->dev == NULL) {
652 kfree(pppdev); 651 kfree(pppdev);
653 err = -ENOBUFS; 652 err = -ENOBUFS;
654 goto out; 653 goto out;
655 } 654 }
656 memset(pppdev->dev, 0, sizeof(struct net_device));
657 err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf); 655 err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf);
658 dev = pppdev->dev; 656 dev = pppdev->dev;
659#else 657#else
@@ -663,11 +661,10 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
663 goto out; 661 goto out;
664#endif 662#endif
665 } else { 663 } else {
666 dev = kmalloc(sizeof(struct net_device), GFP_KERNEL); 664 dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
667 err = -ENOBUFS; 665 err = -ENOBUFS;
668 if (dev == NULL) 666 if (dev == NULL)
669 goto out; 667 goto out;
670 memset(dev, 0, sizeof(struct net_device));
671 err = wandev->new_if(wandev, dev, cnf); 668 err = wandev->new_if(wandev, dev, cnf);
672 } 669 }
673 670
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 405b741dff43..f35bc676128c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -307,10 +307,9 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
307{ 307{
308 struct xfrm_policy *policy; 308 struct xfrm_policy *policy;
309 309
310 policy = kmalloc(sizeof(struct xfrm_policy), gfp); 310 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
311 311
312 if (policy) { 312 if (policy) {
313 memset(policy, 0, sizeof(struct xfrm_policy));
314 atomic_set(&policy->refcnt, 1); 313 atomic_set(&policy->refcnt, 1);
315 rwlock_init(&policy->lock); 314 rwlock_init(&policy->lock);
316 init_timer(&policy->timer); 315 init_timer(&policy->timer);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 43f00fc28a3d..0021aad5db43 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -194,10 +194,9 @@ struct xfrm_state *xfrm_state_alloc(void)
194{ 194{
195 struct xfrm_state *x; 195 struct xfrm_state *x;
196 196
197 x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC); 197 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
198 198
199 if (x) { 199 if (x) {
200 memset(x, 0, sizeof(struct xfrm_state));
201 atomic_set(&x->refcnt, 1); 200 atomic_set(&x->refcnt, 1);
202 atomic_set(&x->tunnel_users, 0); 201 atomic_set(&x->tunnel_users, 0);
203 INIT_LIST_HEAD(&x->bydst); 202 INIT_LIST_HEAD(&x->bydst);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2e8b4dfcbc74..a91c961ba38b 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -523,12 +523,16 @@ static int try_context_mount(struct super_block *sb, void *data)
523 goto out_free; 523 goto out_free;
524 } 524 }
525 525
526 rc = may_context_mount_sb_relabel(sid, sbsec, tsec); 526 if (!fscontext) {
527 if (rc) 527 rc = may_context_mount_sb_relabel(sid, sbsec, tsec);
528 goto out_free; 528 if (rc)
529 529 goto out_free;
530 if (!fscontext)
531 sbsec->sid = sid; 530 sbsec->sid = sid;
531 } else {
532 rc = may_context_mount_inode_relabel(sid, sbsec, tsec);
533 if (rc)
534 goto out_free;
535 }
532 sbsec->mntpoint_sid = sid; 536 sbsec->mntpoint_sid = sid;
533 537
534 sbsec->behavior = SECURITY_FS_USE_MNTPOINT; 538 sbsec->behavior = SECURITY_FS_USE_MNTPOINT;