-rw-r--r--  Documentation/feature-removal-schedule.txt | 16
-rw-r--r--  Documentation/x86_64/boot-options.txt | 7
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/configs/ep93xx_defconfig | 15
-rw-r--r--  arch/arm/kernel/head.S | 6
-rw-r--r--  arch/arm/mach-footbridge/cats-hw.c | 2
-rw-r--r--  arch/arm/mach-s3c2410/mach-anubis.c | 49
-rw-r--r--  arch/arm/mach-s3c2410/mach-osiris.c | 20
-rw-r--r--  arch/arm/mm/ioremap.c | 2
-rw-r--r--  arch/arm/mm/proc-syms.c | 8
-rw-r--r--  arch/arm/mm/proc-xscale.S | 30
-rw-r--r--  arch/i386/kernel/process.c | 4
-rw-r--r--  arch/i386/kernel/time.c | 2
-rw-r--r--  arch/i386/kernel/traps.c | 21
-rw-r--r--  arch/s390/defconfig | 44
-rw-r--r--  arch/s390/kernel/head31.S | 4
-rw-r--r--  arch/s390/kernel/head64.S | 4
-rw-r--r--  arch/s390/kernel/setup.c | 46
-rw-r--r--  arch/sparc/kernel/devices.c | 25
-rw-r--r--  arch/sparc/kernel/irq.c | 2
-rw-r--r--  arch/sparc/kernel/of_device.c | 34
-rw-r--r--  arch/sparc/kernel/prom.c | 9
-rw-r--r--  arch/sparc/kernel/smp.c | 96
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 1
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 103
-rw-r--r--  arch/sparc/kernel/sys_sparc.c | 18
-rw-r--r--  arch/sparc/kernel/time.c | 74
-rw-r--r--  arch/sparc/mm/io-unit.c | 1
-rw-r--r--  arch/sparc/prom/tree.c | 18
-rw-r--r--  arch/sparc64/defconfig | 8
-rw-r--r--  arch/sparc64/kernel/devices.c | 3
-rw-r--r--  arch/sparc64/kernel/of_device.c | 34
-rw-r--r--  arch/sparc64/kernel/prom.c | 12
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c | 1
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c | 18
-rw-r--r--  arch/sparc64/mm/fault.c | 3
-rw-r--r--  arch/sparc64/prom/tree.c | 85
-rw-r--r--  arch/x86_64/defconfig | 9
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86_64/kernel/pci-calgary.c | 77
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c | 5
-rw-r--r--  arch/x86_64/kernel/tce.c | 4
-rw-r--r--  arch/x86_64/kernel/time.c | 18
-rw-r--r--  arch/x86_64/kernel/traps.c | 22
-rw-r--r--  arch/x86_64/pci/k8-bus.c | 10
-rw-r--r--  block/blktrace.c | 2
-rw-r--r--  block/cfq-iosched.c | 2
-rw-r--r--  drivers/block/cciss.c | 86
-rw-r--r--  drivers/bluetooth/hci_usb.c | 25
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 14
-rw-r--r--  drivers/char/synclink.c | 14
-rw-r--r--  drivers/char/synclink_gt.c | 14
-rw-r--r--  drivers/char/synclinkmp.c | 14
-rw-r--r--  drivers/cpufreq/cpufreq.c | 40
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 20
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 3
-rw-r--r--  drivers/dma/ioatdma.c | 2
-rw-r--r--  drivers/fc4/fc.c | 4
-rw-r--r--  drivers/ide/ide-disk.c | 2
-rw-r--r--  drivers/ide/ide-dma.c | 2
-rw-r--r--  drivers/ide/ide.c | 5
-rw-r--r--  drivers/ide/pci/it821x.c | 11
-rw-r--r--  drivers/infiniband/core/mad.c | 22
-rw-r--r--  drivers/infiniband/core/user_mad.c | 87
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 42
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 76
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_keys.c | 15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 8
-rw-r--r--  drivers/message/fusion/Kconfig | 2
-rw-r--r--  drivers/message/fusion/Makefile | 1
-rw-r--r--  drivers/message/fusion/mptbase.c | 99
-rw-r--r--  drivers/message/fusion/mptbase.h | 13
-rw-r--r--  drivers/message/fusion/mptctl.c | 4
-rw-r--r--  drivers/message/fusion/mptctl.h | 5
-rw-r--r--  drivers/message/fusion/mptfc.c | 14
-rw-r--r--  drivers/message/fusion/mptsas.c | 109
-rw-r--r--  drivers/message/fusion/mptscsih.c | 118
-rw-r--r--  drivers/message/fusion/mptspi.c | 10
-rw-r--r--  drivers/net/dummy.c | 1
-rw-r--r--  drivers/net/e1000/e1000.h | 3
-rw-r--r--  drivers/net/e1000/e1000_main.c | 52
-rw-r--r--  drivers/net/ifb.c | 1
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/skge.c | 5
-rw-r--r--  drivers/net/sky2.c | 7
-rw-r--r--  drivers/net/spider_net.c | 580
-rw-r--r--  drivers/net/spider_net.h | 73
-rw-r--r--  drivers/net/sunhme.c | 9
-rw-r--r--  drivers/net/sunlance.c | 8
-rw-r--r--  drivers/net/tg3.c | 116
-rw-r--r--  drivers/net/via-velocity.c | 17
-rw-r--r--  drivers/net/wan/c101.c | 4
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 1
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 1
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 1
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 1
-rw-r--r--  drivers/net/wan/n2.c | 3
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 2
-rw-r--r--  drivers/net/wireless/orinoco.c | 4
-rw-r--r--  drivers/net/wireless/zd1201.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 17
-rw-r--r--  drivers/s390/char/raw3270.c | 52
-rw-r--r--  drivers/s390/char/tape_class.c | 10
-rw-r--r--  drivers/s390/char/tape_core.c | 18
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 10
-rw-r--r--  drivers/s390/cio/cmf.c | 1
-rw-r--r--  drivers/s390/cio/device_fsm.c | 3
-rw-r--r--  drivers/s390/net/ctcmain.c | 21
-rw-r--r--  drivers/s390/net/qeth_main.c | 7
-rw-r--r--  drivers/sbus/sbus.c | 2
-rw-r--r--  drivers/scsi/53c7xx.c | 8
-rw-r--r--  drivers/scsi/NCR53C9x.c | 18
-rw-r--r--  drivers/scsi/NCR_D700.c | 14
-rw-r--r--  drivers/scsi/aha152x.c | 43
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 21
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r--  drivers/scsi/arm/fas216.c | 2
-rw-r--r--  drivers/scsi/ata_piix.c | 165
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 2
-rw-r--r--  drivers/scsi/constants.c | 126
-rw-r--r--  drivers/scsi/esp.c | 16
-rw-r--r--  drivers/scsi/ibmvscsi/iseries_vscsi.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 1
-rw-r--r--  drivers/scsi/jazz_esp.c | 2
-rw-r--r--  drivers/scsi/libata-eh.c | 69
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 89
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 65
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 59
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 64
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 55
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 2
-rw-r--r--  drivers/scsi/mesh.c | 2
-rw-r--r--  drivers/scsi/pluto.c | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 4
-rw-r--r--  drivers/scsi/sata_promise.c | 7
-rw-r--r--  drivers/scsi/scsi.c | 11
-rw-r--r--  drivers/scsi/scsi_debug.c | 72
-rw-r--r--  drivers/scsi/scsi_error.c | 210
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 88
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 64
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/seagate.c | 2
-rw-r--r--  drivers/scsi/sr.c | 5
-rw-r--r--  drivers/scsi/st.c | 7
-rw-r--r--  drivers/scsi/sun3_NCR5380.c | 2
-rw-r--r--  drivers/scsi/sun3x_esp.c | 2
-rw-r--r--  drivers/scsi/wd33c93.c | 2
-rw-r--r--  drivers/serial/sunsab.c | 7
-rw-r--r--  drivers/serial/sunzilog.c | 125
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 7
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 19
-rw-r--r--  fs/xfs/xfs_inode.c | 17
-rw-r--r--  fs/xfs/xfs_log.c | 12
-rw-r--r--  fs/xfs/xfs_vfsops.c | 2
-rw-r--r--  include/asm-arm/arch-iop3xx/iop331-irqs.h | 4
-rw-r--r--  include/asm-m68k/oplib.h | 5
-rw-r--r--  include/asm-s390/system.h | 9
-rw-r--r--  include/asm-s390/timex.h | 4
-rw-r--r--  include/asm-sparc/oplib.h | 5
-rw-r--r--  include/asm-sparc/signal.h | 2
-rw-r--r--  include/asm-sparc64/openprom.h | 2
-rw-r--r--  include/asm-sparc64/oplib.h | 5
-rw-r--r--  include/asm-sparc64/pgtable.h | 2
-rw-r--r--  include/asm-sparc64/sfp-machine.h | 2
-rw-r--r--  include/asm-x86_64/calgary.h | 5
-rw-r--r--  include/asm-x86_64/page.h | 2
-rw-r--r--  include/asm-x86_64/swiotlb.h | 2
-rw-r--r--  include/linux/cpu.h | 6
-rw-r--r--  include/linux/cpufreq.h | 3
-rw-r--r--  include/linux/futex.h | 3
-rw-r--r--  include/linux/ide.h | 1
-rw-r--r--  include/linux/libata.h | 4
-rw-r--r--  include/linux/netdevice.h | 6
-rw-r--r--  include/linux/netfilter_bridge.h | 2
-rw-r--r--  include/linux/skbuff.h | 8
-rw-r--r--  include/net/netdma.h | 2
-rw-r--r--  include/net/pkt_sched.h | 18
-rw-r--r--  include/net/sctp/structs.h | 11
-rw-r--r--  include/net/sctp/user.h | 9
-rw-r--r--  include/rdma/ib_mad.h | 7
-rw-r--r--  include/scsi/scsi_cmnd.h | 9
-rw-r--r--  include/scsi/scsi_transport_sas.h | 7
-rw-r--r--  kernel/cpu.c | 75
-rw-r--r--  kernel/cpuset.c | 24
-rw-r--r--  kernel/futex.c | 121
-rw-r--r--  kernel/futex_compat.c | 34
-rw-r--r--  mm/filemap.c | 2
-rw-r--r--  net/8021q/vlan.c | 11
-rw-r--r--  net/appletalk/ddp.c | 6
-rw-r--r--  net/atm/br2684.c | 3
-rw-r--r--  net/atm/clip.c | 3
-rw-r--r--  net/atm/lec.c | 3
-rw-r--r--  net/atm/mpc.c | 3
-rw-r--r--  net/atm/pppoatm.c | 3
-rw-r--r--  net/atm/resources.c | 3
-rw-r--r--  net/ax25/sysctl_net_ax25.c | 4
-rw-r--r--  net/bluetooth/rfcomm/core.c | 19
-rw-r--r--  net/bridge/br_ioctl.c | 7
-rw-r--r--  net/bridge/br_netfilter.c | 5
-rw-r--r--  net/core/ethtool.c | 2
-rw-r--r--  net/core/user_dma.c | 1
-rw-r--r--  net/dccp/feat.h | 2
-rw-r--r--  net/dccp/ipv4.c | 3
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dccp/options.c | 2
-rw-r--r--  net/decnet/dn_dev.c | 9
-rw-r--r--  net/decnet/dn_fib.c | 3
-rw-r--r--  net/decnet/dn_neigh.c | 3
-rw-r--r--  net/decnet/dn_rules.c | 3
-rw-r--r--  net/decnet/dn_table.c | 11
-rw-r--r--  net/econet/af_econet.c | 3
-rw-r--r--  net/ieee80211/Kconfig | 1
-rw-r--r--  net/ieee80211/ieee80211_crypt.c | 3
-rw-r--r--  net/ieee80211/ieee80211_crypt_ccmp.c | 3
-rw-r--r--  net/ieee80211/ieee80211_crypt_wep.c | 3
-rw-r--r--  net/ieee80211/ieee80211_wx.c | 7
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 28
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_io.c | 3
-rw-r--r--  net/ipv4/ah4.c | 4
-rw-r--r--  net/ipv4/arp.c | 3
-rw-r--r--  net/ipv4/devinet.c | 6
-rw-r--r--  net/ipv4/esp4.c | 4
-rw-r--r--  net/ipv4/fib_hash.c | 6
-rw-r--r--  net/ipv4/fib_rules.c | 3
-rw-r--r--  net/ipv4/fib_semantics.c | 15
-rw-r--r--  net/ipv4/igmp.c | 12
-rw-r--r--  net/ipv4/inet_diag.c | 3
-rw-r--r--  net/ipv4/ip_gre.c | 1
-rw-r--r--  net/ipv4/ip_input.c | 2
-rw-r--r--  net/ipv4/ip_options.c | 1
-rw-r--r--  net/ipv4/ipcomp.c | 3
-rw-r--r--  net/ipv4/ipip.c | 1
-rw-r--r--  net/ipv4/ipmr.c | 21
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 10
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c | 3
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_standalone.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_nat_snmp_basic.c | 4
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 3
-rw-r--r--  net/ipv4/raw.c | 1
-rw-r--r--  net/ipv4/tcp_ipv4.c | 3
-rw-r--r--  net/ipv4/udp.c | 3
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 1
-rw-r--r--  net/ipv6/ip6_input.c | 2
-rw-r--r--  net/ipv6/ip6_tunnel.c | 3
-rw-r--r--  net/ipv6/raw.c | 1
-rw-r--r--  net/ipv6/sit.c | 1
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 140
-rw-r--r--  net/irda/af_irda.c | 2
-rw-r--r--  net/irda/ircomm/ircomm_core.c | 4
-rw-r--r--  net/irda/ircomm/ircomm_lmp.c | 4
-rw-r--r--  net/irda/ircomm/ircomm_param.c | 2
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 8
-rw-r--r--  net/irda/irda_device.c | 4
-rw-r--r--  net/irda/iriap.c | 9
-rw-r--r--  net/irda/iriap_event.c | 2
-rw-r--r--  net/irda/irias_object.c | 24
-rw-r--r--  net/irda/irlan/irlan_common.c | 16
-rw-r--r--  net/irda/irlan/irlan_provider.c | 2
-rw-r--r--  net/irda/irlap.c | 8
-rw-r--r--  net/irda/irlap_frame.c | 19
-rw-r--r--  net/irda/irlmp.c | 11
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 3
-rw-r--r--  net/irda/irttp.c | 20
-rw-r--r--  net/lapb/lapb_iface.c | 4
-rw-r--r--  net/llc/llc_core.c | 3
-rw-r--r--  net/netfilter/Kconfig | 4
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 4
-rw-r--r--  net/netfilter/nf_queue.c | 9
-rw-r--r--  net/netfilter/xt_physdev.c | 15
-rw-r--r--  net/netfilter/xt_pkttype.c | 12
-rw-r--r--  net/netlink/af_netlink.c | 13
-rw-r--r--  net/rxrpc/connection.c | 6
-rw-r--r--  net/rxrpc/peer.c | 3
-rw-r--r--  net/rxrpc/transport.c | 6
-rw-r--r--  net/sched/act_api.c | 9
-rw-r--r--  net/sched/act_pedit.c | 3
-rw-r--r--  net/sched/act_police.c | 6
-rw-r--r--  net/sched/cls_basic.c | 6
-rw-r--r--  net/sched/cls_fw.c | 6
-rw-r--r--  net/sched/cls_route.c | 9
-rw-r--r--  net/sched/cls_rsvp.h | 9
-rw-r--r--  net/sched/cls_tcindex.c | 12
-rw-r--r--  net/sched/cls_u32.c | 15
-rw-r--r--  net/sched/em_meta.c | 3
-rw-r--r--  net/sched/ematch.c | 3
-rw-r--r--  net/sched/estimator.c | 3
-rw-r--r--  net/sched/sch_cbq.c | 3
-rw-r--r--  net/sched/sch_generic.c | 3
-rw-r--r--  net/sched/sch_gred.c | 3
-rw-r--r--  net/sched/sch_hfsc.c | 3
-rw-r--r--  net/sched/sch_htb.c | 3
-rw-r--r--  net/sched/sch_netem.c | 4
-rw-r--r--  net/sctp/associola.c | 27
-rw-r--r--  net/sctp/bind_addr.c | 8
-rw-r--r--  net/sctp/endpointola.c | 11
-rw-r--r--  net/sctp/ipv6.c | 3
-rw-r--r--  net/sctp/outqueue.c | 9
-rw-r--r--  net/sctp/protocol.c | 7
-rw-r--r--  net/sctp/sm_make_chunk.c | 14
-rw-r--r--  net/sctp/sm_sideeffect.c | 12
-rw-r--r--  net/sctp/sm_statefuns.c | 8
-rw-r--r--  net/sctp/socket.c | 74
-rw-r--r--  net/sctp/transport.c | 9
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 9
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_token.c | 3
-rw-r--r--  net/sunrpc/clnt.c | 3
-rw-r--r--  net/sunrpc/stats.c | 7
-rw-r--r--  net/sunrpc/svc.c | 6
-rw-r--r--  net/sunrpc/svcsock.c | 3
-rw-r--r--  net/sunrpc/xprt.c | 3
-rw-r--r--  net/sunrpc/xprtsock.c | 6
-rw-r--r--  net/tipc/bearer.c | 6
-rw-r--r--  net/tipc/cluster.c | 8
-rw-r--r--  net/tipc/discover.c | 2
-rw-r--r--  net/tipc/link.c | 3
-rw-r--r--  net/tipc/name_table.c | 16
-rw-r--r--  net/tipc/net.c | 5
-rw-r--r--  net/tipc/port.c | 5
-rw-r--r--  net/tipc/ref.c | 2
-rw-r--r--  net/tipc/subscr.c | 3
-rw-r--r--  net/tipc/user_reg.c | 3
-rw-r--r--  net/tipc/zone.c | 3
-rw-r--r--  net/unix/af_unix.c | 3
-rw-r--r--  net/wanrouter/af_wanpipe.c | 9
-rw-r--r--  net/wanrouter/wanmain.c | 9
-rw-r--r--  net/xfrm/xfrm_policy.c | 3
-rw-r--r--  net/xfrm/xfrm_state.c | 3
350 files changed, 3212 insertions, 2732 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 9d3a0775a11d..87851efb0228 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -258,3 +258,19 @@ Why: These drivers never compiled since they were added to the kernel
 Who: Jean Delvare <khali@linux-fr.org>

 ---------------------------
+
+What: Bridge netfilter deferred IPv4/IPv6 output hook calling
+When: January 2007
+Why: The deferred output hooks are a layering violation causing unusual
+ and broken behaviour on bridge devices. Examples of things they
+ break include QoS classifation using the MARK or CLASSIFY targets,
+ the IPsec policy match and connection tracking with VLANs on a
+ bridge. Their only use is to enable bridge output port filtering
+ within iptables with the physdev match, which can also be done by
+ combining iptables and ebtables using netfilter marks. Until it
+ will get removed the hook deferral is disabled by default and is
+ only enabled when needed.
+
+Who: Patrick McHardy <kaber@trash.net>
+
+---------------------------
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 6887d44d2661..6da24e7a56cb 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -238,6 +238,13 @@ Debugging
  pagefaulttrace Dump all page faults. Only useful for extreme debugging
  and will create a lot of output.

+ call_trace=[old|both|newfallback|new]
+ old: use old inexact backtracer
+ new: use new exact dwarf2 unwinder
+ both: print entries from both
+ newfallback: use new unwinder but fall back to old if it gets
+ stuck (default)
+
 Misc

  noreplacement Don't replace instructions with more appropriate ones
diff --git a/MAINTAINERS b/MAINTAINERS
index c67c3e338105..e42e14335194 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1694,10 +1694,8 @@ L: linux-kernel@vger.kernel.org
 S: Maintained

 LAPB module
-P: Henner Eisen
-M: eis@baty.hanse.de
 L: linux-x25@vger.kernel.org
-S: Maintained
+S: Orphan

 LASI 53c700 driver for PARISC
 P: James E.J. Bottomley
diff --git a/Makefile b/Makefile
index 1dd58d35d72c..c9b7dbb64c71 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 18
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME=Crazed Snow-Weasel

 # *DOCUMENTATION*
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index c0de6fcd488a..2948b4589a8b 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18-rc1
-# Sun Jul 9 15:21:30 2006
+# Linux kernel version: 2.6.18-rc1-git9
+# Sat Jul 15 15:08:10 2006
 #
 CONFIG_ARM=y
 CONFIG_MMU=y
@@ -30,6 +30,7 @@ CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
 CONFIG_SYSCTL=y
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
@@ -749,7 +750,7 @@ CONFIG_VIDEO_V4L2=y
 # USB support
 #
 CONFIG_USB_ARCH_HAS_HCD=y
-# CONFIG_USB_ARCH_HAS_OHCI is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
 # CONFIG_USB_ARCH_HAS_EHCI is not set
 CONFIG_USB=y
 CONFIG_USB_DEBUG=y
@@ -766,6 +767,9 @@ CONFIG_USB_DYNAMIC_MINORS=y
 # USB Host Controller Drivers
 #
 # CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 # CONFIG_USB_SL811_HCD is not set

 #
@@ -855,6 +859,7 @@ CONFIG_USB_SERIAL_CONSOLE=y
 CONFIG_USB_SERIAL_PL2303=y
 # CONFIG_USB_SERIAL_HP4X is not set
 # CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
 # CONFIG_USB_SERIAL_TI is not set
 # CONFIG_USB_SERIAL_CYBERJACK is not set
 # CONFIG_USB_SERIAL_XIRCOM is not set
@@ -871,7 +876,7 @@ CONFIG_USB_SERIAL_PL2303=y
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
 # CONFIG_USB_LED is not set
-# CONFIG_USB_CY7C63 is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYTHERM is not set
 # CONFIG_USB_PHIDGETKIT is not set
 # CONFIG_USB_PHIDGETSERVO is not set
@@ -916,6 +921,7 @@ CONFIG_RTC_INTF_DEV=y
 # CONFIG_RTC_DRV_X1205 is not set
 # CONFIG_RTC_DRV_DS1307 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
 # CONFIG_RTC_DRV_DS1672 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
 # CONFIG_RTC_DRV_PCF8563 is not set
@@ -1023,7 +1029,6 @@ CONFIG_SUNRPC=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
 # CONFIG_CIFS is not set
-# CONFIG_CIFS_DEBUG2 is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_AFS_FS is not set
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 2242f5f7cb7d..4fe386eea4b4 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -114,9 +114,9 @@ ENTRY(secondary_startup)
  * Use the page tables supplied from __cpu_up.
  */
  adr r4, __secondary_data
- ldmia r4, {r5, r6, r13} @ address to jump to after
+ ldmia r4, {r5, r7, r13} @ address to jump to after
  sub r4, r4, r5 @ mmu has been enabled
- ldr r4, [r6, r4] @ get secondary_data.pgdir
+ ldr r4, [r7, r4] @ get secondary_data.pgdir
  adr lr, __enable_mmu @ return address
  add pc, r10, #12 @ initialise processor
  @ (return control reg)
@@ -125,7 +125,7 @@ ENTRY(secondary_startup)
  * r6 = &secondary_data
  */
 ENTRY(__secondary_switched)
- ldr sp, [r6, #4] @ get secondary_data.stack
+ ldr sp, [r7, #4] @ get secondary_data.stack
  mov fp, #0
  b secondary_start_kernel

diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c
index 5b64d5c5b967..ef6ccc8993e9 100644
--- a/arch/arm/mach-footbridge/cats-hw.c
+++ b/arch/arm/mach-footbridge/cats-hw.c
@@ -8,7 +8,7 @@
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>

 #include <asm/hardware/dec21285.h>
 #include <asm/io.h>
diff --git a/arch/arm/mach-s3c2410/mach-anubis.c b/arch/arm/mach-s3c2410/mach-anubis.c
index 4a92d6f92d6b..60641d452db3 100644
--- a/arch/arm/mach-s3c2410/mach-anubis.c
+++ b/arch/arm/mach-s3c2410/mach-anubis.c
@@ -60,11 +60,12 @@ static struct map_desc anubis_iodesc[] __initdata = {
  .virtual = (u32)S3C24XX_VA_ISA_BYTE,
  .pfn = __phys_to_pfn(0x0),
  .length = SZ_4M,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
  }, {
  .virtual = (u32)S3C24XX_VA_ISA_WORD,
  .pfn = __phys_to_pfn(0x0),
- .length = SZ_4M, MT_DEVICE
+ .length = SZ_4M,
+ .type = MT_DEVICE,
  },

  /* we could possibly compress the next set down into a set of smaller tables
@@ -78,36 +79,12 @@ static struct map_desc anubis_iodesc[] __initdata = {
  .virtual = (u32)ANUBIS_VA_CTRL1,
  .pfn = __phys_to_pfn(ANUBIS_PA_CTRL1),
  .length = SZ_4K,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
  }, {
  .virtual = (u32)ANUBIS_VA_CTRL2,
  .pfn = __phys_to_pfn(ANUBIS_PA_CTRL2),
  .length = SZ_4K,
- .type =MT_DEVICE
- },
-
- /* IDE drives */
-
- {
- .virtual = (u32)ANUBIS_IDEPRI,
- .pfn = __phys_to_pfn(S3C2410_CS3),
- .length = SZ_1M,
- .type = MT_DEVICE
- }, {
- .virtual = (u32)ANUBIS_IDEPRIAUX,
- .pfn = __phys_to_pfn(S3C2410_CS3+(1<<26)),
- .length = SZ_1M,
- .type = MT_DEVICE
- }, {
- .virtual = (u32)ANUBIS_IDESEC,
- .pfn = __phys_to_pfn(S3C2410_CS4),
- .length = SZ_1M,
- .type = MT_DEVICE
- }, {
- .virtual = (u32)ANUBIS_IDESECAUX,
- .pfn = __phys_to_pfn(S3C2410_CS4+(1<<26)),
- .length = SZ_1M,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
  },
 };

@@ -126,7 +103,7 @@ static struct s3c24xx_uart_clksrc anubis_serial_clocks[] = {
126 .name = "pclk", 103 .name = "pclk",
127 .divisor = 1, 104 .divisor = 1,
128 .min_baud = 0, 105 .min_baud = 0,
129 .max_baud = 0. 106 .max_baud = 0,
130 } 107 }
131}; 108};
132 109
@@ -139,7 +116,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
  .ulcon = ULCON,
  .ufcon = UFCON,
  .clocks = anubis_serial_clocks,
- .clocks_size = ARRAY_SIZE(anubis_serial_clocks)
+ .clocks_size = ARRAY_SIZE(anubis_serial_clocks),
  },
  [1] = {
  .hwport = 2,
@@ -148,7 +125,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
  .ulcon = ULCON,
  .ufcon = UFCON,
  .clocks = anubis_serial_clocks,
- .clocks_size = ARRAY_SIZE(anubis_serial_clocks)
+ .clocks_size = ARRAY_SIZE(anubis_serial_clocks),
  },
 };

@@ -162,7 +139,7 @@ static struct mtd_partition anubis_default_nand_part[] = {
  [0] = {
  .name = "Boot Agent",
  .size = SZ_16K,
- .offset = 0
+ .offset = 0,
  },
  [1] = {
  .name = "/boot",
@@ -194,21 +171,21 @@ static struct s3c2410_nand_set anubis_nand_sets[] = {
  .nr_chips = 1,
  .nr_map = external_map,
  .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
- .partitions = anubis_default_nand_part
+ .partitions = anubis_default_nand_part,
  },
  [0] = {
  .name = "chip0",
  .nr_chips = 1,
  .nr_map = chip0_map,
  .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
- .partitions = anubis_default_nand_part
+ .partitions = anubis_default_nand_part,
  },
  [2] = {
  .name = "chip1",
  .nr_chips = 1,
  .nr_map = chip1_map,
  .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
- .partitions = anubis_default_nand_part
+ .partitions = anubis_default_nand_part,
  },
 };

@@ -313,7 +290,7 @@ static struct s3c24xx_board anubis_board __initdata = {
  .devices = anubis_devices,
  .devices_count = ARRAY_SIZE(anubis_devices),
  .clocks = anubis_clocks,
- .clocks_count = ARRAY_SIZE(anubis_clocks)
+ .clocks_count = ARRAY_SIZE(anubis_clocks),
 };

 static void __init anubis_map_io(void)
diff --git a/arch/arm/mach-s3c2410/mach-osiris.c b/arch/arm/mach-s3c2410/mach-osiris.c
index 858fd03c6bc5..e193ba69e652 100644
--- a/arch/arm/mach-s3c2410/mach-osiris.c
+++ b/arch/arm/mach-s3c2410/mach-osiris.c
@@ -67,12 +67,12 @@ static struct map_desc osiris_iodesc[] __initdata = {
  .virtual = (u32)OSIRIS_VA_CTRL1,
  .pfn = __phys_to_pfn(OSIRIS_PA_CTRL1),
  .length = SZ_16K,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
  }, {
  .virtual = (u32)OSIRIS_VA_CTRL2,
  .pfn = __phys_to_pfn(OSIRIS_PA_CTRL2),
  .length = SZ_16K,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
  },
 };

@@ -91,7 +91,7 @@ static struct s3c24xx_uart_clksrc osiris_serial_clocks[] = {
91 .name = "pclk", 91 .name = "pclk",
92 .divisor = 1, 92 .divisor = 1,
93 .min_baud = 0, 93 .min_baud = 0,
94 .max_baud = 0. 94 .max_baud = 0,
95 } 95 }
96}; 96};
97 97
@@ -103,7 +103,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
  .ulcon = ULCON,
  .ufcon = UFCON,
  .clocks = osiris_serial_clocks,
- .clocks_size = ARRAY_SIZE(osiris_serial_clocks)
+ .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
  },
  [1] = {
  .hwport = 1,
@@ -112,7 +112,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
  .ulcon = ULCON,
  .ufcon = UFCON,
  .clocks = osiris_serial_clocks,
- .clocks_size = ARRAY_SIZE(osiris_serial_clocks)
+ .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
  },
 };

@@ -126,7 +126,7 @@ static struct mtd_partition osiris_default_nand_part[] = {
  [0] = {
  .name = "Boot Agent",
  .size = SZ_16K,
- .offset = 0
+ .offset = 0,
  },
  [1] = {
  .name = "/boot",
@@ -158,21 +158,21 @@ static struct s3c2410_nand_set osiris_nand_sets[] = {
  .nr_chips = 1,
  .nr_map = external_map,
  .nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
- .partitions = osiris_default_nand_part
+ .partitions = osiris_default_nand_part,
  },
  [0] = {
  .name = "chip0",
  .nr_chips = 1,
  .nr_map = chip0_map,
  .nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
- .partitions = osiris_default_nand_part
+ .partitions = osiris_default_nand_part,
  },
  [2] = {
  .name = "chip1",
  .nr_chips = 1,
  .nr_map = chip1_map,
  .nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
- .partitions = osiris_default_nand_part
+ .partitions = osiris_default_nand_part,
  },
 };

@@ -245,7 +245,7 @@ static struct s3c24xx_board osiris_board __initdata = {
  .devices = osiris_devices,
  .devices_count = ARRAY_SIZE(osiris_devices),
  .clocks = osiris_clocks,
- .clocks_count = ARRAY_SIZE(osiris_clocks)
+ .clocks_count = ARRAY_SIZE(osiris_clocks),
 };

 static void __init osiris_map_io(void)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index dba7dddfe57d..88a999df0ab3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -363,7 +363,9 @@ EXPORT_SYMBOL(__ioremap);

 void __iounmap(void __iomem *addr)
 {
+#ifndef CONFIG_SMP
  struct vm_struct **p, *tmp;
+#endif
  unsigned int section_mapping = 0;

  addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 6c5f0fe578a5..ab143557e688 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -13,6 +13,7 @@
 #include <asm/cacheflush.h>
 #include <asm/proc-fns.h>
 #include <asm/tlbflush.h>
+#include <asm/page.h>

 #ifndef MULTI_CPU
 EXPORT_SYMBOL(cpu_dcache_clean_area);
@@ -30,6 +31,13 @@ EXPORT_SYMBOL(__cpuc_coherent_kern_range);
 EXPORT_SYMBOL(cpu_cache);
 #endif

+#ifndef MULTI_USER
+EXPORT_SYMBOL(__cpu_clear_user_page);
+EXPORT_SYMBOL(__cpu_copy_user_page);
+#else
+EXPORT_SYMBOL(cpu_user);
+#endif
+
 /*
  * No module should need to touch the TLB (and currently
  * no modules do. We export this for "loadkernel" support
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 521538671f4c..561bff73a036 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -536,6 +536,11 @@ cpu_80200_name:
536 .asciz "XScale-80200" 536 .asciz "XScale-80200"
537 .size cpu_80200_name, . - cpu_80200_name 537 .size cpu_80200_name, . - cpu_80200_name
538 538
539 .type cpu_80219_name, #object
540cpu_80219_name:
541 .asciz "XScale-80219"
542 .size cpu_80219_name, . - cpu_80219_name
543
539 .type cpu_8032x_name, #object 544 .type cpu_8032x_name, #object
540cpu_8032x_name: 545cpu_8032x_name:
541 .asciz "XScale-IOP8032x Family" 546 .asciz "XScale-IOP8032x Family"
@@ -613,10 +618,33 @@ __80200_proc_info:
  .long xscale_cache_fns
  .size __80200_proc_info, . - __80200_proc_info

+ .type __80219_proc_info,#object
+__80219_proc_info:
+ .long 0x69052e20
+ .long 0xffffffe0
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __xscale_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_80219_name
+ .long xscale_processor_functions
+ .long v4wbi_tlb_fns
+ .long xscale_mc_user_fns
+ .long xscale_cache_fns
+ .size __80219_proc_info, . - __80219_proc_info
+
  .type __8032x_proc_info,#object
 __8032x_proc_info:
  .long 0x69052420
- .long 0xfffff5e0 @ mask should accomodate IOP80219 also
+ .long 0xffffffe0
  .long PMD_TYPE_SECT | \
  PMD_SECT_BUFFERABLE | \
  PMD_SECT_CACHEABLE | \
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 923bb292f47f..8657c739656a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
  /*
  * Now maybe handle debug registers and/or IO bitmaps
  */
- if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
- || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
+ if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+ || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
  __switch_to_xtra(next_p, tss);

  disable_tsc(prev_p, next_p);
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 8705c0f05788..edd00f6cee37 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
  unsigned long pc = instruction_pointer(regs);

- if (in_lock_functions(pc))
+ if (!user_mode_vm(regs) && in_lock_functions(pc))
  return *(unsigned long *)(regs->ebp + 4);

  return pc;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 313ac1f7dc5a..021f8fdc7512 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
  if (unwind_init_blocked(&info, task) == 0)
  unw_ret = show_trace_unwind(&info, log_lvl);
  }
- if (unw_ret > 0) {
- if (call_trace > 0)
+ if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
+#ifdef CONFIG_STACK_UNWIND
+ print_symbol("DWARF2 unwinder stuck at %s\n",
+ UNW_PC(&info));
+ if (call_trace == 1) {
+ printk("Leftover inexact backtrace:\n");
+ if (UNW_SP(&info))
+ stack = (void *)UNW_SP(&info);
+ } else if (call_trace > 1)
  return;
- printk("%sLegacy call trace:\n", log_lvl);
+ else
+ printk("Full inexact backtrace again:\n");
+#else
+ printk("Inexact backtrace:\n");
+#endif
  }
  }

@@ -1238,8 +1249,10 @@ static int __init call_trace_setup(char *s)
  call_trace = -1;
  else if (strcmp(s, "both") == 0)
  call_trace = 0;
- else if (strcmp(s, "new") == 0)
+ else if (strcmp(s, "newfallback") == 0)
  call_trace = 1;
+ else if (strcmp(s, "new") == 0)
+ call_trace = 2;
  return 1;
 }
 __setup("call_trace=", call_trace_setup);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f4dfc10026d2..f1d4591eddbb 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,13 +1,16 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17-rc1
-# Mon Apr 3 14:34:15 2006
+# Linux kernel version: 2.6.18-rc2
+# Thu Jul 27 13:51:07 2006
 #
 CONFIG_MMU=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_S390=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"

 #
 # Code maturity level options
@@ -25,6 +28,7 @@ CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
 CONFIG_SYSCTL=y
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
@@ -43,10 +47,12 @@ CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
+CONFIG_RT_MUTEXES=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
 CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -94,7 +100,6 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_DEFAULT_MIGRATION_COST=1000000
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
-CONFIG_BINFMT_ELF32=y

 #
 # Code generation options
@@ -115,6 +120,7 @@ CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 # CONFIG_SPARSEMEM_STATIC is not set
 CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y

 #
 # I/O subsystem configuration
@@ -142,6 +148,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y
 # CONFIG_APPLDATA_BASE is not set
 CONFIG_NO_IDLE_HZ=y
 CONFIG_NO_IDLE_HZ_INIT=y
+CONFIG_S390_HYPFS_FS=y
 CONFIG_KEXEC=y

 #
@@ -174,6 +181,8 @@ CONFIG_IP_FIB_HASH=y
 # CONFIG_INET_IPCOMP is not set
 # CONFIG_INET_XFRM_TUNNEL is not set
 # CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
@@ -186,7 +195,10 @@ CONFIG_IPV6=y
 # CONFIG_INET6_IPCOMP is not set
 # CONFIG_INET6_XFRM_TUNNEL is not set
 # CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
 # CONFIG_IPV6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set

 #
@@ -263,6 +275,7 @@ CONFIG_NET_ESTIMATOR=y
 # Network testing
 #
 # CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
 # CONFIG_HAMRADIO is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
@@ -276,6 +289,7 @@ CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
 # CONFIG_DEBUG_DRIVER is not set
+CONFIG_SYS_HYPERVISOR=y

 #
 # Connector - unified userspace <-> kernelspace linker
@@ -334,6 +348,7 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CDROM_PKTCDVD is not set

@@ -359,9 +374,7 @@ CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
 CONFIG_MD_RAID1=m
 # CONFIG_MD_RAID10 is not set
-CONFIG_MD_RAID5=m
-# CONFIG_MD_RAID5_RESHAPE is not set
-# CONFIG_MD_RAID6 is not set
+# CONFIG_MD_RAID456 is not set
 CONFIG_MD_MULTIPATH=m
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=y
@@ -419,7 +432,8 @@ CONFIG_S390_TAPE_34XX=m
 #
 # Cryptographic devices
 #
-CONFIG_Z90CRYPT=m
+CONFIG_ZCRYPT=m
+# CONFIG_ZCRYPT_MONOLITHIC is not set

 #
 # Network device support
@@ -509,6 +523,7 @@ CONFIG_FS_MBCACHE=y
 # CONFIG_MINIX_FS is not set
 # CONFIG_ROMFS_FS is not set
 CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
@@ -614,26 +629,36 @@ CONFIG_MSDOS_PARTITION=y
 # Instrumentation Support
 #
 # CONFIG_PROFILING is not set
-# CONFIG_STATISTICS is not set
+CONFIG_STATISTICS=y
+CONFIG_KPROBES=y

 #
 # Kernel hacking
 #
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 # CONFIG_PRINTK_TIME is not set
 CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=17
 # CONFIG_DETECT_SOFTLOCKUP is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
 CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
+# CONFIG_FRAME_POINTER is not set
 # CONFIG_UNWIND_INFO is not set
 CONFIG_FORCED_INLINING=y
 # CONFIG_RCU_TORTURE_TEST is not set
@@ -688,3 +713,4 @@ CONFIG_CRYPTO=y
 # CONFIG_CRC16 is not set
 CONFIG_CRC32=m
 # CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index d00de17b3778..a4dc61f3285e 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -273,7 +273,7 @@ startup_continue:
 .Lbss_end: .long _end
 .Lparmaddr: .long PARMAREA
 .Lsccbaddr: .long .Lsccb
- .align 4096
+ .org 0x12000
 .Lsccb:
  .hword 0x1000 # length, one page
  .byte 0x00,0x00,0x00
@@ -290,7 +290,7 @@ startup_continue:
 .Lscpincr2:
  .quad 0x00
  .fill 3984,1,0
- .align 4096
+ .org 0x13000

 #ifdef CONFIG_SHARED_KERNEL
  .org 0x100000
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 47744fcca930..9d80c5b1ef95 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -268,7 +268,7 @@ startup_continue:
 .Lparmaddr:
  .quad PARMAREA

- .align 4096
+ .org 0x12000
 .Lsccb:
  .hword 0x1000 # length, one page
  .byte 0x00,0x00,0x00
@@ -285,7 +285,7 @@ startup_continue:
 .Lscpincr2:
  .quad 0x00
  .fill 3984,1,0
- .align 4096
+ .org 0x13000

 #ifdef CONFIG_SHARED_KERNEL
  .org 0x100000
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1ca34f54ea8a..c902f059c7aa 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -877,31 +877,57 @@ static struct bin_attribute ipl_scp_data_attr = {

 static decl_subsys(ipl, NULL, NULL);

+static int ipl_register_fcp_files(void)
+{
+ int rc;
+
+ rc = sysfs_create_group(&ipl_subsys.kset.kobj,
+ &ipl_fcp_attr_group);
+ if (rc)
+ goto out;
+ rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
+ &ipl_parameter_attr);
+ if (rc)
+ goto out_ipl_parm;
+ rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
+ &ipl_scp_data_attr);
+ if (!rc)
+ goto out;
+
+ sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
+
+out_ipl_parm:
+ sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
+out:
+ return rc;
+}
+
 static int __init
 ipl_device_sysfs_register(void) {
  int rc;

  rc = firmware_register(&ipl_subsys);
  if (rc)
- return rc;
+ goto out;

  switch (get_ipl_type()) {
  case ipl_type_ccw:
- sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group);
+ rc = sysfs_create_group(&ipl_subsys.kset.kobj,
+ &ipl_ccw_attr_group);
  break;
  case ipl_type_fcp:
- sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
- sysfs_create_bin_file(&ipl_subsys.kset.kobj,
- &ipl_parameter_attr);
- sysfs_create_bin_file(&ipl_subsys.kset.kobj,
- &ipl_scp_data_attr);
+ rc = ipl_register_fcp_files();
  break;
  default:
- sysfs_create_group(&ipl_subsys.kset.kobj,
+ rc = sysfs_create_group(&ipl_subsys.kset.kobj,
  &ipl_unknown_attr_group);
  break;
  }
- return 0;
+
+ if (rc)
+ firmware_unregister(&ipl_subsys);
+out:
+ return rc;
 }

 __initcall(ipl_device_sysfs_register);
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index adba9dfee35e..af90a5f9ab57 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -15,6 +15,7 @@

 #include <asm/page.h>
 #include <asm/oplib.h>
+#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/cpudata.h>
@@ -34,12 +35,6 @@ static int check_cpu_node(int nd, int *cur_inst,
  int (*compare)(int, int, void *), void *compare_arg,
  int *prom_node, int *mid)
 {
- char node_str[128];
-
- prom_getstring(nd, "device_type", node_str, sizeof(node_str));
- if (strcmp(node_str, "cpu"))
- return -ENODEV;
-
  if (!compare(nd, *cur_inst, compare_arg)) {
  if (prom_node)
  *prom_node = nd;
@@ -59,20 +54,14 @@ static int check_cpu_node(int nd, int *cur_inst,
 static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
  int *prom_node, int *mid)
 {
- int nd, cur_inst, err;
+ struct device_node *dp;
+ int cur_inst;

- nd = prom_root_node;
  cur_inst = 0;
-
- err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
- prom_node, mid);
- if (!err)
- return 0;
-
- nd = prom_getchild(nd);
- while ((nd = prom_getsibling(nd)) != 0) {
- err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
- prom_node, mid);
+ for_each_node_by_type(dp, "cpu") {
+ int err = check_cpu_node(dp->node, &cur_inst,
+ compare, compare_arg,
+ prom_node, mid);
  if (!err)
  return 0;
  }
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index cde73327ca96..72f0201051a0 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -329,7 +329,7 @@ void handler_irq(int irq, struct pt_regs * regs)
  disable_pil_irq(irq);
 #ifdef CONFIG_SMP
  /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
- if(irq < 10)
+ if((sparc_cpu_model==sun4m) && (irq < 10))
  smp4m_irq_rotate(cpu);
 #endif
  action = sparc_irq[irq].action;
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 5a2faad5d043..97bf87e8cdde 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -596,14 +596,41 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
  static int pil_to_sbus[] = {
  0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
  };
- struct device_node *busp = dp->parent;
+ struct device_node *io_unit, *sbi = dp->parent;
  struct linux_prom_registers *regs;
- int board = of_getintprop_default(busp, "board#", 0);
- int slot;
+ int board, slot;
+
+ while (sbi) {
+ if (!strcmp(sbi->name, "sbi"))
+ break;
+
+ sbi = sbi->parent;
+ }
+ if (!sbi)
+ goto build_resources;

  regs = of_get_property(dp, "reg", NULL);
+ if (!regs)
+ goto build_resources;
+
  slot = regs->which_io;

+ /* If SBI's parent is not io-unit or the io-unit lacks
+ * a "board#" property, something is very wrong.
+ */
+ if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
+ printk("%s: Error, parent is not io-unit.\n",
+ sbi->full_name);
+ goto build_resources;
+ }
+ io_unit = sbi->parent;
+ board = of_getintprop_default(io_unit, "board#", -1);
+ if (board == -1) {
+ printk("%s: Error, lacks board# property.\n",
+ io_unit->full_name);
+ goto build_resources;
+ }
+
  for (i = 0; i < op->num_irqs; i++) {
  int this_irq = op->irqs[i];
  int sbusl = pil_to_sbus[this_irq];
@@ -617,6 +644,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
  }
  }

+build_resources:
  build_device_resources(op, parent);

  op->dev.parent = parent;
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index 4b06dcb00ebd..4ca9e5fc97f4 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -444,6 +444,7 @@ static struct property * __init build_one_prop(phandle node, char *prev, char *s
  static struct property *tmp = NULL;
  struct property *p;
  int len;
+ const char *name;

  if (tmp) {
  p = tmp;
@@ -456,19 +457,21 @@ static struct property * __init build_one_prop(phandle node, char *prev, char *s

  p->name = (char *) (p + 1);
  if (special_name) {
+ strcpy(p->name, special_name);
  p->length = special_len;
  p->value = prom_early_alloc(special_len);
  memcpy(p->value, special_val, special_len);
  } else {
  if (prev == NULL) {
- prom_firstprop(node, p->name);
+ name = prom_firstprop(node, NULL);
  } else {
- prom_nextprop(node, prev, p->name);
+ name = prom_nextprop(node, prev, NULL);
  }
- if (strlen(p->name) == 0) {
+ if (strlen(name) == 0) {
  tmp = p;
  return NULL;
  }
+ strcpy(p->name, name);
  p->length = prom_getproplen(node, p->name);
  if (p->length <= 0) {
  p->length = 0;
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index 6135d4faeeeb..e311ade1b490 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -87,6 +87,7 @@ void __cpuinit smp_store_cpu_info(int id)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
  extern void smp4m_smp_done(void);
+ extern void smp4d_smp_done(void);
  unsigned long bogosum = 0;
  int cpu, num;

@@ -100,8 +101,34 @@ void __init smp_cpus_done(unsigned int max_cpus)
  num, bogosum/(500000/HZ),
  (bogosum/(5000/HZ))%100);

- BUG_ON(sparc_cpu_model != sun4m);
- smp4m_smp_done();
+ switch(sparc_cpu_model) {
+ case sun4:
+ printk("SUN4\n");
+ BUG();
+ break;
+ case sun4c:
+ printk("SUN4C\n");
+ BUG();
+ break;
+ case sun4m:
+ smp4m_smp_done();
+ break;
+ case sun4d:
+ smp4d_smp_done();
+ break;
+ case sun4e:
+ printk("SUN4E\n");
+ BUG();
+ break;
+ case sun4u:
+ printk("SUN4U\n");
+ BUG();
+ break;
+ default:
+ printk("UNKNOWN!\n");
+ BUG();
+ break;
+ };
 }

 void cpu_panic(void)
@@ -267,9 +294,9 @@ int setup_profiling_timer(unsigned int multiplier)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
  extern void smp4m_boot_cpus(void);
+ extern void smp4d_boot_cpus(void);
  int i, cpuid, extra;

- BUG_ON(sparc_cpu_model != sun4m);
  printk("Entering SMP Mode...\n");

  extra = 0;
@@ -283,7 +310,34 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
283 310
284 smp_store_cpu_info(boot_cpu_id); 311 smp_store_cpu_info(boot_cpu_id);
285 312
286 smp4m_boot_cpus(); 313 switch(sparc_cpu_model) {
314 case sun4:
315 printk("SUN4\n");
316 BUG();
317 break;
318 case sun4c:
319 printk("SUN4C\n");
320 BUG();
321 break;
322 case sun4m:
323 smp4m_boot_cpus();
324 break;
325 case sun4d:
326 smp4d_boot_cpus();
327 break;
328 case sun4e:
329 printk("SUN4E\n");
330 BUG();
331 break;
332 case sun4u:
333 printk("SUN4U\n");
334 BUG();
335 break;
336 default:
337 printk("UNKNOWN!\n");
338 BUG();
339 break;
340 };
287} 341}
288 342
289/* Set this up early so that things like the scheduler can init 343/* Set this up early so that things like the scheduler can init
@@ -323,9 +377,37 @@ void __init smp_prepare_boot_cpu(void)
323int __cpuinit __cpu_up(unsigned int cpu) 377int __cpuinit __cpu_up(unsigned int cpu)
324{ 378{
325 extern int smp4m_boot_one_cpu(int); 379 extern int smp4m_boot_one_cpu(int);
326 int ret; 380 extern int smp4d_boot_one_cpu(int);
327 381 int ret=0;
328 ret = smp4m_boot_one_cpu(cpu); 382
383 switch(sparc_cpu_model) {
384 case sun4:
385 printk("SUN4\n");
386 BUG();
387 break;
388 case sun4c:
389 printk("SUN4C\n");
390 BUG();
391 break;
392 case sun4m:
393 ret = smp4m_boot_one_cpu(cpu);
394 break;
395 case sun4d:
396 ret = smp4d_boot_one_cpu(cpu);
397 break;
398 case sun4e:
399 printk("SUN4E\n");
400 BUG();
401 break;
402 case sun4u:
403 printk("SUN4U\n");
404 BUG();
405 break;
406 default:
407 printk("UNKNOWN!\n");
408 BUG();
409 break;
410 };
329 411
330 if (!ret) { 412 if (!ret) {
331 cpu_set(cpu, smp_commenced_mask); 413 cpu_set(cpu, smp_commenced_mask);
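
All three smp.c hunks replace the hard BUG_ON(sparc_cpu_model != sun4m) with a switch over sparc_cpu_model, so sun4d machines take the new smp4d_* paths while unsupported models still fail loudly. A compact illustration of the same dispatch idea (illustrative enum and handlers, not the kernel's definitions):

    #include <stdio.h>
    #include <stdlib.h>

    enum cpu_model { SUN4, SUN4C, SUN4M, SUN4D, SUN4E, SUN4U };

    static void smp4m_done(void) { puts("sun4m SMP bring-up finished"); }
    static void smp4d_done(void) { puts("sun4d SMP bring-up finished"); }

    /* Dispatch per machine type: supported models call their own finish
     * routine, everything else aborts instead of silently continuing. */
    static void smp_done(enum cpu_model model)
    {
        switch (model) {
        case SUN4M:
            smp4m_done();
            break;
        case SUN4D:
            smp4d_done();
            break;
        default:
            fprintf(stderr, "SMP not supported on this model\n");
            abort();
        }
    }

    int main(void)
    {
        smp_done(SUN4D);
        return 0;
    }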
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 5fb987fc3d63..4d441a554d35 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -237,7 +237,6 @@ EXPORT_SYMBOL(prom_node_has_property);
237EXPORT_SYMBOL(prom_setprop); 237EXPORT_SYMBOL(prom_setprop);
238EXPORT_SYMBOL(saved_command_line); 238EXPORT_SYMBOL(saved_command_line);
239EXPORT_SYMBOL(prom_apply_obio_ranges); 239EXPORT_SYMBOL(prom_apply_obio_ranges);
240EXPORT_SYMBOL(prom_getname);
241EXPORT_SYMBOL(prom_feval); 240EXPORT_SYMBOL(prom_feval);
242EXPORT_SYMBOL(prom_getbool); 241EXPORT_SYMBOL(prom_getbool);
243EXPORT_SYMBOL(prom_getstring); 242EXPORT_SYMBOL(prom_getstring);
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index b141b7ee6717..ba843f6a2832 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -43,15 +43,10 @@ extern ctxd_t *srmmu_ctx_table_phys;
43extern void calibrate_delay(void); 43extern void calibrate_delay(void);
44 44
45extern volatile int smp_processors_ready; 45extern volatile int smp_processors_ready;
46extern int smp_num_cpus;
47static int smp_highest_cpu; 46static int smp_highest_cpu;
48extern volatile unsigned long cpu_callin_map[NR_CPUS]; 47extern volatile unsigned long cpu_callin_map[NR_CPUS];
49extern cpuinfo_sparc cpu_data[NR_CPUS]; 48extern cpuinfo_sparc cpu_data[NR_CPUS];
50extern unsigned char boot_cpu_id; 49extern unsigned char boot_cpu_id;
51extern int smp_activated;
52extern volatile int __cpu_number_map[NR_CPUS];
53extern volatile int __cpu_logical_map[NR_CPUS];
54extern volatile unsigned long ipi_count;
55extern volatile int smp_process_available; 50extern volatile int smp_process_available;
56 51
57extern cpumask_t smp_commenced_mask; 52extern cpumask_t smp_commenced_mask;
@@ -144,6 +139,8 @@ void __init smp4d_callin(void)
144 spin_lock_irqsave(&sun4d_imsk_lock, flags); 139 spin_lock_irqsave(&sun4d_imsk_lock, flags);
145 cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ 140 cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
146 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 141 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
142 cpu_set(cpuid, cpu_online_map);
143
147} 144}
148 145
149extern void init_IRQ(void); 146extern void init_IRQ(void);
@@ -160,51 +157,24 @@ extern unsigned long trapbase_cpu3[];
160 157
161void __init smp4d_boot_cpus(void) 158void __init smp4d_boot_cpus(void)
162{ 159{
163 int cpucount = 0;
164 int i, mid;
165
166 printk("Entering SMP Mode...\n");
167
168 if (boot_cpu_id) 160 if (boot_cpu_id)
169 current_set[0] = NULL; 161 current_set[0] = NULL;
170
171 local_irq_enable();
172 cpus_clear(cpu_present_map);
173
174 /* XXX This whole thing has to go. See sparc64. */
175 for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
176 cpu_set(mid, cpu_present_map);
177 SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
178 for(i=0; i < NR_CPUS; i++)
179 __cpu_number_map[i] = -1;
180 for(i=0; i < NR_CPUS; i++)
181 __cpu_logical_map[i] = -1;
182 __cpu_number_map[boot_cpu_id] = 0;
183 __cpu_logical_map[0] = boot_cpu_id;
184 current_thread_info()->cpu = boot_cpu_id;
185 smp_store_cpu_info(boot_cpu_id);
186 smp_setup_percpu_timer(); 162 smp_setup_percpu_timer();
187 local_flush_cache_all(); 163 local_flush_cache_all();
188 if (cpu_find_by_instance(1, NULL, NULL)) 164}
189 return; /* Not an MP box. */ 165
190 SMP_PRINTK(("Iterating over CPUs\n")); 166int smp4d_boot_one_cpu(int i)
191 for(i = 0; i < NR_CPUS; i++) { 167{
192 if(i == boot_cpu_id)
193 continue;
194
195 if (cpu_isset(i, cpu_present_map)) {
196 extern unsigned long sun4d_cpu_startup; 168 extern unsigned long sun4d_cpu_startup;
197 unsigned long *entry = &sun4d_cpu_startup; 169 unsigned long *entry = &sun4d_cpu_startup;
198 struct task_struct *p; 170 struct task_struct *p;
199 int timeout; 171 int timeout;
200 int no; 172 int cpu_node;
201 173
174 cpu_find_by_instance(i, &cpu_node,NULL);
202 /* Cook up an idler for this guy. */ 175 /* Cook up an idler for this guy. */
203 p = fork_idle(i); 176 p = fork_idle(i);
204 cpucount++;
205 current_set[i] = task_thread_info(p); 177 current_set[i] = task_thread_info(p);
206 for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
207 && mid != i; no++) ;
208 178
209 /* 179 /*
210 * Initialize the contexts table 180 * Initialize the contexts table
@@ -216,9 +186,9 @@ void __init smp4d_boot_cpus(void)
216 smp_penguin_ctable.reg_size = 0; 186 smp_penguin_ctable.reg_size = 0;
217 187
218 /* whirrr, whirrr, whirrrrrrrrr... */ 188 /* whirrr, whirrr, whirrrrrrrrr... */
219 SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node)); 189 SMP_PRINTK(("Starting CPU %d at %p \n", i, entry));
220 local_flush_cache_all(); 190 local_flush_cache_all();
221 prom_startcpu(cpu_data(no).prom_node, 191 prom_startcpu(cpu_node,
222 &smp_penguin_ctable, 0, (char *)entry); 192 &smp_penguin_ctable, 0, (char *)entry);
223 193
224 SMP_PRINTK(("prom_startcpu returned :)\n")); 194 SMP_PRINTK(("prom_startcpu returned :)\n"));
@@ -230,39 +200,30 @@ void __init smp4d_boot_cpus(void)
230 udelay(200); 200 udelay(200);
231 } 201 }
232 202
233 if(cpu_callin_map[i]) { 203 if (!(cpu_callin_map[i])) {
234 /* Another "Red Snapper". */ 204 printk("Processor %d is stuck.\n", i);
235 __cpu_number_map[i] = cpucount; 205 return -ENODEV;
236 __cpu_logical_map[cpucount] = i; 206
237 } else {
238 cpucount--;
239 printk("Processor %d is stuck.\n", i);
240 }
241 }
242 if(!(cpu_callin_map[i])) {
243 cpu_clear(i, cpu_present_map);
244 __cpu_number_map[i] = -1;
245 }
246 } 207 }
247 local_flush_cache_all(); 208 local_flush_cache_all();
248 if(cpucount == 0) { 209 return 0;
249 printk("Error: only one Processor found.\n"); 210}
250 cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id()); 211
251 } else { 212void __init smp4d_smp_done(void)
252 unsigned long bogosum = 0; 213{
253 214 int i, first;
254 for_each_present_cpu(i) { 215 int *prev;
255 bogosum += cpu_data(i).udelay_val; 216
256 smp_highest_cpu = i; 217 /* setup cpu list for irq rotation */
218 first = 0;
219 prev = &first;
220 for (i = 0; i < NR_CPUS; i++)
221 if (cpu_online(i)) {
222 *prev = i;
223 prev = &cpu_data(i).next;
257 } 224 }
258 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); 225 *prev = first;
259 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", 226 local_flush_cache_all();
260 cpucount + 1,
261 bogosum/(500000/HZ),
262 (bogosum/(5000/HZ))%100);
263 smp_activated = 1;
264 smp_num_cpus = cpucount + 1;
265 }
266 227
267 /* Free unneeded trap tables */ 228 /* Free unneeded trap tables */
268 ClearPageReserved(virt_to_page(trapbase_cpu1)); 229 ClearPageReserved(virt_to_page(trapbase_cpu1));
@@ -334,7 +295,7 @@ void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
334 register int i; 295 register int i;
335 296
336 mask = cpumask_of_cpu(hard_smp4d_processor_id()); 297 mask = cpumask_of_cpu(hard_smp4d_processor_id());
337 cpus_andnot(mask, cpu_present_map, mask); 298 cpus_andnot(mask, cpu_online_map, mask);
338 for(i = 0; i <= high; i++) { 299 for(i = 0; i <= high; i++) {
339 if (cpu_isset(i, mask)) { 300 if (cpu_isset(i, mask)) {
340 ccall_info.processors_in[i] = 0; 301 ccall_info.processors_in[i] = 0;
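
The new smp4d_smp_done() links the online CPUs into a circular list through cpu_data(i).next so interrupts can be rotated among them. The pointer trick used there (keep a pointer to the previous "next" slot, then close the ring) is sketched below with a plain array:

    #include <stdio.h>

    #define NR_CPUS 8

    struct cpu_info { int next; };

    static struct cpu_info cpu_data[NR_CPUS];
    static int cpu_online_mask = 0x2d;   /* CPUs 0, 2, 3, 5 online (example) */

    static int cpu_online(int i) { return (cpu_online_mask >> i) & 1; }

    int main(void)
    {
        int i, first = 0;
        int *prev = &first;

        /* Thread each online CPU onto the previous one's ->next slot... */
        for (i = 0; i < NR_CPUS; i++)
            if (cpu_online(i)) {
                *prev = i;
                prev = &cpu_data[i].next;
            }
        /* ...then close the ring back to the first online CPU. */
        *prev = first;

        for (i = 0; i < NR_CPUS; i++)
            if (cpu_online(i))
                printf("cpu %d -> cpu %d\n", i, cpu_data[i].next);
        return 0;
    }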
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
index 0cdfc9d294b4..a41c8a5c2007 100644
--- a/arch/sparc/kernel/sys_sparc.c
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -465,21 +465,21 @@ sys_rt_sigaction(int sig,
465 465
466asmlinkage int sys_getdomainname(char __user *name, int len) 466asmlinkage int sys_getdomainname(char __user *name, int len)
467{ 467{
468 int nlen; 468 int nlen, err;
469 int err = -EFAULT;
470 469
470 if (len < 0 || len > __NEW_UTS_LEN)
471 return -EINVAL;
472
471 down_read(&uts_sem); 473 down_read(&uts_sem);
472 474
473 nlen = strlen(system_utsname.domainname) + 1; 475 nlen = strlen(system_utsname.domainname) + 1;
474
475 if (nlen < len) 476 if (nlen < len)
476 len = nlen; 477 len = nlen;
477 if (len > __NEW_UTS_LEN) 478
478 goto done; 479 err = -EFAULT;
479 if (copy_to_user(name, system_utsname.domainname, len)) 480 if (!copy_to_user(name, system_utsname.domainname, len))
480 goto done; 481 err = 0;
481 err = 0; 482
482done:
483 up_read(&uts_sem); 483 up_read(&uts_sem);
484 return err; 484 return err;
485} 485}
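
This sys_getdomainname() rewrite (and its sparc64 twin later in the patch) validates len before taking uts_sem and derives the error directly from copy_to_user(), instead of the old goto-based flow that could hand an oversized len to the copy. A small userspace demonstration of the same validate-clamp-copy order, with memcpy() standing in for copy_to_user():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define NEW_UTS_LEN 64

    static const char domainname[] = "example.org";

    /* Mirrors the patched flow: reject bad lengths up front, clamp the
     * copy to the stored string, then copy. In the kernel a failed
     * copy_to_user() would turn the result into -EFAULT. */
    static int get_domainname(char *dst, int len)
    {
        int nlen;

        if (len < 0 || len > NEW_UTS_LEN)
            return -EINVAL;

        nlen = (int)strlen(domainname) + 1;
        if (nlen < len)
            len = nlen;

        memcpy(dst, domainname, (size_t)len);
        return 0;
    }

    int main(void)
    {
        char buf[NEW_UTS_LEN];

        if (get_domainname(buf, sizeof(buf)) == 0)
            printf("domain: %s\n", buf);
        return 0;
    }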
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 04eb1eab6e3e..845081b01267 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -225,6 +225,32 @@ static __inline__ int has_low_battery(void)
225 return (data1 == data2); /* Was the write blocked? */ 225 return (data1 == data2); /* Was the write blocked? */
226} 226}
227 227
228static void __init mostek_set_system_time(void)
229{
230 unsigned int year, mon, day, hour, min, sec;
231 struct mostek48t02 *mregs;
232
233 mregs = (struct mostek48t02 *)mstk48t02_regs;
234 if(!mregs) {
235 prom_printf("Something wrong, clock regs not mapped yet.\n");
236 prom_halt();
237 }
238 spin_lock_irq(&mostek_lock);
239 mregs->creg |= MSTK_CREG_READ;
240 sec = MSTK_REG_SEC(mregs);
241 min = MSTK_REG_MIN(mregs);
242 hour = MSTK_REG_HOUR(mregs);
243 day = MSTK_REG_DOM(mregs);
244 mon = MSTK_REG_MONTH(mregs);
245 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
246 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
247 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
248 set_normalized_timespec(&wall_to_monotonic,
249 -xtime.tv_sec, -xtime.tv_nsec);
250 mregs->creg &= ~MSTK_CREG_READ;
251 spin_unlock_irq(&mostek_lock);
252}
253
228/* Probe for the real time clock chip on Sun4 */ 254/* Probe for the real time clock chip on Sun4 */
229static __inline__ void sun4_clock_probe(void) 255static __inline__ void sun4_clock_probe(void)
230{ 256{
@@ -273,6 +299,7 @@ static __inline__ void sun4_clock_probe(void)
273#endif 299#endif
274} 300}
275 301
302#ifndef CONFIG_SUN4
276static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) 303static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
277{ 304{
278 struct device_node *dp = op->node; 305 struct device_node *dp = op->node;
@@ -307,6 +334,8 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id
307 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP) 334 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
308 kick_start_clock(); 335 kick_start_clock();
309 336
337 mostek_set_system_time();
338
310 return 0; 339 return 0;
311} 340}
312 341
@@ -325,56 +354,37 @@ static struct of_platform_driver clock_driver = {
325 354
326 355
327/* Probe for the mostek real time clock chip. */ 356/* Probe for the mostek real time clock chip. */
328static void clock_init(void) 357static int __init clock_init(void)
329{ 358{
330 of_register_driver(&clock_driver, &of_bus_type); 359 return of_register_driver(&clock_driver, &of_bus_type);
331} 360}
332 361
362/* Must be after subsys_initcall() so that busses are probed. Must
363 * be before device_initcall() because things like the RTC driver
364 * need to see the clock registers.
365 */
366fs_initcall(clock_init);
367#endif /* !CONFIG_SUN4 */
368
333void __init sbus_time_init(void) 369void __init sbus_time_init(void)
334{ 370{
335 unsigned int year, mon, day, hour, min, sec;
336 struct mostek48t02 *mregs;
337
338#ifdef CONFIG_SUN4
339 int temp;
340 struct intersil *iregs;
341#endif
342 371
343 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM); 372 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
344 btfixup(); 373 btfixup();
345 374
346 if (ARCH_SUN4) 375 if (ARCH_SUN4)
347 sun4_clock_probe(); 376 sun4_clock_probe();
348 else
349 clock_init();
350 377
351 sparc_init_timers(timer_interrupt); 378 sparc_init_timers(timer_interrupt);
352 379
353#ifdef CONFIG_SUN4 380#ifdef CONFIG_SUN4
354 if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) { 381 if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) {
355#endif 382 mostek_set_system_time();
356 mregs = (struct mostek48t02 *)mstk48t02_regs;
357 if(!mregs) {
358 prom_printf("Something wrong, clock regs not mapped yet.\n");
359 prom_halt();
360 }
361 spin_lock_irq(&mostek_lock);
362 mregs->creg |= MSTK_CREG_READ;
363 sec = MSTK_REG_SEC(mregs);
364 min = MSTK_REG_MIN(mregs);
365 hour = MSTK_REG_HOUR(mregs);
366 day = MSTK_REG_DOM(mregs);
367 mon = MSTK_REG_MONTH(mregs);
368 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
369 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
370 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
371 set_normalized_timespec(&wall_to_monotonic,
372 -xtime.tv_sec, -xtime.tv_nsec);
373 mregs->creg &= ~MSTK_CREG_READ;
374 spin_unlock_irq(&mostek_lock);
375#ifdef CONFIG_SUN4
376 } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) { 383 } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) {
377 /* initialise the intersil on sun4 */ 384 /* initialise the intersil on sun4 */
385 unsigned int year, mon, day, hour, min, sec;
386 int temp;
387 struct intersil *iregs;
378 388
379 iregs=intersil_clock; 389 iregs=intersil_clock;
380 if(!iregs) { 390 if(!iregs) {
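
The time.c change factors the Mostek RTC read into mostek_set_system_time(), which converts the broken-down calendar fields into a seconds count with mktime() and seeds xtime with it. The conversion step can be tried in userspace with timegm(), a common glibc/BSD extension that plays the same role for UTC times (illustrative date values):

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct tm tm = {0};

        /* Example calendar values, as would be read from the RTC registers. */
        tm.tm_year = 2006 - 1900;   /* years since 1900 */
        tm.tm_mon  = 7 - 1;         /* months are 0-based */
        tm.tm_mday = 21;
        tm.tm_hour = 14;
        tm.tm_min  = 19;
        tm.tm_sec  = 24;

        /* timegm() maps broken-down UTC time to seconds since the epoch,
         * as the kernel's mktime() does for the RTC fields. */
        printf("seconds since epoch: %ld\n", (long)timegm(&tm));
        return 0;
    }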
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 42c1c700c0a7..2bb1309003dd 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -64,6 +64,7 @@ iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
64 64
65 sbus->iommu = (struct iommu_struct *)iounit; 65 sbus->iommu = (struct iommu_struct *)iounit;
66 iounit->page_table = xpt; 66 iounit->page_table = xpt;
67 spin_lock_init(&iounit->lock);
67 68
68 for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); 69 for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
69 xpt < xptend;) 70 xpt < xptend;)
diff --git a/arch/sparc/prom/tree.c b/arch/sparc/prom/tree.c
index 2bf03ee8cde5..5ec246573a98 100644
--- a/arch/sparc/prom/tree.c
+++ b/arch/sparc/prom/tree.c
@@ -205,24 +205,6 @@ int prom_searchsiblings(int node_start, char *nodename)
205 return 0; 205 return 0;
206} 206}
207 207
208/* Gets name in the form prom v2+ uses it (name@x,yyyyy or name (if no reg)) */
209int prom_getname (int node, char *buffer, int len)
210{
211 int i;
212 struct linux_prom_registers reg[PROMREG_MAX];
213
214 i = prom_getproperty (node, "name", buffer, len);
215 if (i <= 0) return -1;
216 buffer [i] = 0;
217 len -= i;
218 i = prom_getproperty (node, "reg", (char *)reg, sizeof (reg));
219 if (i <= 0) return 0;
220 if (len < 11) return -1;
221 buffer = strchr (buffer, 0);
222 sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
223 return 0;
224}
225
226/* Interal version of nextprop that does not alter return values. */ 208/* Interal version of nextprop that does not alter return values. */
227char * __prom_nextprop(int node, char * oprop) 209char * __prom_nextprop(int node, char * oprop)
228{ 210{
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 38353621069e..43d9229fca07 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.18-rc1 3# Linux kernel version: 2.6.18-rc2
4# Wed Jul 12 14:00:58 2006 4# Fri Jul 21 14:19:24 2006
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -36,6 +36,7 @@ CONFIG_SWAP=y
36CONFIG_SYSVIPC=y 36CONFIG_SYSVIPC=y
37CONFIG_POSIX_MQUEUE=y 37CONFIG_POSIX_MQUEUE=y
38# CONFIG_BSD_PROCESS_ACCT is not set 38# CONFIG_BSD_PROCESS_ACCT is not set
39# CONFIG_TASKSTATS is not set
39CONFIG_SYSCTL=y 40CONFIG_SYSCTL=y
40# CONFIG_AUDIT is not set 41# CONFIG_AUDIT is not set
41# CONFIG_IKCONFIG is not set 42# CONFIG_IKCONFIG is not set
@@ -1120,7 +1121,7 @@ CONFIG_USB_HIDDEV=y
1120# CONFIG_USB_LEGOTOWER is not set 1121# CONFIG_USB_LEGOTOWER is not set
1121# CONFIG_USB_LCD is not set 1122# CONFIG_USB_LCD is not set
1122# CONFIG_USB_LED is not set 1123# CONFIG_USB_LED is not set
1123# CONFIG_USB_CY7C63 is not set 1124# CONFIG_USB_CYPRESS_CY7C63 is not set
1124# CONFIG_USB_CYTHERM is not set 1125# CONFIG_USB_CYTHERM is not set
1125# CONFIG_USB_PHIDGETKIT is not set 1126# CONFIG_USB_PHIDGETKIT is not set
1126# CONFIG_USB_PHIDGETSERVO is not set 1127# CONFIG_USB_PHIDGETSERVO is not set
@@ -1279,7 +1280,6 @@ CONFIG_RAMFS=y
1279# CONFIG_NFSD is not set 1280# CONFIG_NFSD is not set
1280# CONFIG_SMB_FS is not set 1281# CONFIG_SMB_FS is not set
1281# CONFIG_CIFS is not set 1282# CONFIG_CIFS is not set
1282# CONFIG_CIFS_DEBUG2 is not set
1283# CONFIG_NCP_FS is not set 1283# CONFIG_NCP_FS is not set
1284# CONFIG_CODA_FS is not set 1284# CONFIG_CODA_FS is not set
1285# CONFIG_AFS_FS is not set 1285# CONFIG_AFS_FS is not set
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index f8ef2f2b9b37..ec10f7edcf86 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -66,9 +66,6 @@ static int check_cpu_node(struct device_node *dp, int *cur_inst,
66 void *compare_arg, 66 void *compare_arg,
67 struct device_node **dev_node, int *mid) 67 struct device_node **dev_node, int *mid)
68{ 68{
69 if (strcmp(dp->type, "cpu"))
70 return -ENODEV;
71
72 if (!compare(dp, *cur_inst, compare_arg)) { 69 if (!compare(dp, *cur_inst, compare_arg)) {
73 if (dev_node) 70 if (dev_node)
74 *dev_node = dp; 71 *dev_node = dp;
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 7064cee290ae..238bbf6de07d 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -542,9 +542,17 @@ static void __init build_device_resources(struct of_device *op,
542 /* Convert to num-cells. */ 542 /* Convert to num-cells. */
543 num_reg /= 4; 543 num_reg /= 4;
544 544
545 /* Conver to num-entries. */ 545 /* Convert to num-entries. */
546 num_reg /= na + ns; 546 num_reg /= na + ns;
547 547
548 /* Prevent overrunning the op->resource[] array. */
549 if (num_reg > PROMREG_MAX) {
550 printk(KERN_WARNING "%s: Too many regs (%d), "
551 "limiting to %d.\n",
552 op->node->full_name, num_reg, PROMREG_MAX);
553 num_reg = PROMREG_MAX;
554 }
555
548 for (index = 0; index < num_reg; index++) { 556 for (index = 0; index < num_reg; index++) {
549 struct resource *r = &op->resource[index]; 557 struct resource *r = &op->resource[index];
550 u32 addr[OF_MAX_ADDR_CELLS]; 558 u32 addr[OF_MAX_ADDR_CELLS];
@@ -650,8 +658,22 @@ apply_interrupt_map(struct device_node *dp, struct device_node *pp,
650 next: 658 next:
651 imap += (na + 3); 659 imap += (na + 3);
652 } 660 }
653 if (i == imlen) 661 if (i == imlen) {
662 /* Psycho and Sabre PCI controllers can have 'interrupt-map'
663 * properties that do not include the on-board device
664 * interrupts. Instead, the device's 'interrupts' property
665 * is already a fully specified INO value.
666 *
667 * Handle this by deciding that, if we didn't get a
668 * match in the parent's 'interrupt-map', and the
669 * parent is an IRQ translator, then use the parent as
670 * our IRQ controller.
671 */
672 if (pp->irq_trans)
673 return pp;
674
654 return NULL; 675 return NULL;
676 }
655 677
656 *irq_p = irq; 678 *irq_p = irq;
657 cp = of_find_node_by_phandle(handle); 679 cp = of_find_node_by_phandle(handle);
@@ -803,6 +825,14 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
803 op->num_irqs = 0; 825 op->num_irqs = 0;
804 } 826 }
805 827
828 /* Prevent overrunning the op->irqs[] array. */
829 if (op->num_irqs > PROMINTR_MAX) {
830 printk(KERN_WARNING "%s: Too many irqs (%d), "
831 "limiting to %d.\n",
832 dp->full_name, op->num_irqs, PROMINTR_MAX);
833 op->num_irqs = PROMINTR_MAX;
834 }
835
806 build_device_resources(op, parent); 836 build_device_resources(op, parent);
807 for (i = 0; i < op->num_irqs; i++) 837 for (i = 0; i < op->num_irqs; i++)
808 op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]); 838 op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]);
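
Both guards added to the sparc64 of_device.c scan clamp a firmware-reported count to the capacity of the fixed array it is about to fill (PROMREG_MAX for resources, PROMINTR_MAX for irqs), warning instead of silently writing past the end. The pattern in isolation (the limit value below is made up, not the kernel's):

    #include <stdio.h>

    #define PROMREG_MAX 16   /* illustrative capacity */

    int main(void)
    {
        int resource[PROMREG_MAX];
        int num_reg = 24;    /* count as reported by firmware (example) */
        int i;

        /* Clamp before the fill loop so a bogus property cannot overrun
         * the fixed-size array. */
        if (num_reg > PROMREG_MAX) {
            fprintf(stderr, "too many regs (%d), limiting to %d\n",
                    num_reg, PROMREG_MAX);
            num_reg = PROMREG_MAX;
        }

        for (i = 0; i < num_reg; i++)
            resource[i] = i;

        printf("filled %d entries\n", num_reg);
        return 0;
    }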
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index c86007a2aa3f..5cc5ab63293f 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -344,10 +344,12 @@ static unsigned long __psycho_onboard_imap_off[] = {
344/*0x2f*/ PSYCHO_IMAP_CE, 344/*0x2f*/ PSYCHO_IMAP_CE,
345/*0x30*/ PSYCHO_IMAP_A_ERR, 345/*0x30*/ PSYCHO_IMAP_A_ERR,
346/*0x31*/ PSYCHO_IMAP_B_ERR, 346/*0x31*/ PSYCHO_IMAP_B_ERR,
347/*0x32*/ PSYCHO_IMAP_PMGMT 347/*0x32*/ PSYCHO_IMAP_PMGMT,
348/*0x33*/ PSYCHO_IMAP_GFX,
349/*0x34*/ PSYCHO_IMAP_EUPA,
348}; 350};
349#define PSYCHO_ONBOARD_IRQ_BASE 0x20 351#define PSYCHO_ONBOARD_IRQ_BASE 0x20
350#define PSYCHO_ONBOARD_IRQ_LAST 0x32 352#define PSYCHO_ONBOARD_IRQ_LAST 0x34
351#define psycho_onboard_imap_offset(__ino) \ 353#define psycho_onboard_imap_offset(__ino) \
352 __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE] 354 __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
353 355
@@ -529,6 +531,10 @@ static unsigned long __sabre_onboard_imap_off[] = {
529/*0x2e*/ SABRE_IMAP_UE, 531/*0x2e*/ SABRE_IMAP_UE,
530/*0x2f*/ SABRE_IMAP_CE, 532/*0x2f*/ SABRE_IMAP_CE,
531/*0x30*/ SABRE_IMAP_PCIERR, 533/*0x30*/ SABRE_IMAP_PCIERR,
534/*0x31*/ 0 /* reserved */,
535/*0x32*/ 0 /* reserved */,
536/*0x33*/ SABRE_IMAP_GFX,
537/*0x34*/ SABRE_IMAP_EUPA,
532}; 538};
533#define SABRE_ONBOARD_IRQ_BASE 0x20 539#define SABRE_ONBOARD_IRQ_BASE 0x20
534#define SABRE_ONBOARD_IRQ_LAST 0x30 540#define SABRE_ONBOARD_IRQ_LAST 0x30
@@ -895,6 +901,8 @@ static unsigned long sysio_irq_offsets[] = {
895 SYSIO_IMAP_CE, 901 SYSIO_IMAP_CE,
896 SYSIO_IMAP_SBERR, 902 SYSIO_IMAP_SBERR,
897 SYSIO_IMAP_PMGMT, 903 SYSIO_IMAP_PMGMT,
904 SYSIO_IMAP_GFX,
905 SYSIO_IMAP_EUPA,
898}; 906};
899 907
900#undef bogon 908#undef bogon
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 237524d87cab..beffc82a1e85 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -254,7 +254,6 @@ EXPORT_SYMBOL(prom_getproperty);
254EXPORT_SYMBOL(prom_node_has_property); 254EXPORT_SYMBOL(prom_node_has_property);
255EXPORT_SYMBOL(prom_setprop); 255EXPORT_SYMBOL(prom_setprop);
256EXPORT_SYMBOL(saved_command_line); 256EXPORT_SYMBOL(saved_command_line);
257EXPORT_SYMBOL(prom_getname);
258EXPORT_SYMBOL(prom_finddevice); 257EXPORT_SYMBOL(prom_finddevice);
259EXPORT_SYMBOL(prom_feval); 258EXPORT_SYMBOL(prom_feval);
260EXPORT_SYMBOL(prom_getbool); 259EXPORT_SYMBOL(prom_getbool);
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 51c056df528e..054d0abdb7ee 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -701,21 +701,21 @@ extern void check_pending(int signum);
701 701
702asmlinkage long sys_getdomainname(char __user *name, int len) 702asmlinkage long sys_getdomainname(char __user *name, int len)
703{ 703{
704 int nlen; 704 int nlen, err;
705 int err = -EFAULT; 705
706 if (len < 0 || len > __NEW_UTS_LEN)
707 return -EINVAL;
706 708
707 down_read(&uts_sem); 709 down_read(&uts_sem);
708 710
709 nlen = strlen(system_utsname.domainname) + 1; 711 nlen = strlen(system_utsname.domainname) + 1;
710
711 if (nlen < len) 712 if (nlen < len)
712 len = nlen; 713 len = nlen;
713 if (len > __NEW_UTS_LEN) 714
714 goto done; 715 err = -EFAULT;
715 if (copy_to_user(name, system_utsname.domainname, len)) 716 if (!copy_to_user(name, system_utsname.domainname, len))
716 goto done; 717 err = 0;
717 err = 0; 718
718done:
719 up_read(&uts_sem); 719 up_read(&uts_sem);
720 return err; 720 return err;
721} 721}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 1605967cce91..55ae802dc0ad 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/kprobes.h> 21#include <linux/kprobes.h>
22#include <linux/kallsyms.h>
22 23
23#include <asm/page.h> 24#include <asm/page.h>
24#include <asm/pgtable.h> 25#include <asm/pgtable.h>
@@ -132,6 +133,8 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
132 133
133 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", 134 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
134 regs->tpc); 135 regs->tpc);
136 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
137 print_symbol("RPC: <%s>\n", regs->u_regs[15]);
135 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); 138 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
136 __asm__("mov %%sp, %0" : "=r" (ksp)); 139 __asm__("mov %%sp, %0" : "=r" (ksp));
137 show_stack(current, ksp); 140 show_stack(current, ksp);
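
The fault.c hunk also prints the saved return address (u_regs[15], shown as "RPC") and resolves it to a symbol with print_symbol(), which makes a bogus-PC oops much easier to attribute. A rough userspace analogue is dladdr(); a hedged sketch (link with -ldl on older glibc):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>

    static void report(void *addr)
    {
        Dl_info info;

        /* dladdr() maps a code address back to the nearest exported symbol,
         * loosely like the kernel's print_symbol()/kallsyms lookup. */
        if (dladdr(addr, &info) && info.dli_sname)
            printf("RPC [%p] <%s>\n", addr, info.dli_sname);
        else
            printf("RPC [%p] <unknown>\n", addr);
    }

    int main(void)
    {
        report((void *)&printf);
        return 0;
    }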
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index 49075abd7cbc..500f05e2cfcb 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -193,91 +193,6 @@ prom_searchsiblings(int node_start, const char *nodename)
193 return 0; 193 return 0;
194} 194}
195 195
196/* Gets name in the {name@x,yyyyy|name (if no reg)} form */
197int
198prom_getname (int node, char *buffer, int len)
199{
200 int i, sbus = 0;
201 int pci = 0, ebus = 0, ide = 0;
202 struct linux_prom_registers *reg;
203 struct linux_prom64_registers reg64[PROMREG_MAX];
204
205 for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) {
206 i = prom_getproperty (sbus, "name", buffer, len);
207 if (i > 0) {
208 buffer [i] = 0;
209 if (!strcmp (buffer, "sbus"))
210 goto getit;
211 }
212 }
213 if ((pci = prom_getparent (node))) {
214 i = prom_getproperty (pci, "name", buffer, len);
215 if (i > 0) {
216 buffer [i] = 0;
217 if (!strcmp (buffer, "pci"))
218 goto getit;
219 }
220 pci = 0;
221 }
222 if ((ebus = prom_getparent (node))) {
223 i = prom_getproperty (ebus, "name", buffer, len);
224 if (i > 0) {
225 buffer[i] = 0;
226 if (!strcmp (buffer, "ebus"))
227 goto getit;
228 }
229 ebus = 0;
230 }
231 if ((ide = prom_getparent (node))) {
232 i = prom_getproperty (ide, "name", buffer, len);
233 if (i > 0) {
234 buffer [i] = 0;
235 if (!strcmp (buffer, "ide"))
236 goto getit;
237 }
238 ide = 0;
239 }
240getit:
241 i = prom_getproperty (node, "name", buffer, len);
242 if (i <= 0) {
243 buffer [0] = 0;
244 return -1;
245 }
246 buffer [i] = 0;
247 len -= i;
248 i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64));
249 if (i <= 0) return 0;
250 if (len < 16) return -1;
251 buffer = strchr (buffer, 0);
252 if (sbus) {
253 reg = (struct linux_prom_registers *)reg64;
254 sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
255 } else if (pci) {
256 int dev, fn;
257 reg = (struct linux_prom_registers *)reg64;
258 fn = (reg[0].which_io >> 8) & 0x07;
259 dev = (reg[0].which_io >> 11) & 0x1f;
260 if (fn)
261 sprintf (buffer, "@%x,%x", dev, fn);
262 else
263 sprintf (buffer, "@%x", dev);
264 } else if (ebus) {
265 reg = (struct linux_prom_registers *)reg64;
266 sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
267 } else if (ide) {
268 reg = (struct linux_prom_registers *)reg64;
269 sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
270 } else if (i == 4) { /* Happens on 8042's children on Ultra/PCI. */
271 reg = (struct linux_prom_registers *)reg64;
272 sprintf (buffer, "@%x", reg[0].which_io);
273 } else {
274 sprintf (buffer, "@%x,%x",
275 (unsigned int)(reg64[0].phys_addr >> 36),
276 (unsigned int)(reg64[0].phys_addr));
277 }
278 return 0;
279}
280
281/* Return the first property type for node 'node'. 196/* Return the first property type for node 'node'.
282 * buffer should be at least 32B in length 197 * buffer should be at least 32B in length
283 */ 198 */
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 83d389b8ebd8..840d5d93d5cc 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-git22 3# Linux kernel version: 2.6.18-rc2
4# Tue Jul 4 14:24:40 2006 4# Tue Jul 18 17:13:20 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -37,6 +37,7 @@ CONFIG_SWAP=y
37CONFIG_SYSVIPC=y 37CONFIG_SYSVIPC=y
38CONFIG_POSIX_MQUEUE=y 38CONFIG_POSIX_MQUEUE=y
39# CONFIG_BSD_PROCESS_ACCT is not set 39# CONFIG_BSD_PROCESS_ACCT is not set
40# CONFIG_TASKSTATS is not set
40CONFIG_SYSCTL=y 41CONFIG_SYSCTL=y
41# CONFIG_AUDIT is not set 42# CONFIG_AUDIT is not set
42CONFIG_IKCONFIG=y 43CONFIG_IKCONFIG=y
@@ -413,6 +414,7 @@ CONFIG_BLK_DEV_LOOP=y
413CONFIG_BLK_DEV_RAM=y 414CONFIG_BLK_DEV_RAM=y
414CONFIG_BLK_DEV_RAM_COUNT=16 415CONFIG_BLK_DEV_RAM_COUNT=16
415CONFIG_BLK_DEV_RAM_SIZE=4096 416CONFIG_BLK_DEV_RAM_SIZE=4096
417CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
416CONFIG_BLK_DEV_INITRD=y 418CONFIG_BLK_DEV_INITRD=y
417# CONFIG_CDROM_PKTCDVD is not set 419# CONFIG_CDROM_PKTCDVD is not set
418# CONFIG_ATA_OVER_ETH is not set 420# CONFIG_ATA_OVER_ETH is not set
@@ -1195,7 +1197,7 @@ CONFIG_USB_MON=y
1195# CONFIG_USB_LEGOTOWER is not set 1197# CONFIG_USB_LEGOTOWER is not set
1196# CONFIG_USB_LCD is not set 1198# CONFIG_USB_LCD is not set
1197# CONFIG_USB_LED is not set 1199# CONFIG_USB_LED is not set
1198# CONFIG_USB_CY7C63 is not set 1200# CONFIG_USB_CYPRESS_CY7C63 is not set
1199# CONFIG_USB_CYTHERM is not set 1201# CONFIG_USB_CYTHERM is not set
1200# CONFIG_USB_PHIDGETKIT is not set 1202# CONFIG_USB_PHIDGETKIT is not set
1201# CONFIG_USB_PHIDGETSERVO is not set 1203# CONFIG_USB_PHIDGETSERVO is not set
@@ -1373,7 +1375,6 @@ CONFIG_SUNRPC=y
1373# CONFIG_RPCSEC_GSS_SPKM3 is not set 1375# CONFIG_RPCSEC_GSS_SPKM3 is not set
1374# CONFIG_SMB_FS is not set 1376# CONFIG_SMB_FS is not set
1375# CONFIG_CIFS is not set 1377# CONFIG_CIFS is not set
1376# CONFIG_CIFS_DEBUG2 is not set
1377# CONFIG_NCP_FS is not set 1378# CONFIG_NCP_FS is not set
1378# CONFIG_CODA_FS is not set 1379# CONFIG_CODA_FS is not set
1379# CONFIG_AFS_FS is not set 1380# CONFIG_AFS_FS is not set
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 9b5bb413a6e9..5d4a7d125ed0 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -103,7 +103,7 @@ ENTRY(ia32_sysenter_target)
103 pushq %rax 103 pushq %rax
104 CFI_ADJUST_CFA_OFFSET 8 104 CFI_ADJUST_CFA_OFFSET 8
105 cld 105 cld
106 SAVE_ARGS 0,0,1 106 SAVE_ARGS 0,0,0
107 /* no need to do an access_ok check here because rbp has been 107 /* no need to do an access_ok check here because rbp has been
108 32bit zero extended */ 108 32bit zero extended */
1091: movl (%rbp),%r9d 1091: movl (%rbp),%r9d
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index e71ed53b08fb..146924ba5df5 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -85,7 +85,8 @@
85#define CSR_AGENT_MASK 0xffe0ffff 85#define CSR_AGENT_MASK 0xffe0ffff
86 86
87#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */ 87#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
88#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */ 88#define MAX_NUM_CHASSIS 8 /* max number of chassis */
89#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2) /* max dev->bus->number */
89#define PHBS_PER_CALGARY 4 90#define PHBS_PER_CALGARY 4
90 91
91/* register offsets in Calgary's internal register space */ 92/* register offsets in Calgary's internal register space */
@@ -110,7 +111,8 @@ static const unsigned long phb_offsets[] = {
110 0xB000 /* PHB3 */ 111 0xB000 /* PHB3 */
111}; 112};
112 113
113void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES]; 114static char bus_to_phb[MAX_PHB_BUS_NUM];
115void* tce_table_kva[MAX_PHB_BUS_NUM];
114unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; 116unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
115static int translate_empty_slots __read_mostly = 0; 117static int translate_empty_slots __read_mostly = 0;
116static int calgary_detected __read_mostly = 0; 118static int calgary_detected __read_mostly = 0;
@@ -119,7 +121,7 @@ static int calgary_detected __read_mostly = 0;
119 * the bitmap of PHBs the user requested that we disable 121 * the bitmap of PHBs the user requested that we disable
120 * translation on. 122 * translation on.
121 */ 123 */
122static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM); 124static DECLARE_BITMAP(translation_disabled, MAX_PHB_BUS_NUM);
123 125
124static void tce_cache_blast(struct iommu_table *tbl); 126static void tce_cache_blast(struct iommu_table *tbl);
125 127
@@ -452,7 +454,7 @@ static struct dma_mapping_ops calgary_dma_ops = {
452 454
453static inline int busno_to_phbid(unsigned char num) 455static inline int busno_to_phbid(unsigned char num)
454{ 456{
455 return bus_to_phb(num) % PHBS_PER_CALGARY; 457 return bus_to_phb[num];
456} 458}
457 459
458static inline unsigned long split_queue_offset(unsigned char num) 460static inline unsigned long split_queue_offset(unsigned char num)
@@ -812,7 +814,7 @@ static int __init calgary_init(void)
812 int i, ret = -ENODEV; 814 int i, ret = -ENODEV;
813 struct pci_dev *dev = NULL; 815 struct pci_dev *dev = NULL;
814 816
815 for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) { 817 for (i = 0; i < MAX_PHB_BUS_NUM; i++) {
816 dev = pci_get_device(PCI_VENDOR_ID_IBM, 818 dev = pci_get_device(PCI_VENDOR_ID_IBM,
817 PCI_DEVICE_ID_IBM_CALGARY, 819 PCI_DEVICE_ID_IBM_CALGARY,
818 dev); 820 dev);
@@ -822,7 +824,7 @@ static int __init calgary_init(void)
822 calgary_init_one_nontraslated(dev); 824 calgary_init_one_nontraslated(dev);
823 continue; 825 continue;
824 } 826 }
825 if (!tce_table_kva[i] && !translate_empty_slots) { 827 if (!tce_table_kva[dev->bus->number] && !translate_empty_slots) {
826 pci_dev_put(dev); 828 pci_dev_put(dev);
827 continue; 829 continue;
828 } 830 }
@@ -842,7 +844,7 @@ error:
842 pci_dev_put(dev); 844 pci_dev_put(dev);
843 continue; 845 continue;
844 } 846 }
845 if (!tce_table_kva[i] && !translate_empty_slots) 847 if (!tce_table_kva[dev->bus->number] && !translate_empty_slots)
846 continue; 848 continue;
847 calgary_disable_translation(dev); 849 calgary_disable_translation(dev);
848 calgary_free_tar(dev); 850 calgary_free_tar(dev);
@@ -876,9 +878,10 @@ static inline int __init determine_tce_table_size(u64 ram)
876void __init detect_calgary(void) 878void __init detect_calgary(void)
877{ 879{
878 u32 val; 880 u32 val;
879 int bus, table_idx; 881 int bus;
880 void *tbl; 882 void *tbl;
881 int detected = 0; 883 int calgary_found = 0;
884 int phb = -1;
882 885
883 /* 886 /*
884 * if the user specified iommu=off or iommu=soft or we found 887 * if the user specified iommu=off or iommu=soft or we found
@@ -889,38 +892,46 @@ void __init detect_calgary(void)
889 892
890 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE); 893 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
891 894
892 for (bus = 0, table_idx = 0; 895 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
893 bus <= num_online_nodes() * MAX_PHB_BUS_NUM; 896 int dev;
894 bus++) { 897
895 BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM); 898 tce_table_kva[bus] = NULL;
899 bus_to_phb[bus] = -1;
900
896 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY) 901 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
897 continue; 902 continue;
903
904 /*
905 * There are 4 PHBs per Calgary chip. Set phb to which phb (0-3)
906 * it is connected to releative to the clagary chip.
907 */
908 phb = (phb + 1) % PHBS_PER_CALGARY;
909
898 if (test_bit(bus, translation_disabled)) { 910 if (test_bit(bus, translation_disabled)) {
899 printk(KERN_INFO "Calgary: translation is disabled for " 911 printk(KERN_INFO "Calgary: translation is disabled for "
900 "PHB 0x%x\n", bus); 912 "PHB 0x%x\n", bus);
901 /* skip this phb, don't allocate a tbl for it */ 913 /* skip this phb, don't allocate a tbl for it */
902 tce_table_kva[table_idx] = NULL;
903 table_idx++;
904 continue; 914 continue;
905 } 915 }
906 /* 916 /*
907 * scan the first slot of the PCI bus to see if there 917 * Scan the slots of the PCI bus to see if there is a device present.
908 * are any devices present 918 * The parent bus will be the zero-ith device, so start at 1.
909 */ 919 */
910 val = read_pci_config(bus, 1, 0, 0); 920 for (dev = 1; dev < 8; dev++) {
911 if (val != 0xffffffff || translate_empty_slots) { 921 val = read_pci_config(bus, dev, 0, 0);
912 tbl = alloc_tce_table(); 922 if (val != 0xffffffff || translate_empty_slots) {
913 if (!tbl) 923 tbl = alloc_tce_table();
914 goto cleanup; 924 if (!tbl)
915 detected = 1; 925 goto cleanup;
916 } else 926 tce_table_kva[bus] = tbl;
917 tbl = NULL; 927 bus_to_phb[bus] = phb;
918 928 calgary_found = 1;
919 tce_table_kva[table_idx] = tbl; 929 break;
920 table_idx++; 930 }
931 }
921 } 932 }
922 933
923 if (detected) { 934 if (calgary_found) {
924 iommu_detected = 1; 935 iommu_detected = 1;
925 calgary_detected = 1; 936 calgary_detected = 1;
926 printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. " 937 printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
@@ -929,9 +940,9 @@ void __init detect_calgary(void)
929 return; 940 return;
930 941
931cleanup: 942cleanup:
932 for (--table_idx; table_idx >= 0; --table_idx) 943 for (--bus; bus >= 0; --bus)
933 if (tce_table_kva[table_idx]) 944 if (tce_table_kva[bus])
934 free_tce_table(tce_table_kva[table_idx]); 945 free_tce_table(tce_table_kva[bus]);
935} 946}
936 947
937int __init calgary_iommu_init(void) 948int __init calgary_iommu_init(void)
@@ -1002,7 +1013,7 @@ static int __init calgary_parse_options(char *p)
1002 if (p == endp) 1013 if (p == endp)
1003 break; 1014 break;
1004 1015
1005 if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) { 1016 if (bridge < MAX_PHB_BUS_NUM) {
1006 printk(KERN_INFO "Calgary: disabling " 1017 printk(KERN_INFO "Calgary: disabling "
1007 "translation for PHB 0x%x\n", bridge); 1018 "translation for PHB 0x%x\n", bridge);
1008 set_bit(bridge, translation_disabled); 1019 set_bit(bridge, translation_disabled);
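
The Calgary rework replaces the modulo-based busno_to_phbid() with an explicit bus_to_phb[] table filled during detection, and indexes tce_table_kva[] by the real dev->bus->number. A toy version of building such a bus-to-PHB map while scanning buses (the array size and the "every fourth bus" rule are illustrative, not the driver's):

    #include <stdio.h>

    #define MAX_PHB_BUS_NUM   16   /* illustrative, not the driver's value */
    #define PHBS_PER_CALGARY   4

    static signed char bus_to_phb[MAX_PHB_BUS_NUM];

    /* Pretend every fourth bus number hosts a Calgary PHB. */
    static int bus_has_phb(int bus) { return (bus % 4) == 0; }

    int main(void)
    {
        int bus, phb = -1;

        for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
            bus_to_phb[bus] = -1;
            if (!bus_has_phb(bus))
                continue;
            /* Advance to the next PHB (0-3) relative to the current chip. */
            phb = (phb + 1) % PHBS_PER_CALGARY;
            bus_to_phb[bus] = (signed char)phb;
        }

        for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++)
            if (bus_to_phb[bus] >= 0)
                printf("bus 0x%02x -> phb %d\n", bus, bus_to_phb[bus]);
        return 0;
    }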
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index ebdb77fe2057..6a55f87ba97f 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -31,9 +31,10 @@ struct dma_mapping_ops swiotlb_dma_ops = {
31void pci_swiotlb_init(void) 31void pci_swiotlb_init(void)
32{ 32{
33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
34 if (!iommu_detected && !no_iommu && 34 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
35 (end_pfn > MAX_DMA32_PFN || force_iommu))
36 swiotlb = 1; 35 swiotlb = 1;
36 if (swiotlb_force)
37 swiotlb = 1;
37 if (swiotlb) { 38 if (swiotlb) {
38 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); 39 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
39 swiotlb_init(); 40 swiotlb_init();
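
After this change the bounce-buffer path is enabled either when no hardware IOMMU was detected and memory extends past the 32-bit DMA limit, or unconditionally when swiotlb=force was given; force_iommu alone no longer turns it on. The decision, reduced to its inputs (illustrative booleans, not the kernel globals):

    #include <stdbool.h>
    #include <stdio.h>

    static bool want_swiotlb(bool iommu_detected, bool no_iommu,
                             bool mem_above_4g, bool swiotlb_force)
    {
        bool swiotlb = false;

        if (!iommu_detected && !no_iommu && mem_above_4g)
            swiotlb = true;
        if (swiotlb_force)
            swiotlb = true;
        return swiotlb;
    }

    int main(void)
    {
        printf("%d\n", want_swiotlb(false, false, true, false));  /* 1 */
        printf("%d\n", want_swiotlb(true, false, true, false));   /* 0 */
        printf("%d\n", want_swiotlb(true, false, true, true));    /* 1, forced */
        return 0;
    }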
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
index d3a9e79e954c..5530dda3f27a 100644
--- a/arch/x86_64/kernel/tce.c
+++ b/arch/x86_64/kernel/tce.c
@@ -96,7 +96,6 @@ static inline unsigned int table_size_to_number_of_entries(unsigned char size)
96static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) 96static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
97{ 97{
98 unsigned int bitmapsz; 98 unsigned int bitmapsz;
99 unsigned int tce_table_index;
100 unsigned long bmppages; 99 unsigned long bmppages;
101 int ret; 100 int ret;
102 101
@@ -105,8 +104,7 @@ static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
105 /* set the tce table size - measured in entries */ 104 /* set the tce table size - measured in entries */
106 tbl->it_size = table_size_to_number_of_entries(specified_table_size); 105 tbl->it_size = table_size_to_number_of_entries(specified_table_size);
107 106
108 tce_table_index = bus_to_phb(tbl->it_busno); 107 tbl->it_base = (unsigned long)tce_table_kva[dev->bus->number];
109 tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
110 if (!tbl->it_base) { 108 if (!tbl->it_base) {
111 printk(KERN_ERR "Calgary: iommu_table_setparms: " 109 printk(KERN_ERR "Calgary: iommu_table_setparms: "
112 "no table allocated?!\n"); 110 "no table allocated?!\n");
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index b9ff75992c16..7a9b18224182 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -28,6 +28,7 @@
28#include <linux/acpi.h> 28#include <linux/acpi.h>
29#ifdef CONFIG_ACPI 29#ifdef CONFIG_ACPI
30#include <acpi/achware.h> /* for PM timer frequency */ 30#include <acpi/achware.h> /* for PM timer frequency */
31#include <acpi/acpi_bus.h>
31#endif 32#endif
32#include <asm/8253pit.h> 33#include <asm/8253pit.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
@@ -193,7 +194,7 @@ unsigned long profile_pc(struct pt_regs *regs)
193 is just accounted to the spinlock function. 194 is just accounted to the spinlock function.
194 Better would be to write these functions in assembler again 195 Better would be to write these functions in assembler again
195 and check exactly. */ 196 and check exactly. */
196 if (in_lock_functions(pc)) { 197 if (!user_mode(regs) && in_lock_functions(pc)) {
197 char *v = *(char **)regs->rsp; 198 char *v = *(char **)regs->rsp;
198 if ((v >= _stext && v <= _etext) || 199 if ((v >= _stext && v <= _etext) ||
199 (v >= _sinittext && v <= _einittext) || 200 (v >= _sinittext && v <= _einittext) ||
@@ -953,11 +954,18 @@ __cpuinit int unsynchronized_tsc(void)
953#ifdef CONFIG_SMP 954#ifdef CONFIG_SMP
954 if (apic_is_clustered_box()) 955 if (apic_is_clustered_box())
955 return 1; 956 return 1;
956 /* Intel systems are normally all synchronized. Exceptions
957 are handled in the check above. */
958 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
959 return 0;
960#endif 957#endif
958 /* Most intel systems have synchronized TSCs except for
959 multi node systems */
960 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
961#ifdef CONFIG_ACPI
962 /* But TSC doesn't tick in C3 so don't use it there */
963 if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 100)
964 return 1;
965#endif
966 return 0;
967 }
968
961 /* Assume multi socket systems are not synchronized */ 969 /* Assume multi socket systems are not synchronized */
962 return num_present_cpus() > 1; 970 return num_present_cpus() > 1;
963} 971}
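
The unsynchronized_tsc() change keeps trusting the TSC on Intel boxes, but only after checking, when ACPI provides a FADT, that the C3 latency is not low enough for C3 to actually be used, since the TSC of that era stops ticking in C3. The decision tree written out as a standalone function (parameter names are illustrative stand-ins for the kernel state):

    #include <stdbool.h>
    #include <stdio.h>

    /* Latency below 100 means C3 will really be entered, so the TSC
     * cannot be used as a time source -- same threshold as the patch. */
    static bool tsc_unusable(bool clustered_apic, bool vendor_intel,
                             bool have_acpi_fadt, int plvl3_lat,
                             int present_cpus)
    {
        if (clustered_apic)
            return true;
        if (vendor_intel) {
            if (have_acpi_fadt && plvl3_lat < 100)
                return true;            /* TSC stops in C3 */
            return false;
        }
        /* Assume multi-socket non-Intel systems are not synchronized. */
        return present_cpus > 1;
    }

    int main(void)
    {
        printf("%d\n", tsc_unusable(false, true, true, 250, 4));  /* 0 */
        printf("%d\n", tsc_unusable(false, true, true, 50, 4));   /* 1 */
        printf("%d\n", tsc_unusable(false, false, false, 0, 2));  /* 1 */
        return 0;
    }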
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index eb39a2775236..f7a9d1421078 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -254,7 +254,6 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
254{ 254{
255 const unsigned cpu = safe_smp_processor_id(); 255 const unsigned cpu = safe_smp_processor_id();
256 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; 256 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
257 int i = 11;
258 unsigned used = 0; 257 unsigned used = 0;
259 258
260 printk("\nCall Trace:\n"); 259 printk("\nCall Trace:\n");
@@ -275,11 +274,20 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
275 if (unwind_init_blocked(&info, tsk) == 0) 274 if (unwind_init_blocked(&info, tsk) == 0)
276 unw_ret = show_trace_unwind(&info, NULL); 275 unw_ret = show_trace_unwind(&info, NULL);
277 } 276 }
278 if (unw_ret > 0) { 277 if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
279 if (call_trace > 0) 278#ifdef CONFIG_STACK_UNWIND
279 unsigned long rip = info.regs.rip;
280 print_symbol("DWARF2 unwinder stuck at %s\n", rip);
281 if (call_trace == 1) {
282 printk("Leftover inexact backtrace:\n");
283 stack = (unsigned long *)info.regs.rsp;
284 } else if (call_trace > 1)
280 return; 285 return;
281 printk("Legacy call trace:"); 286 else
282 i = 18; 287 printk("Full inexact backtrace again:\n");
288#else
289 printk("Inexact backtrace:\n");
290#endif
283 } 291 }
284 } 292 }
285 293
@@ -1118,8 +1126,10 @@ static int __init call_trace_setup(char *s)
1118 call_trace = -1; 1126 call_trace = -1;
1119 else if (strcmp(s, "both") == 0) 1127 else if (strcmp(s, "both") == 0)
1120 call_trace = 0; 1128 call_trace = 0;
1121 else if (strcmp(s, "new") == 0) 1129 else if (strcmp(s, "newfallback") == 0)
1122 call_trace = 1; 1130 call_trace = 1;
1131 else if (strcmp(s, "new") == 0)
1132 call_trace = 2;
1123 return 1; 1133 return 1;
1124} 1134}
1125__setup("call_trace=", call_trace_setup); 1135__setup("call_trace=", call_trace_setup);
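
The traps.c hunks report where the DWARF2 unwinder got stuck and then either fall back to the inexact stack scan or stop, depending on call_trace; the setup hunk adds a "newfallback" keyword, so the option now maps both->0, newfallback->1, new->2 (the pre-existing "old"->-1 branch sits just above the hunk). A standalone sketch of that keyword-to-mode parsing:

    #include <stdio.h>
    #include <string.h>

    static int call_trace = 1;   /* default chosen for the sketch */

    /* Map the boot-option keyword onto the integer mode the tracer checks. */
    static void call_trace_setup(const char *s)
    {
        if (strcmp(s, "old") == 0)          /* branch above the hunk */
            call_trace = -1;
        else if (strcmp(s, "both") == 0)
            call_trace = 0;
        else if (strcmp(s, "newfallback") == 0)
            call_trace = 1;
        else if (strcmp(s, "new") == 0)
            call_trace = 2;
    }

    int main(void)
    {
        call_trace_setup("newfallback");
        printf("call_trace = %d\n", call_trace);
        return 0;
    }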
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index b50a7c7c47f8..3acf60ded2a0 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -2,7 +2,6 @@
2#include <linux/pci.h> 2#include <linux/pci.h>
3#include <asm/mpspec.h> 3#include <asm/mpspec.h>
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <asm/k8.h>
6 5
7/* 6/*
8 * This discovers the pcibus <-> node mapping on AMD K8. 7 * This discovers the pcibus <-> node mapping on AMD K8.
@@ -19,6 +18,7 @@
19#define NR_LDT_BUS_NUMBER_REGISTERS 3 18#define NR_LDT_BUS_NUMBER_REGISTERS 3
20#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF) 19#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
21#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF) 20#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
21#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
22 22
23/** 23/**
24 * fill_mp_bus_to_cpumask() 24 * fill_mp_bus_to_cpumask()
@@ -28,7 +28,8 @@
28__init static int 28__init static int
29fill_mp_bus_to_cpumask(void) 29fill_mp_bus_to_cpumask(void)
30{ 30{
31 int i, j, k; 31 struct pci_dev *nb_dev = NULL;
32 int i, j;
32 u32 ldtbus, nid; 33 u32 ldtbus, nid;
33 static int lbnr[3] = { 34 static int lbnr[3] = {
34 LDT_BUS_NUMBER_REGISTER_0, 35 LDT_BUS_NUMBER_REGISTER_0,
@@ -36,9 +37,8 @@ fill_mp_bus_to_cpumask(void)
36 LDT_BUS_NUMBER_REGISTER_2 37 LDT_BUS_NUMBER_REGISTER_2
37 }; 38 };
38 39
39 cache_k8_northbridges(); 40 while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
40 for (k = 0; k < num_k8_northbridges; k++) { 41 PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) {
41 struct pci_dev *nb_dev = k8_northbridges[k];
42 pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid); 42 pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
43 43
44 for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) { 44 for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
diff --git a/block/blktrace.c b/block/blktrace.c
index b8c0702777ff..265f7a830619 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
80#define trace_sync_bit(rw) \ 80#define trace_sync_bit(rw) \
81 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1)) 81 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
82#define trace_ahead_bit(rw) \ 82#define trace_ahead_bit(rw) \
83 (((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0)) 83 (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
84 84
85/* 85/*
86 * The worker for the various blk_add_trace*() types. Fills out a 86 * The worker for the various blk_add_trace*() types. Fills out a
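
The blktrace fix corrects the shift that moves the BIO_RW_AHEAD flag of rw onto the bit the tracer expects: the patched expression reduces to 1 << 2 whenever the flag is set. Getting such shift distances right is easier with the generic "move bit from position A to position B" idiom, sketched here with made-up bit positions:

    #include <assert.h>
    #include <stdio.h>

    /* Extract bit `from` of x and re-place it at bit `to`. Shifting down to
     * bit 0 first sidesteps having to know which of the two is larger. */
    #define MOVE_BIT(x, from, to)  ((((x) >> (from)) & 1u) << (to))

    int main(void)
    {
        unsigned rw = 1u << 4;                 /* pretend the flag lives at bit 4 */

        assert(MOVE_BIT(rw, 4, 2) == (1u << 2));
        assert(MOVE_BIT(0u, 4, 2) == 0u);
        printf("0x%x\n", MOVE_BIT(rw, 4, 2));  /* prints 0x4 */
        return 0;
    }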
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 102ebc2c5c34..aae3123bf3ee 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
936 * seeks. so allow a little bit of time for him to submit a new rq 936 * seeks. so allow a little bit of time for him to submit a new rq
937 */ 937 */
938 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) 938 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
939 sl = 2; 939 sl = min(sl, msecs_to_jiffies(2));
940 940
941 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 941 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
942 return 1; 942 return 1;
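
The cfq-iosched change is a units fix: the old "sl = 2" armed the idle timer for two jiffies, whose real duration depends on HZ, while the replacement clamps the slice to an explicit 2 ms via msecs_to_jiffies(). A tiny illustration of why raw jiffy counts are HZ-dependent (the conversion below is a simplified stand-in for the kernel helper):

    #include <stdio.h>

    /* Simplified millisecond-to-jiffy conversion, rounding up. */
    static unsigned long msecs_to_jiffies(unsigned int ms, unsigned int hz)
    {
        return ((unsigned long)ms * hz + 999) / 1000;
    }

    int main(void)
    {
        unsigned int hz;

        for (hz = 100; hz <= 1000; hz += 150)
            printf("HZ=%4u: 2 ms = %lu jiffies, 2 jiffies = %u ms\n",
                   hz, msecs_to_jiffies(2, hz), 2 * 1000 / hz);
        return 0;
    }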
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c4df22dfd2a..7b0eca703a67 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status)
1233 } 1233 }
1234} 1234}
1235 1235
1236static void cciss_check_queues(ctlr_info_t *h)
1237{
1238 int start_queue = h->next_to_run;
1239 int i;
1240
1241 /* check to see if we have maxed out the number of commands that can
1242 * be placed on the queue. If so then exit. We do this check here
1243 * in case the interrupt we serviced was from an ioctl and did not
1244 * free any new commands.
1245 */
1246 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1247 return;
1248
1249 /* We have room on the queue for more commands. Now we need to queue
1250 * them up. We will also keep track of the next queue to run so
1251 * that every queue gets a chance to be started first.
1252 */
1253 for (i = 0; i < h->highest_lun + 1; i++) {
1254 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1255 /* make sure the disk has been added and the drive is real
1256 * because this can be called from the middle of init_one.
1257 */
1258 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1259 continue;
1260 blk_start_queue(h->gendisk[curr_queue]->queue);
1261
1262 /* check to see if we have maxed out the number of commands
1263 * that can be placed on the queue.
1264 */
1265 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
1266 if (curr_queue == start_queue) {
1267 h->next_to_run =
1268 (start_queue + 1) % (h->highest_lun + 1);
1269 break;
1270 } else {
1271 h->next_to_run = curr_queue;
1272 break;
1273 }
1274 } else {
1275 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1276 }
1277 }
1278}
1279
1236static void cciss_softirq_done(struct request *rq) 1280static void cciss_softirq_done(struct request *rq)
1237{ 1281{
1238 CommandList_struct *cmd = rq->completion_data; 1282 CommandList_struct *cmd = rq->completion_data;
@@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq)
1264 spin_lock_irqsave(&h->lock, flags); 1308 spin_lock_irqsave(&h->lock, flags);
1265 end_that_request_last(rq, rq->errors); 1309 end_that_request_last(rq, rq->errors);
1266 cmd_free(h, cmd, 1); 1310 cmd_free(h, cmd, 1);
1311 cciss_check_queues(h);
1267 spin_unlock_irqrestore(&h->lock, flags); 1312 spin_unlock_irqrestore(&h->lock, flags);
1268} 1313}
1269 1314
@@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2528 CommandList_struct *c; 2573 CommandList_struct *c;
2529 unsigned long flags; 2574 unsigned long flags;
2530 __u32 a, a1, a2; 2575 __u32 a, a1, a2;
2531 int j;
2532 int start_queue = h->next_to_run;
2533 2576
2534 if (interrupt_not_for_us(h)) 2577 if (interrupt_not_for_us(h))
2535 return IRQ_NONE; 2578 return IRQ_NONE;
@@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2588 } 2631 }
2589 } 2632 }
2590 2633
2591 /* check to see if we have maxed out the number of commands that can
2592 * be placed on the queue. If so then exit. We do this check here
2593 * in case the interrupt we serviced was from an ioctl and did not
2594 * free any new commands.
2595 */
2596 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2597 goto cleanup;
2598
2599 /* We have room on the queue for more commands. Now we need to queue
2600 * them up. We will also keep track of the next queue to run so
2601 * that every queue gets a chance to be started first.
2602 */
2603 for (j = 0; j < h->highest_lun + 1; j++) {
2604 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2605 /* make sure the disk has been added and the drive is real
2606 * because this can be called from the middle of init_one.
2607 */
2608 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
2609 continue;
2610 blk_start_queue(h->gendisk[curr_queue]->queue);
2611
2612 /* check to see if we have maxed out the number of commands
2613 * that can be placed on the queue.
2614 */
2615 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
2616 if (curr_queue == start_queue) {
2617 h->next_to_run =
2618 (start_queue + 1) % (h->highest_lun + 1);
2619 goto cleanup;
2620 } else {
2621 h->next_to_run = curr_queue;
2622 goto cleanup;
2623 }
2624 } else {
2625 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2626 }
2627 }
2628
2629 cleanup:
2630 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2634 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2631 return IRQ_HANDLED; 2635 return IRQ_HANDLED;
2632} 2636}
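
cciss_check_queues() pulls the queue-restart loop out of the interrupt handler so the completion path can run it too; it walks the logical drives round-robin starting at h->next_to_run and remembers where to resume once the command pool fills up. The rotation in miniature (queue count and "pool full" rule are invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    #define NQUEUES 4

    static int next_to_run;

    /* Pretend the command pool fills after this many restarts. */
    static int budget = 2;
    static bool pool_full(void) { return budget-- <= 0; }

    static void check_queues(void)
    {
        int start = next_to_run;
        int i;

        for (i = 0; i < NQUEUES; i++) {
            int curr = (start + i) % NQUEUES;

            printf("restarting queue %d\n", curr);
            if (pool_full()) {
                /* Remember where to resume so every queue gets a turn. */
                next_to_run = (curr == start) ? (start + 1) % NQUEUES : curr;
                break;
            }
        }
    }

    int main(void)
    {
        check_queues();
        printf("next_to_run = %d\n", next_to_run);
        return 0;
    }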
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 6a0c2230f82f..e2d4beac7420 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -67,6 +67,8 @@ static int ignore = 0;
67static int ignore_dga = 0; 67static int ignore_dga = 0;
68static int ignore_csr = 0; 68static int ignore_csr = 0;
69static int ignore_sniffer = 0; 69static int ignore_sniffer = 0;
70static int disable_scofix = 0;
71static int force_scofix = 0;
70static int reset = 0; 72static int reset = 0;
71 73
72#ifdef CONFIG_BT_HCIUSB_SCO 74#ifdef CONFIG_BT_HCIUSB_SCO
@@ -107,9 +109,12 @@ static struct usb_device_id blacklist_ids[] = {
107 { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE }, 109 { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
108 110
109 /* Broadcom BCM2035 */ 111 /* Broadcom BCM2035 */
110 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_BROKEN_ISOC }, 112 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
111 { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, 113 { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
112 114
115 /* IBM/Lenovo ThinkPad with Broadcom chip */
116 { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
117
113 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */ 118 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */
114 { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET }, 119 { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
115 120
@@ -119,11 +124,13 @@ static struct usb_device_id blacklist_ids[] = {
119 /* ISSC Bluetooth Adapter v3.1 */ 124 /* ISSC Bluetooth Adapter v3.1 */
120 { USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET }, 125 { USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
121 126
122 /* RTX Telecom based adapter with buggy SCO support */ 127 /* RTX Telecom based adapters with buggy SCO support */
123 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC }, 128 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
129 { USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC },
124 130
125 /* Belkin F8T012 */ 131 /* Belkin F8T012 and F8T013 devices */
126 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU }, 132 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
133 { USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_WRONG_SCO_MTU },
127 134
128 /* Digianswer devices */ 135 /* Digianswer devices */
129 { USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER }, 136 { USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
@@ -990,8 +997,10 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
990 if (reset || id->driver_info & HCI_RESET) 997 if (reset || id->driver_info & HCI_RESET)
991 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); 998 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
992 999
993 if (id->driver_info & HCI_WRONG_SCO_MTU)
994 set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
1000 if (force_scofix || id->driver_info & HCI_WRONG_SCO_MTU) {
1001 if (!disable_scofix)
1002 set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
1003 }
995 1004
996 if (id->driver_info & HCI_SNIFFER) { 1005 if (id->driver_info & HCI_SNIFFER) {
997 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) 1006 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
@@ -1161,6 +1170,12 @@ MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
1161module_param(ignore_sniffer, bool, 0644); 1170module_param(ignore_sniffer, bool, 0644);
1162MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002"); 1171MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
1163 1172
1173module_param(disable_scofix, bool, 0644);
1174MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
1175
1176module_param(force_scofix, bool, 0644);
1177MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
1178
1164module_param(reset, bool, 0644); 1179module_param(reset, bool, 0644);
1165MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); 1180MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
1166 1181
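The new disable_scofix/force_scofix module parameters interact with the HCI_WRONG_SCO_MTU blacklist flag when deciding whether to set HCI_QUIRK_FIXUP_BUFFER_SIZE. A hedged sketch of that decision as a pure function (the flag value below is made up for illustration, not the real driver constant):

#include <stdbool.h>
#include <stdio.h>

#define HCI_WRONG_SCO_MTU 0x01	/* illustrative bit value */

static bool want_sco_fixup(unsigned driver_info, bool force_scofix, bool disable_scofix)
{
	if (force_scofix || (driver_info & HCI_WRONG_SCO_MTU))
		return !disable_scofix;
	return false;
}

int main(void)
{
	printf("%d\n", want_sco_fixup(HCI_WRONG_SCO_MTU, false, false));	/* 1 */
	printf("%d\n", want_sco_fixup(0, true, false));				/* 1 */
	printf("%d\n", want_sco_fixup(HCI_WRONG_SCO_MTU, false, true));	/* 0 */
	return 0;
}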
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 17bc8abd5df5..00f574cbb0d4 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1174,8 +1174,12 @@ static void dcd_change(MGSLPC_INFO *info)
1174 else 1174 else
1175 info->input_signal_events.dcd_down++; 1175 info->input_signal_events.dcd_down++;
1176#ifdef CONFIG_HDLC 1176#ifdef CONFIG_HDLC
1177 if (info->netcount)
1178 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, info->netdev);
1177 if (info->netcount) {
1178 if (info->serial_signals & SerialSignal_DCD)
1179 netif_carrier_on(info->netdev);
1180 else
1181 netif_carrier_off(info->netdev);
1182 }
1179#endif 1183#endif
1180 wake_up_interruptible(&info->status_event_wait_q); 1184 wake_up_interruptible(&info->status_event_wait_q);
1181 wake_up_interruptible(&info->event_wait_q); 1185 wake_up_interruptible(&info->event_wait_q);
@@ -4251,8 +4255,10 @@ static int hdlcdev_open(struct net_device *dev)
4251 spin_lock_irqsave(&info->lock, flags); 4255 spin_lock_irqsave(&info->lock, flags);
4252 get_signals(info); 4256 get_signals(info);
4253 spin_unlock_irqrestore(&info->lock, flags); 4257 spin_unlock_irqrestore(&info->lock, flags);
4254 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
4255
4258 if (info->serial_signals & SerialSignal_DCD)
4259 netif_carrier_on(dev);
4260 else
4261 netif_carrier_off(dev);
4256 return 0; 4262 return 0;
4257} 4263}
4258 4264
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index df782dd1098c..78b1b1a2732b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -1344,8 +1344,12 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
1344 } else 1344 } else
1345 info->input_signal_events.dcd_down++; 1345 info->input_signal_events.dcd_down++;
1346#ifdef CONFIG_HDLC 1346#ifdef CONFIG_HDLC
1347 if (info->netcount)
1348 hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev);
1347 if (info->netcount) {
1348 if (status & MISCSTATUS_DCD)
1349 netif_carrier_on(info->netdev);
1350 else
1351 netif_carrier_off(info->netdev);
1352 }
1349#endif 1353#endif
1350 } 1354 }
1351 if (status & MISCSTATUS_CTS_LATCHED) 1355 if (status & MISCSTATUS_CTS_LATCHED)
@@ -7844,8 +7848,10 @@ static int hdlcdev_open(struct net_device *dev)
7844 spin_lock_irqsave(&info->irq_spinlock, flags); 7848 spin_lock_irqsave(&info->irq_spinlock, flags);
7845 usc_get_serial_signals(info); 7849 usc_get_serial_signals(info);
7846 spin_unlock_irqrestore(&info->irq_spinlock, flags); 7850 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7847 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
7848
7851 if (info->serial_signals & SerialSignal_DCD)
7852 netif_carrier_on(dev);
7853 else
7854 netif_carrier_off(dev);
7849 return 0; 7855 return 0;
7850} 7856}
7851 7857
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index e829594195c1..b2dbbdb1bf81 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -1497,8 +1497,10 @@ static int hdlcdev_open(struct net_device *dev)
1497 spin_lock_irqsave(&info->lock, flags); 1497 spin_lock_irqsave(&info->lock, flags);
1498 get_signals(info); 1498 get_signals(info);
1499 spin_unlock_irqrestore(&info->lock, flags); 1499 spin_unlock_irqrestore(&info->lock, flags);
1500 hdlc_set_carrier(info->signals & SerialSignal_DCD, dev);
1501
1500 if (info->signals & SerialSignal_DCD)
1501 netif_carrier_on(dev);
1502 else
1503 netif_carrier_off(dev);
1502 return 0; 1504 return 0;
1503} 1505}
1504 1506
@@ -1997,8 +1999,12 @@ static void dcd_change(struct slgt_info *info)
1997 info->input_signal_events.dcd_down++; 1999 info->input_signal_events.dcd_down++;
1998 } 2000 }
1999#ifdef CONFIG_HDLC 2001#ifdef CONFIG_HDLC
2000 if (info->netcount)
2001 hdlc_set_carrier(info->signals & SerialSignal_DCD, info->netdev);
2002 if (info->netcount) {
2003 if (info->signals & SerialSignal_DCD)
2004 netif_carrier_on(info->netdev);
2005 else
2006 netif_carrier_off(info->netdev);
2007 }
2002#endif 2008#endif
2003 wake_up_interruptible(&info->status_event_wait_q); 2009 wake_up_interruptible(&info->status_event_wait_q);
2004 wake_up_interruptible(&info->event_wait_q); 2010 wake_up_interruptible(&info->event_wait_q);
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 1e443a233f51..66f3754fbbdf 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -1752,8 +1752,10 @@ static int hdlcdev_open(struct net_device *dev)
1752 spin_lock_irqsave(&info->lock, flags); 1752 spin_lock_irqsave(&info->lock, flags);
1753 get_signals(info); 1753 get_signals(info);
1754 spin_unlock_irqrestore(&info->lock, flags); 1754 spin_unlock_irqrestore(&info->lock, flags);
1755 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
1756
1755 if (info->serial_signals & SerialSignal_DCD)
1756 netif_carrier_on(dev);
1757 else
1758 netif_carrier_off(dev);
1757 return 0; 1759 return 0;
1758} 1760}
1759 1761
@@ -2522,8 +2524,12 @@ void isr_io_pin( SLMP_INFO *info, u16 status )
2522 } else 2524 } else
2523 info->input_signal_events.dcd_down++; 2525 info->input_signal_events.dcd_down++;
2524#ifdef CONFIG_HDLC 2526#ifdef CONFIG_HDLC
2525 if (info->netcount)
2526 hdlc_set_carrier(status & SerialSignal_DCD, info->netdev);
2527 if (info->netcount) {
2528 if (status & SerialSignal_DCD)
2529 netif_carrier_on(info->netdev);
2530 else
2531 netif_carrier_off(info->netdev);
2532 }
2527#endif 2533#endif
2528 } 2534 }
2529 if (status & MISCSTATUS_CTS_LATCHED) 2535 if (status & MISCSTATUS_CTS_LATCHED)
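All four synclink drivers above replace the removed hdlc_set_carrier() helper with an explicit netif_carrier_on()/netif_carrier_off() choice keyed off the DCD modem signal. A standalone sketch of that pattern, with stubbed-out carrier helpers standing in for the kernel's netdev calls:

#include <stdbool.h>
#include <stdio.h>

struct net_device { bool carrier; };

static void netif_carrier_on(struct net_device *dev)  { dev->carrier = true; }
static void netif_carrier_off(struct net_device *dev) { dev->carrier = false; }

/* Mirror the DCD modem signal onto the HDLC network device. */
static void update_carrier(struct net_device *dev, bool dcd_asserted)
{
	if (dcd_asserted)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

int main(void)
{
	struct net_device dev = { false };

	update_carrier(&dev, true);
	printf("carrier=%d\n", dev.carrier);
	update_carrier(&dev, false);
	printf("carrier=%d\n", dev.carrier);
	return 0;
}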
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d328186f774..bc1088d9b379 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -364,10 +364,12 @@ static ssize_t store_##file_name \
364 if (ret != 1) \ 364 if (ret != 1) \
365 return -EINVAL; \ 365 return -EINVAL; \
366 \ 366 \
367 lock_cpu_hotplug(); \
367 mutex_lock(&policy->lock); \ 368 mutex_lock(&policy->lock); \
368 ret = __cpufreq_set_policy(policy, &new_policy); \ 369 ret = __cpufreq_set_policy(policy, &new_policy); \
369 policy->user_policy.object = policy->object; \ 370 policy->user_policy.object = policy->object; \
370 mutex_unlock(&policy->lock); \ 371 mutex_unlock(&policy->lock); \
372 unlock_cpu_hotplug(); \
371 \ 373 \
372 return ret ? ret : count; \ 374 return ret ? ret : count; \
373} 375}
@@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
1197 *********************************************************************/ 1199 *********************************************************************/
1198 1200
1199 1201
1202/* Must be called with lock_cpu_hotplug held */
1200int __cpufreq_driver_target(struct cpufreq_policy *policy, 1203int __cpufreq_driver_target(struct cpufreq_policy *policy,
1201 unsigned int target_freq, 1204 unsigned int target_freq,
1202 unsigned int relation) 1205 unsigned int relation)
1203{ 1206{
1204 int retval = -EINVAL; 1207 int retval = -EINVAL;
1205 1208
1206 lock_cpu_hotplug();
1207 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, 1209 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1208 target_freq, relation); 1210 target_freq, relation);
1209 if (cpu_online(policy->cpu) && cpufreq_driver->target) 1211 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1210 retval = cpufreq_driver->target(policy, target_freq, relation); 1212 retval = cpufreq_driver->target(policy, target_freq, relation);
1211 1213
1212 unlock_cpu_hotplug();
1213
1214 return retval; 1214 return retval;
1215} 1215}
1216EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 1216EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
1225 if (!policy) 1225 if (!policy)
1226 return -EINVAL; 1226 return -EINVAL;
1227 1227
1228 lock_cpu_hotplug();
1228 mutex_lock(&policy->lock); 1229 mutex_lock(&policy->lock);
1229 1230
1230 ret = __cpufreq_driver_target(policy, target_freq, relation); 1231 ret = __cpufreq_driver_target(policy, target_freq, relation);
1231 1232
1232 mutex_unlock(&policy->lock); 1233 mutex_unlock(&policy->lock);
1234 unlock_cpu_hotplug();
1233 1235
1234 cpufreq_cpu_put(policy); 1236 cpufreq_cpu_put(policy);
1235 return ret; 1237 return ret;
1236} 1238}
1237EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1239EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1238 1240
1241/*
1242 * Locking: Must be called with the lock_cpu_hotplug() lock held
1243 * when "event" is CPUFREQ_GOV_LIMITS
1244 */
1239 1245
1240static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) 1246static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1241{ 1247{
@@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1257} 1263}
1258 1264
1259 1265
1260int cpufreq_governor(unsigned int cpu, unsigned int event)
1261{
1262 int ret = 0;
1263 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1264
1265 if (!policy)
1266 return -EINVAL;
1267
1268 mutex_lock(&policy->lock);
1269 ret = __cpufreq_governor(policy, event);
1270 mutex_unlock(&policy->lock);
1271
1272 cpufreq_cpu_put(policy);
1273 return ret;
1274}
1275EXPORT_SYMBOL_GPL(cpufreq_governor);
1276
1277
1278int cpufreq_register_governor(struct cpufreq_governor *governor) 1266int cpufreq_register_governor(struct cpufreq_governor *governor)
1279{ 1267{
1280 struct cpufreq_governor *t; 1268 struct cpufreq_governor *t;
@@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1342EXPORT_SYMBOL(cpufreq_get_policy); 1330EXPORT_SYMBOL(cpufreq_get_policy);
1343 1331
1344 1332
1333/*
1334 * Locking: Must be called with the lock_cpu_hotplug() lock held
1335 */
1345static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) 1336static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
1346{ 1337{
1347 int ret = 0; 1338 int ret = 0;
@@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
1436 if (!data) 1427 if (!data)
1437 return -EINVAL; 1428 return -EINVAL;
1438 1429
1430 lock_cpu_hotplug();
1431
1439 /* lock this CPU */ 1432 /* lock this CPU */
1440 mutex_lock(&data->lock); 1433 mutex_lock(&data->lock);
1441 1434
@@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
1446 data->user_policy.governor = data->governor; 1439 data->user_policy.governor = data->governor;
1447 1440
1448 mutex_unlock(&data->lock); 1441 mutex_unlock(&data->lock);
1442
1443 unlock_cpu_hotplug();
1449 cpufreq_cpu_put(data); 1444 cpufreq_cpu_put(data);
1450 1445
1451 return ret; 1446 return ret;
@@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu)
1469 if (!data) 1464 if (!data)
1470 return -ENODEV; 1465 return -ENODEV;
1471 1466
1467 lock_cpu_hotplug();
1472 mutex_lock(&data->lock); 1468 mutex_lock(&data->lock);
1473 1469
1474 dprintk("updating policy for CPU %u\n", cpu); 1470 dprintk("updating policy for CPU %u\n", cpu);
@@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu)
1494 ret = __cpufreq_set_policy(data, &policy); 1490 ret = __cpufreq_set_policy(data, &policy);
1495 1491
1496 mutex_unlock(&data->lock); 1492 mutex_unlock(&data->lock);
1497
1493 unlock_cpu_hotplug();
1498 cpufreq_cpu_put(data); 1494 cpufreq_cpu_put(data);
1499 return ret; 1495 return ret;
1500} 1496}
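The cpufreq changes above move lock_cpu_hotplug() out of __cpufreq_driver_target() and into its callers, so the hotplug lock is always taken before the per-policy lock. A userspace sketch of that lock ordering using pthread mutexes (these are stand-ins, not the kernel primitives):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t policy_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold hotplug_lock (mirrors __cpufreq_driver_target). */
static int set_target(unsigned int khz)
{
	printf("target %u kHz\n", khz);
	return 0;
}

/* Mirrors cpufreq_driver_target(): outer hotplug lock first, then the policy lock. */
static int driver_target(unsigned int khz)
{
	int ret;

	pthread_mutex_lock(&hotplug_lock);
	pthread_mutex_lock(&policy_lock);
	ret = set_target(khz);
	pthread_mutex_unlock(&policy_lock);
	pthread_mutex_unlock(&hotplug_lock);
	return ret;
}

int main(void)
{
	return driver_target(1800000);
}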
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b3ebc8f01975..c4c578defabf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
525 break; 525 break;
526 526
527 case CPUFREQ_GOV_LIMITS: 527 case CPUFREQ_GOV_LIMITS:
528 lock_cpu_hotplug();
529 mutex_lock(&dbs_mutex); 528 mutex_lock(&dbs_mutex);
530 if (policy->max < this_dbs_info->cur_policy->cur) 529 if (policy->max < this_dbs_info->cur_policy->cur)
531 __cpufreq_driver_target( 530 __cpufreq_driver_target(
@@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
536 this_dbs_info->cur_policy, 535 this_dbs_info->cur_policy,
537 policy->min, CPUFREQ_RELATION_L); 536 policy->min, CPUFREQ_RELATION_L);
538 mutex_unlock(&dbs_mutex); 537 mutex_unlock(&dbs_mutex);
539 unlock_cpu_hotplug();
540 break; 538 break;
541 } 539 }
542 return 0; 540 return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 87299924e735..52cf1f021825 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -239,6 +239,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
239 total_ticks = (unsigned int) cputime64_sub(cur_jiffies, 239 total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
240 this_dbs_info->prev_cpu_wall); 240 this_dbs_info->prev_cpu_wall);
241 this_dbs_info->prev_cpu_wall = cur_jiffies; 241 this_dbs_info->prev_cpu_wall = cur_jiffies;
242 if (!total_ticks)
243 return;
242 /* 244 /*
243 * Every sampling_rate, we check, if current idle time is less 245 * Every sampling_rate, we check, if current idle time is less
244 * than 20% (default), then we try to increase frequency 246 * than 20% (default), then we try to increase frequency
@@ -304,7 +306,12 @@ static void do_dbs_timer(void *data)
304 unsigned int cpu = smp_processor_id(); 306 unsigned int cpu = smp_processor_id();
305 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 307 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
306 308
309 if (!dbs_info->enable)
310 return;
311
312 lock_cpu_hotplug();
307 dbs_check_cpu(dbs_info); 313 dbs_check_cpu(dbs_info);
314 unlock_cpu_hotplug();
308 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 315 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
309 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 316 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
310} 317}
@@ -319,11 +326,11 @@ static inline void dbs_timer_init(unsigned int cpu)
319 return; 326 return;
320} 327}
321 328
322static inline void dbs_timer_exit(unsigned int cpu)
323{
324 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
325
326 cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
329static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
330{
331 dbs_info->enable = 0;
332 cancel_delayed_work(&dbs_info->work);
333 flush_workqueue(kondemand_wq);
327} 334}
328 335
329static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 336static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -396,8 +403,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
396 403
397 case CPUFREQ_GOV_STOP: 404 case CPUFREQ_GOV_STOP:
398 mutex_lock(&dbs_mutex); 405 mutex_lock(&dbs_mutex);
399 dbs_timer_exit(policy->cpu);
400 this_dbs_info->enable = 0;
406 dbs_timer_exit(this_dbs_info);
401 sysfs_remove_group(&policy->kobj, &dbs_attr_group); 407 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
402 dbs_enable--; 408 dbs_enable--;
403 if (dbs_enable == 0) 409 if (dbs_enable == 0)
@@ -408,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
408 break; 414 break;
409 415
410 case CPUFREQ_GOV_LIMITS: 416 case CPUFREQ_GOV_LIMITS:
411 lock_cpu_hotplug();
412 mutex_lock(&dbs_mutex); 417 mutex_lock(&dbs_mutex);
413 if (policy->max < this_dbs_info->cur_policy->cur) 418 if (policy->max < this_dbs_info->cur_policy->cur)
414 __cpufreq_driver_target(this_dbs_info->cur_policy, 419 __cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -419,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
419 policy->min, 424 policy->min,
420 CPUFREQ_RELATION_L); 425 CPUFREQ_RELATION_L);
421 mutex_unlock(&dbs_mutex); 426 mutex_unlock(&dbs_mutex);
422 unlock_cpu_hotplug();
423 break; 427 break;
424 } 428 }
425 return 0; 429 return 0;
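dbs_timer_exit() now stops the self-rearming ondemand work by clearing the enable flag, cancelling the pending work and flushing the workqueue, while do_dbs_timer() bails out early when enable is clear. A toy single-slot model of that shutdown handshake (purely illustrative; the "queue" below is not a real workqueue):

#include <stdbool.h>
#include <stdio.h>

struct dbs_info { bool enable; bool queued; };

static void do_dbs_timer(struct dbs_info *info)
{
	if (!info->enable)
		return;			/* being torn down: do not rearm */
	printf("sampling...\n");
	info->queued = true;		/* rearm ourselves */
}

static void dbs_timer_exit(struct dbs_info *info)
{
	info->enable = false;		/* stop future rearming */
	info->queued = false;		/* cancel_delayed_work() */
	/* flush_workqueue(): an instance already running sees enable == false */
}

int main(void)
{
	struct dbs_info info = { .enable = true, .queued = true };

	info.queued = false;
	do_dbs_timer(&info);		/* runs and rearms */
	dbs_timer_exit(&info);
	do_dbs_timer(&info);		/* no-op after teardown */
	printf("queued=%d\n", info.queued);
	return 0;
}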
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 44ae5e5b94cf..a06c204589cd 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -18,6 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/cpufreq.h> 20#include <linux/cpufreq.h>
21#include <linux/cpu.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/fs.h> 23#include <linux/fs.h>
23#include <linux/sysfs.h> 24#include <linux/sysfs.h>
@@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
70 71
71 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 72 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
72 73
74 lock_cpu_hotplug();
73 mutex_lock(&userspace_mutex); 75 mutex_lock(&userspace_mutex);
74 if (!cpu_is_managed[policy->cpu]) 76 if (!cpu_is_managed[policy->cpu])
75 goto err; 77 goto err;
@@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
92 94
93 err: 95 err:
94 mutex_unlock(&userspace_mutex); 96 mutex_unlock(&userspace_mutex);
97 unlock_cpu_hotplug();
95 return ret; 98 return ret;
96} 99}
97 100
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 78bf46d917b7..dbd4d6c3698e 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -828,7 +828,7 @@ static int __init ioat_init_module(void)
828 /* if forced, worst case is that rmmod hangs */ 828 /* if forced, worst case is that rmmod hangs */
829 __unsafe(THIS_MODULE); 829 __unsafe(THIS_MODULE);
830 830
831 return pci_module_init(&ioat_pci_drv);
831 return pci_register_driver(&ioat_pci_drv);
832} 832}
833 833
834module_init(ioat_init_module); 834module_init(ioat_init_module);
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index 66d03f242d3c..1a159e8843ca 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -429,7 +429,7 @@ static inline void fcp_scsi_receive(fc_channel *fc, int token, int status, fc_hd
429 429
430 if (fcmd->data) { 430 if (fcmd->data) {
431 if (SCpnt->use_sg) 431 if (SCpnt->use_sg)
432 dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->buffer,
432 dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->request_buffer,
433 SCpnt->use_sg, 433 SCpnt->use_sg,
434 SCpnt->sc_data_direction); 434 SCpnt->sc_data_direction);
435 else 435 else
@@ -810,7 +810,7 @@ static int fcp_scsi_queue_it(fc_channel *fc, Scsi_Cmnd *SCpnt, fcp_cmnd *fcmd, i
810 SCpnt->request_bufflen, 810 SCpnt->request_bufflen,
811 SCpnt->sc_data_direction); 811 SCpnt->sc_data_direction);
812 } else { 812 } else {
813 struct scatterlist *sg = (struct scatterlist *)SCpnt->buffer;
813 struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
814 int nents; 814 int nents;
815 815
816 FCD(("XXX: Use_sg %d %d\n", SCpnt->use_sg, sg->length)) 816 FCD(("XXX: Use_sg %d %d\n", SCpnt->use_sg, sg->length))
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f712e4cfd9dc..7cf3eb023521 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive)
776 * not available so we don't need to recheck that. 776 * not available so we don't need to recheck that.
777 */ 777 */
778 capacity = idedisk_capacity(drive); 778 capacity = idedisk_capacity(drive);
779 barrier = ide_id_has_flush_cache(id) &&
779 barrier = ide_id_has_flush_cache(id) && !drive->noflush &&
780 (drive->addressing == 0 || capacity <= (1ULL << 28) || 780 (drive->addressing == 0 || capacity <= (1ULL << 28) ||
781 ide_id_has_flush_cache_ext(id)); 781 ide_id_has_flush_cache_ext(id));
782 782
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 98918fb6b2ce..7c3a13e1cf64 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive)
750 goto bug_dma_off; 750 goto bug_dma_off;
751 printk(", DMA"); 751 printk(", DMA");
752 } else if (id->field_valid & 1) { 752 } else if (id->field_valid & 1) {
753 printk(", BUG"); 753 goto bug_dma_off;
754 } 754 }
755 return; 755 return;
756bug_dma_off: 756bug_dma_off:
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 05fbd9298db7..defd4b4bd374 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s)
1539 const char *hd_words[] = { 1539 const char *hd_words[] = {
1540 "none", "noprobe", "nowerr", "cdrom", "serialize", 1540 "none", "noprobe", "nowerr", "cdrom", "serialize",
1541 "autotune", "noautotune", "minus8", "swapdata", "bswap", 1541 "autotune", "noautotune", "minus8", "swapdata", "bswap",
1542 "minus11", "remap", "remap63", "scsi", NULL }; 1542 "noflush", "remap", "remap63", "scsi", NULL };
1543 unit = s[2] - 'a'; 1543 unit = s[2] - 'a';
1544 hw = unit / MAX_DRIVES; 1544 hw = unit / MAX_DRIVES;
1545 unit = unit % MAX_DRIVES; 1545 unit = unit % MAX_DRIVES;
@@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s)
1578 case -10: /* "bswap" */ 1578 case -10: /* "bswap" */
1579 drive->bswap = 1; 1579 drive->bswap = 1;
1580 goto done; 1580 goto done;
1581 case -11: /* noflush */
1582 drive->noflush = 1;
1583 goto done;
1581 case -12: /* "remap" */ 1584 case -12: /* "remap" */
1582 drive->remap_0_to_1 = 1; 1585 drive->remap_0_to_1 = 1;
1583 goto done; 1586 goto done;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3cb04424d351..e9bad185968a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive)
498{ 498{
499 u8 speed = ide_dma_speed(drive, it821x_ratemask(drive)); 499 u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
500 500
501 config_it821x_chipset_for_pio(drive, !speed);
502 it821x_tune_chipset(drive, speed);
503 return ide_dma_enable(drive);
501 if (speed) {
502 config_it821x_chipset_for_pio(drive, 0);
503 it821x_tune_chipset(drive, speed);
504
505 return ide_dma_enable(drive);
506 }
507
508 return 0;
504} 509}
505 510
506/** 511/**
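config_chipset_for_dma() now programs the chipset only when ide_dma_speed() actually found a DMA mode, returning 0 otherwise so the caller stays on PIO. A hedged sketch of that control flow with stub tuning helpers standing in for the driver routines:

#include <stdio.h>

static void tune_pio(int use_pio)   { printf("pio=%d\n", use_pio); }
static void tune_dma(unsigned mode) { printf("dma mode=0x%x\n", mode); }

/* Returns 1 when DMA was enabled, 0 when the caller must stay on PIO. */
static int config_for_dma(unsigned best_dma_mode)
{
	if (best_dma_mode) {
		tune_pio(0);
		tune_dma(best_dma_mode);
		return 1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", config_for_dma(0x46));	/* some DMA mode found */
	printf("%d\n", config_for_dma(0));	/* none: stay on PIO */
	return 0;
}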
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed4dab52a6f..1c3cfbbe6a97 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
167 return 0; 167 return 0;
168} 168}
169 169
170int ib_response_mad(struct ib_mad *mad)
171{
172 return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
173 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
174 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
175 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
176}
177EXPORT_SYMBOL(ib_response_mad);
178
170/* 179/*
171 * ib_register_mad_agent - Register to send/receive MADs 180 * ib_register_mad_agent - Register to send/receive MADs
172 */ 181 */
@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
570} 579}
571EXPORT_SYMBOL(ib_unregister_mad_agent); 580EXPORT_SYMBOL(ib_unregister_mad_agent);
572 581
573static inline int response_mad(struct ib_mad *mad)
574{
575 /* Trap represses are responses although response bit is reset */
576 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
577 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
578}
579
580static void dequeue_mad(struct ib_mad_list_head *mad_list) 582static void dequeue_mad(struct ib_mad_list_head *mad_list)
581{ 583{
582 struct ib_mad_queue *mad_queue; 584 struct ib_mad_queue *mad_queue;
@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
723 switch (ret) 725 switch (ret)
724 { 726 {
725 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: 727 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
726 if (response_mad(&mad_priv->mad.mad) &&
728 if (ib_response_mad(&mad_priv->mad.mad) &&
727 mad_agent_priv->agent.recv_handler) { 729 mad_agent_priv->agent.recv_handler) {
728 local->mad_priv = mad_priv; 730 local->mad_priv = mad_priv;
729 local->recv_mad_agent = mad_agent_priv; 731 local->recv_mad_agent = mad_agent_priv;
@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
1551 unsigned long flags; 1553 unsigned long flags;
1552 1554
1553 spin_lock_irqsave(&port_priv->reg_lock, flags); 1555 spin_lock_irqsave(&port_priv->reg_lock, flags);
1554 if (response_mad(mad)) {
1556 if (ib_response_mad(mad)) {
1555 u32 hi_tid; 1557 u32 hi_tid;
1556 struct ib_mad_agent_private *entry; 1558 struct ib_mad_agent_private *entry;
1557 1559
@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1799 } 1801 }
1800 1802
1801 /* Complete corresponding request */ 1803 /* Complete corresponding request */
1802 if (response_mad(mad_recv_wc->recv_buf.mad)) {
1804 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1803 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1805 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1804 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 1806 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1805 if (!mad_send_wr) { 1807 if (!mad_send_wr) {
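ib_response_mad() generalizes the old static response_mad() test: response-bit methods, trap represses, and BM-class MADs with the response attribute-modifier bit all count as responses. A sketch of the predicate with a reduced header and illustrative constant values (not the real ib_mad.h definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define METHOD_RESP         0x80	/* illustrative values */
#define METHOD_TRAP_REPRESS 0x07
#define MGMT_CLASS_BM       0x05
#define BM_ATTR_MOD_RESP    (1u << 31)

struct mad_hdr { uint8_t method, mgmt_class; uint32_t attr_mod; };

static bool is_response_mad(const struct mad_hdr *h)
{
	return (h->method & METHOD_RESP) ||
	       (h->method == METHOD_TRAP_REPRESS) ||
	       (h->mgmt_class == MGMT_CLASS_BM && (h->attr_mod & BM_ATTR_MOD_RESP));
}

int main(void)
{
	struct mad_hdr get  = { .method = 0x01 };
	struct mad_hdr resp = { .method = 0x01 | METHOD_RESP };

	printf("%d %d\n", is_response_mad(&get), is_response_mad(&resp));
	return 0;
}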
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index afe70a549c2f..1273f8807e84 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -112,8 +112,10 @@ struct ib_umad_device {
112struct ib_umad_file { 112struct ib_umad_file {
113 struct ib_umad_port *port; 113 struct ib_umad_port *port;
114 struct list_head recv_list; 114 struct list_head recv_list;
115 struct list_head send_list;
115 struct list_head port_list; 116 struct list_head port_list;
116 spinlock_t recv_lock; 117 spinlock_t recv_lock;
118 spinlock_t send_lock;
117 wait_queue_head_t recv_wait; 119 wait_queue_head_t recv_wait;
118 struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; 120 struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
119 int agents_dead; 121 int agents_dead;
@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
177 return ret; 179 return ret;
178} 180}
179 181
182static void dequeue_send(struct ib_umad_file *file,
183 struct ib_umad_packet *packet)
184 {
185 spin_lock_irq(&file->send_lock);
186 list_del(&packet->list);
187 spin_unlock_irq(&file->send_lock);
188 }
189
180static void send_handler(struct ib_mad_agent *agent, 190static void send_handler(struct ib_mad_agent *agent,
181 struct ib_mad_send_wc *send_wc) 191 struct ib_mad_send_wc *send_wc)
182{ 192{
183 struct ib_umad_file *file = agent->context; 193 struct ib_umad_file *file = agent->context;
184 struct ib_umad_packet *packet = send_wc->send_buf->context[0]; 194 struct ib_umad_packet *packet = send_wc->send_buf->context[0];
185 195
196 dequeue_send(file, packet);
186 ib_destroy_ah(packet->msg->ah); 197 ib_destroy_ah(packet->msg->ah);
187 ib_free_send_mad(packet->msg); 198 ib_free_send_mad(packet->msg);
188 199
@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
370 return 0; 381 return 0;
371} 382}
372 383
384static int same_destination(struct ib_user_mad_hdr *hdr1,
385 struct ib_user_mad_hdr *hdr2)
386{
387 if (!hdr1->grh_present && !hdr2->grh_present)
388 return (hdr1->lid == hdr2->lid);
389
390 if (hdr1->grh_present && hdr2->grh_present)
391 return !memcmp(hdr1->gid, hdr2->gid, 16);
392
393 return 0;
394}
395
396static int is_duplicate(struct ib_umad_file *file,
397 struct ib_umad_packet *packet)
398{
399 struct ib_umad_packet *sent_packet;
400 struct ib_mad_hdr *sent_hdr, *hdr;
401
402 hdr = (struct ib_mad_hdr *) packet->mad.data;
403 list_for_each_entry(sent_packet, &file->send_list, list) {
404 sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
405
406 if ((hdr->tid != sent_hdr->tid) ||
407 (hdr->mgmt_class != sent_hdr->mgmt_class))
408 continue;
409
410 /*
411 * No need to be overly clever here. If two new operations have
412 * the same TID, reject the second as a duplicate. This is more
413 * restrictive than required by the spec.
414 */
415 if (!ib_response_mad((struct ib_mad *) hdr)) {
416 if (!ib_response_mad((struct ib_mad *) sent_hdr))
417 return 1;
418 continue;
419 } else if (!ib_response_mad((struct ib_mad *) sent_hdr))
420 continue;
421
422 if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
423 return 1;
424 }
425
426 return 0;
427}
428
373static ssize_t ib_umad_write(struct file *filp, const char __user *buf, 429static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
374 size_t count, loff_t *pos) 430 size_t count, loff_t *pos)
375{ 431{
@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
379 struct ib_ah_attr ah_attr; 435 struct ib_ah_attr ah_attr;
380 struct ib_ah *ah; 436 struct ib_ah *ah;
381 struct ib_rmpp_mad *rmpp_mad; 437 struct ib_rmpp_mad *rmpp_mad;
382 u8 method;
383 __be64 *tid; 438 __be64 *tid;
384 int ret, data_len, hdr_len, copy_offset, rmpp_active; 439 int ret, data_len, hdr_len, copy_offset, rmpp_active;
385 440
@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
473 } 528 }
474 529
475 /* 530 /*
476 * If userspace is generating a request that will generate a
477 * response, we need to make sure the high-order part of the
478 * transaction ID matches the agent being used to send the
479 * MAD.
531 * Set the high-order part of the transaction ID to make MADs from
532 * different agents unique, and allow routing responses back to the
533 * original requestor.
480 */ 534 */
481 method = ((struct ib_mad_hdr *) packet->msg->mad)->method;
482
483 if (!(method & IB_MGMT_METHOD_RESP) &&
484 method != IB_MGMT_METHOD_TRAP_REPRESS &&
485 method != IB_MGMT_METHOD_SEND) {
535 if (!ib_response_mad(packet->msg->mad)) {
486 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; 536 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
487 *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | 537 *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
488 (be64_to_cpup(tid) & 0xffffffff)); 538 (be64_to_cpup(tid) & 0xffffffff));
539 rmpp_mad->mad_hdr.tid = *tid;
540 }
541
542 spin_lock_irq(&file->send_lock);
543 ret = is_duplicate(file, packet);
544 if (!ret)
545 list_add_tail(&packet->list, &file->send_list);
546 spin_unlock_irq(&file->send_lock);
547 if (ret) {
548 ret = -EINVAL;
549 goto err_msg;
489 } 550 }
490 551
491 ret = ib_post_send_mad(packet->msg, NULL); 552 ret = ib_post_send_mad(packet->msg, NULL);
492 if (ret) 553 if (ret)
493 goto err_msg;
554 goto err_send;
494 555
495 up_read(&file->port->mutex); 556 up_read(&file->port->mutex);
496 return count; 557 return count;
497 558
559err_send:
560 dequeue_send(file, packet);
498err_msg: 561err_msg:
499 ib_free_send_mad(packet->msg); 562 ib_free_send_mad(packet->msg);
500err_ah: 563err_ah:
@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
657 } 720 }
658 721
659 spin_lock_init(&file->recv_lock); 722 spin_lock_init(&file->recv_lock);
723 spin_lock_init(&file->send_lock);
660 INIT_LIST_HEAD(&file->recv_list); 724 INIT_LIST_HEAD(&file->recv_list);
725 INIT_LIST_HEAD(&file->send_list);
661 init_waitqueue_head(&file->recv_wait); 726 init_waitqueue_head(&file->recv_wait);
662 727
663 file->port = port; 728 file->port = port;
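The umad duplicate check keeps every in-flight send on file->send_list and rejects a new MAD whose TID and management class collide with an outstanding one; responses only conflict when they target the same destination. A userspace sketch of is_duplicate() over a flat array (the GRH/GID comparison is omitted and the struct is simplified):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sent { uint64_t tid; uint8_t mgmt_class; bool is_response; uint16_t lid; };

static bool same_destination(const struct sent *a, const struct sent *b)
{
	return a->lid == b->lid;	/* GRH/GID case omitted for brevity */
}

static bool is_duplicate(const struct sent *pending, int n, const struct sent *new_mad)
{
	for (int i = 0; i < n; i++) {
		const struct sent *old = &pending[i];

		if (old->tid != new_mad->tid || old->mgmt_class != new_mad->mgmt_class)
			continue;

		if (!new_mad->is_response) {
			if (!old->is_response)
				return true;	/* two requests with the same TID */
			continue;
		}
		if (!old->is_response)
			continue;
		if (same_destination(new_mad, old))
			return true;		/* response clashes with a pending response */
	}
	return false;
}

int main(void)
{
	struct sent pending[] = { { .tid = 42, .mgmt_class = 1, .lid = 7 } };
	struct sent dup       = { .tid = 42, .mgmt_class = 1, .lid = 9 };

	printf("%d\n", is_duplicate(pending, 1, &dup));	/* 1: rejected */
	return 0;
}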
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bdf5d5098190..30923eb68ec7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -42,6 +42,13 @@
42 42
43#include "uverbs.h" 43#include "uverbs.h"
44 44
45static struct lock_class_key pd_lock_key;
46static struct lock_class_key mr_lock_key;
47static struct lock_class_key cq_lock_key;
48static struct lock_class_key qp_lock_key;
49static struct lock_class_key ah_lock_key;
50static struct lock_class_key srq_lock_key;
51
45#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 52#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
46 do { \ 53 do { \
47 (udata)->inbuf = (void __user *) (ibuf); \ 54 (udata)->inbuf = (void __user *) (ibuf); \
@@ -76,12 +83,13 @@
76 */ 83 */
77 84
78static void init_uobj(struct ib_uobject *uobj, u64 user_handle, 85static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
79 struct ib_ucontext *context)
86 struct ib_ucontext *context, struct lock_class_key *key)
80{ 87{
81 uobj->user_handle = user_handle; 88 uobj->user_handle = user_handle;
82 uobj->context = context; 89 uobj->context = context;
83 kref_init(&uobj->ref); 90 kref_init(&uobj->ref);
84 init_rwsem(&uobj->mutex); 91 init_rwsem(&uobj->mutex);
92 lockdep_set_class(&uobj->mutex, key);
85 uobj->live = 0; 93 uobj->live = 0;
86} 94}
87 95
@@ -470,7 +478,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
470 if (!uobj) 478 if (!uobj)
471 return -ENOMEM; 479 return -ENOMEM;
472 480
473 init_uobj(uobj, 0, file->ucontext);
481 init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
474 down_write(&uobj->mutex); 482 down_write(&uobj->mutex);
475 483
476 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, 484 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
@@ -591,7 +599,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
591 if (!obj) 599 if (!obj)
592 return -ENOMEM; 600 return -ENOMEM;
593 601
594 init_uobj(&obj->uobject, 0, file->ucontext);
602 init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
595 down_write(&obj->uobject.mutex); 603 down_write(&obj->uobject.mutex);
596 604
597 /* 605 /*
@@ -770,7 +778,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
770 if (!obj) 778 if (!obj)
771 return -ENOMEM; 779 return -ENOMEM;
772 780
773 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
781 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
774 down_write(&obj->uobject.mutex); 782 down_write(&obj->uobject.mutex);
775 783
776 if (cmd.comp_channel >= 0) { 784 if (cmd.comp_channel >= 0) {
@@ -1051,13 +1059,14 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1051 if (!obj) 1059 if (!obj)
1052 return -ENOMEM; 1060 return -ENOMEM;
1053 1061
1054 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext);
1062 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
1055 down_write(&obj->uevent.uobject.mutex); 1063 down_write(&obj->uevent.uobject.mutex);
1056 1064
1065 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
1057 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1066 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1058 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext); 1067 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
1059 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext);
1060 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
1068 rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
1069 scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
1061 1070
1062 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { 1071 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
1063 ret = -EINVAL; 1072 ret = -EINVAL;
@@ -1125,7 +1134,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1125 1134
1126 put_pd_read(pd); 1135 put_pd_read(pd);
1127 put_cq_read(scq); 1136 put_cq_read(scq);
1128 put_cq_read(rcq);
1137 if (rcq != scq)
1138 put_cq_read(rcq);
1129 if (srq) 1139 if (srq)
1130 put_srq_read(srq); 1140 put_srq_read(srq);
1131 1141
@@ -1150,7 +1160,7 @@ err_put:
1150 put_pd_read(pd); 1160 put_pd_read(pd);
1151 if (scq) 1161 if (scq)
1152 put_cq_read(scq); 1162 put_cq_read(scq);
1153 if (rcq)
1163 if (rcq && rcq != scq)
1154 put_cq_read(rcq); 1164 put_cq_read(rcq);
1155 if (srq) 1165 if (srq)
1156 put_srq_read(srq); 1166 put_srq_read(srq);
@@ -1751,7 +1761,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
1751 if (!uobj) 1761 if (!uobj)
1752 return -ENOMEM; 1762 return -ENOMEM;
1753 1763
1754 init_uobj(uobj, cmd.user_handle, file->ucontext);
1764 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
1755 down_write(&uobj->mutex); 1765 down_write(&uobj->mutex);
1756 1766
1757 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1767 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1775,7 +1785,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
1775 ah = ib_create_ah(pd, &attr); 1785 ah = ib_create_ah(pd, &attr);
1776 if (IS_ERR(ah)) { 1786 if (IS_ERR(ah)) {
1777 ret = PTR_ERR(ah); 1787 ret = PTR_ERR(ah);
1778 goto err;
1788 goto err_put;
1779 } 1789 }
1780 1790
1781 ah->uobject = uobj; 1791 ah->uobject = uobj;
@@ -1811,6 +1821,9 @@ err_copy:
1811err_destroy: 1821err_destroy:
1812 ib_destroy_ah(ah); 1822 ib_destroy_ah(ah);
1813 1823
1824err_put:
1825 put_pd_read(pd);
1826
1814err: 1827err:
1815 put_uobj_write(uobj); 1828 put_uobj_write(uobj);
1816 return ret; 1829 return ret;
@@ -1963,7 +1976,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1963 if (!obj) 1976 if (!obj)
1964 return -ENOMEM; 1977 return -ENOMEM;
1965 1978
1966 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
1979 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
1967 down_write(&obj->uobject.mutex); 1980 down_write(&obj->uobject.mutex);
1968 1981
1969 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1982 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1984,7 +1997,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1984 srq = pd->device->create_srq(pd, &attr, &udata); 1997 srq = pd->device->create_srq(pd, &attr, &udata);
1985 if (IS_ERR(srq)) { 1998 if (IS_ERR(srq)) {
1986 ret = PTR_ERR(srq); 1999 ret = PTR_ERR(srq);
1987 goto err;
2000 goto err_put;
1988 } 2001 }
1989 2002
1990 srq->device = pd->device; 2003 srq->device = pd->device;
@@ -2029,6 +2042,9 @@ err_copy:
2029err_destroy: 2042err_destroy:
2030 ib_destroy_srq(srq); 2043 ib_destroy_srq(srq);
2031 2044
2045err_put:
2046 put_pd_read(pd);
2047
2032err: 2048err:
2033 put_uobj_write(&obj->uobject); 2049 put_uobj_write(&obj->uobject);
2034 return ret; 2050 return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 823131d58b34..f98518d912b5 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -859,6 +859,38 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
859 __ipath_layer_rcv_lid(dd, hdr); 859 __ipath_layer_rcv_lid(dd, hdr);
860} 860}
861 861
862static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
863 u32 eflags,
864 u32 l,
865 u32 etail,
866 u64 *rc)
867{
868 char emsg[128];
869 struct ipath_message_header *hdr;
870
871 get_rhf_errstring(eflags, emsg, sizeof emsg);
872 hdr = (struct ipath_message_header *)&rc[1];
873 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
874 "tlen=%x opcode=%x egridx=%x: %s\n",
875 eflags, l,
876 ipath_hdrget_rcv_type((__le32 *) rc),
877 ipath_hdrget_length_in_bytes((__le32 *) rc),
878 be32_to_cpu(hdr->bth[0]) >> 24,
879 etail, emsg);
880
881 /* Count local link integrity errors. */
882 if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
883 u8 n = (dd->ipath_ibcctrl >>
884 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
885 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
886
887 if (++dd->ipath_lli_counter > n) {
888 dd->ipath_lli_counter = 0;
889 dd->ipath_lli_errors++;
890 }
891 }
892}
893
862/* 894/*
863 * ipath_kreceive - receive a packet 895 * ipath_kreceive - receive a packet
864 * @dd: the infinipath device 896 * @dd: the infinipath device
@@ -875,7 +907,6 @@ void ipath_kreceive(struct ipath_devdata *dd)
875 struct ipath_message_header *hdr; 907 struct ipath_message_header *hdr;
876 u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0; 908 u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
877 static u64 totcalls; /* stats, may eventually remove */ 909 static u64 totcalls; /* stats, may eventually remove */
878 char emsg[128];
879 910
880 if (!dd->ipath_hdrqtailptr) { 911 if (!dd->ipath_hdrqtailptr) {
881 ipath_dev_err(dd, 912 ipath_dev_err(dd,
@@ -938,26 +969,9 @@ reloop:
938 "%x\n", etype); 969 "%x\n", etype);
939 } 970 }
940 971
941 if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
942 INFINIPATH_RHF_H_IHDRERR)) {
943 get_rhf_errstring(eflags, emsg, sizeof emsg);
972 if (unlikely(eflags))
973 ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
974 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
944 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
945 "tlen=%x opcode=%x egridx=%x: %s\n",
946 eflags, l, etype, tlen, bthbytes[0],
947 ipath_hdrget_index((__le32 *) rc), emsg);
948 /* Count local link integrity errors. */
949 if (eflags & (INFINIPATH_RHF_H_ICRCERR |
950 INFINIPATH_RHF_H_VCRCERR)) {
951 u8 n = (dd->ipath_ibcctrl >>
952 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
953 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
954
955 if (++dd->ipath_lli_counter > n) {
956 dd->ipath_lli_counter = 0;
957 dd->ipath_lli_errors++;
958 }
959 }
960 } else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
961 int ret = __ipath_verbs_rcv(dd, rc + 1, 975 int ret = __ipath_verbs_rcv(dd, rc + 1,
962 ebuf, tlen); 976 ebuf, tlen);
963 if (ret == -ENODEV) 977 if (ret == -ENODEV)
@@ -981,25 +995,7 @@ reloop:
981 else if (etype == RCVHQ_RCV_TYPE_EXPECTED) 995 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
982 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", 996 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
983 be32_to_cpu(hdr->bth[0]) & 0xff); 997 be32_to_cpu(hdr->bth[0]) & 0xff);
984 else if (eflags & (INFINIPATH_RHF_H_TIDERR |
998 else {
985 INFINIPATH_RHF_H_IHDRERR)) {
986 /*
987 * This is a type 3 packet, only the LRH is in the
988 * rcvhdrq, the rest of the header is in the eager
989 * buffer.
990 */
991 u8 opcode;
992 if (ebuf) {
993 bthbytes = (u8 *) ebuf;
994 opcode = *bthbytes;
995 }
996 else
997 opcode = 0;
998 get_rhf_errstring(eflags, emsg, sizeof emsg);
999 ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
1000 "len %x\n", eflags, emsg, opcode, etail,
1001 tlen);
1002 } else {
999 /*
1003 /* 999 /*
1004 * error packet, type of error unknown. 1000 * error packet, type of error unknown.
1005 * Probably type 3, but we don't know, so don't 1001 * Probably type 3, but we don't know, so don't
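The receive-error handling moves into ipath_rcv_hdrerr(), which charges a local link integrity error only after the CRC error counter exceeds the configured PHY error threshold, then restarts the counter. A simplified model of that accounting (field names and the threshold value are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct devdata { unsigned lli_counter, lli_errors, threshold; };

static void account_rcv_error(struct devdata *dd, bool crc_error)
{
	if (!crc_error)
		return;
	if (++dd->lli_counter > dd->threshold) {
		dd->lli_counter = 0;
		dd->lli_errors++;
	}
}

int main(void)
{
	struct devdata dd = { .threshold = 2 };

	for (int i = 0; i < 7; i++)
		account_rcv_error(&dd, true);
	printf("lli_errors=%u counter=%u\n", dd.lli_errors, dd.lli_counter);
	return 0;
}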
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 46773c673a1a..a5ca279370aa 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -197,6 +197,21 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
197 size_t off; 197 size_t off;
198 int ret; 198 int ret;
199 199
200 /*
201 * We use RKEY == zero for physical addresses
202 * (see ipath_get_dma_mr).
203 */
204 if (rkey == 0) {
205 sge->mr = NULL;
206 sge->vaddr = phys_to_virt(vaddr);
207 sge->length = len;
208 sge->sge_length = len;
209 ss->sg_list = NULL;
210 ss->num_sge = 1;
211 ret = 1;
212 goto bail;
213 }
214
200 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; 215 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
201 if (unlikely(mr == NULL || mr->lkey != rkey)) { 216 if (unlikely(mr == NULL || mr->lkey != rkey)) {
202 ret = 0; 217 ret = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 56ac336dd1ec..d70a9b6b5239 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -191,10 +191,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
191{ 191{
192 struct ipath_sge *sge = &ss->sge; 192 struct ipath_sge *sge = &ss->sge;
193 193
194 while (length > sge->sge_length) {
195 length -= sge->sge_length;
196 ss->sge = *ss->sg_list++;
197 }
198 while (length) { 194 while (length) {
199 u32 len = sge->length; 195 u32 len = sge->length;
200 196
@@ -627,6 +623,7 @@ static int ipath_query_device(struct ib_device *ibdev,
627 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | 623 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
628 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 624 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
629 IB_DEVICE_SYS_IMAGE_GUID; 625 IB_DEVICE_SYS_IMAGE_GUID;
626 props->page_size_cap = PAGE_SIZE;
630 props->vendor_id = ipath_layer_get_vendorid(dev->dd); 627 props->vendor_id = ipath_layer_get_vendorid(dev->dd);
631 props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); 628 props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
632 props->hw_ver = ipath_layer_get_pcirev(dev->dd); 629 props->hw_ver = ipath_layer_get_pcirev(dev->dd);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index d0f7731802c9..deabc14b4ea4 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -778,11 +778,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
778 ((dev->fw_ver & 0xffff0000ull) >> 16) | 778 ((dev->fw_ver & 0xffff0000ull) >> 16) |
779 ((dev->fw_ver & 0x0000ffffull) << 16); 779 ((dev->fw_ver & 0x0000ffffull) << 16);
780 780
781 MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
782 dev->cmd.max_cmds = 1 << lg;
783
781 mthca_dbg(dev, "FW version %012llx, max commands %d\n", 784 mthca_dbg(dev, "FW version %012llx, max commands %d\n",
782 (unsigned long long) dev->fw_ver, dev->cmd.max_cmds); 785 (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
783 786
784 MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
785 dev->cmd.max_cmds = 1 << lg;
786 MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET); 787 MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
787 MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 788 MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
788 789
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index fab417c5cf43..b60a9d79ae54 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -370,7 +370,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
370 return -EINVAL; 370 return -EINVAL;
371 371
372 if (attr_mask & IB_SRQ_LIMIT) { 372 if (attr_mask & IB_SRQ_LIMIT) {
373 if (attr->srq_limit > srq->max)
373 u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
374 if (attr->srq_limit > max_wr)
374 return -EINVAL; 375 return -EINVAL;
375 376
376 mutex_lock(&srq->mutex); 377 mutex_lock(&srq->mutex);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 3f89f5e19036..474aa214ab57 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -212,6 +212,7 @@ struct ipoib_path {
212 212
213struct ipoib_neigh { 213struct ipoib_neigh {
214 struct ipoib_ah *ah; 214 struct ipoib_ah *ah;
215 union ib_gid dgid;
215 struct sk_buff_head queue; 216 struct sk_buff_head queue;
216 217
217 struct neighbour *neighbour; 218 struct neighbour *neighbour;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1c6ea1c682a5..cf71d2a5515c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -404,6 +404,8 @@ static void path_rec_completion(int status,
404 list_for_each_entry(neigh, &path->neigh_list, list) { 404 list_for_each_entry(neigh, &path->neigh_list, list) {
405 kref_get(&path->ah->ref); 405 kref_get(&path->ah->ref);
406 neigh->ah = path->ah; 406 neigh->ah = path->ah;
407 memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
408 sizeof(union ib_gid));
407 409
408 while ((skb = __skb_dequeue(&neigh->queue))) 410 while ((skb = __skb_dequeue(&neigh->queue)))
409 __skb_queue_tail(&skqueue, skb); 411 __skb_queue_tail(&skqueue, skb);
@@ -510,6 +512,8 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
510 if (path->ah) { 512 if (path->ah) {
511 kref_get(&path->ah->ref); 513 kref_get(&path->ah->ref);
512 neigh->ah = path->ah; 514 neigh->ah = path->ah;
515 memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
516 sizeof(union ib_gid));
513 517
514 ipoib_send(dev, skb, path->ah, 518 ipoib_send(dev, skb, path->ah,
515 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 519 be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
@@ -633,6 +637,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
633 neigh = *to_ipoib_neigh(skb->dst->neighbour); 637 neigh = *to_ipoib_neigh(skb->dst->neighbour);
634 638
635 if (likely(neigh->ah)) { 639 if (likely(neigh->ah)) {
640 if (unlikely(memcmp(&neigh->dgid.raw,
641 skb->dst->neighbour->ha + 4,
642 sizeof(union ib_gid)))) {
643 spin_lock(&priv->lock);
644 /*
645 * It's safe to call ipoib_put_ah() inside
646 * priv->lock here, because we know that
647 * path->ah will always hold one more reference,
648 * so ipoib_put_ah() will never do more than
649 * decrement the ref count.
650 */
651 ipoib_put_ah(neigh->ah);
652 list_del(&neigh->list);
653 ipoib_neigh_free(neigh);
654 spin_unlock(&priv->lock);
655 ipoib_path_lookup(skb, dev);
656 goto out;
657 }
658
636 ipoib_send(dev, skb, neigh->ah, 659 ipoib_send(dev, skb, neigh->ah,
637 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 660 be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
638 goto out; 661 goto out;
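ipoib_start_xmit() now caches the destination GID in the neighbour at path-resolution time and compares it with the GID embedded in the hardware address on every send; a mismatch drops the stale neighbour and redoes the path lookup. A sketch of that staleness test (hardware-address layout per IPoIB, GID at offset 4; the struct is simplified):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GID_LEN 16

struct neigh { uint8_t dgid[GID_LEN]; bool has_ah; };

/* Returns true when the cached address handle may still be used. */
static bool neigh_still_valid(const struct neigh *n, const uint8_t *hw_addr)
{
	return n->has_ah && !memcmp(n->dgid, hw_addr + 4, GID_LEN);
}

int main(void)
{
	struct neigh n = { .has_ah = true };
	uint8_t hw_addr[4 + GID_LEN] = { 0 };

	memset(n.dgid, 0xab, GID_LEN);
	memset(hw_addr + 4, 0xab, GID_LEN);
	printf("%d\n", neigh_still_valid(&n, hw_addr));	/* 1: send directly */

	hw_addr[4] = 0xcd;				/* destination moved */
	printf("%d\n", neigh_still_valid(&n, hw_addr));	/* 0: redo path lookup */
	return 0;
}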
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ab40488182b3..b5e6a7be603d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -264,6 +264,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
264 if (!ah) { 264 if (!ah) {
265 ipoib_warn(priv, "ib_address_create failed\n"); 265 ipoib_warn(priv, "ib_address_create failed\n");
266 } else { 266 } else {
267 spin_lock_irq(&priv->lock);
268 mcast->ah = ah;
269 spin_unlock_irq(&priv->lock);
270
267 ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT 271 ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
268 " AV %p, LID 0x%04x, SL %d\n", 272 " AV %p, LID 0x%04x, SL %d\n",
269 IPOIB_GID_ARG(mcast->mcmember.mgid), 273 IPOIB_GID_ARG(mcast->mcmember.mgid),
@@ -271,10 +275,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
271 be16_to_cpu(mcast->mcmember.mlid), 275 be16_to_cpu(mcast->mcmember.mlid),
272 mcast->mcmember.sl); 276 mcast->mcmember.sl);
273 } 277 }
274
275 spin_lock_irq(&priv->lock);
276 mcast->ah = ah;
277 spin_unlock_irq(&priv->lock);
278 } 278 }
279 279
280 /* actually send any queued packets */ 280 /* actually send any queued packets */
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index bbc229852881..ea31d8470510 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -48,10 +48,8 @@ config FUSION_SAS
48 List of supported controllers: 48 List of supported controllers:
49 49
50 LSISAS1064 50 LSISAS1064
51 LSISAS1066
52 LSISAS1068 51 LSISAS1068
53 LSISAS1064E 52 LSISAS1064E
54 LSISAS1066E
55 LSISAS1068E 53 LSISAS1068E
56 54
57config FUSION_MAX_SGE 55config FUSION_MAX_SGE
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index b114236f4395..341691390e86 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -9,7 +9,6 @@
9#EXTRA_CFLAGS += -DMPT_DEBUG_EXIT 9#EXTRA_CFLAGS += -DMPT_DEBUG_EXIT
10#EXTRA_CFLAGS += -DMPT_DEBUG_FAIL 10#EXTRA_CFLAGS += -DMPT_DEBUG_FAIL
11 11
12
13# 12#
14# driver/module specifics... 13# driver/module specifics...
15# 14#
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 43308df64623..29d0635cce1d 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -436,8 +436,6 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
436 */ 436 */
437 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) { 437 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
438 freereq = 0; 438 freereq = 0;
439 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n",
440 ioc->name, pEvReply));
441 } else { 439 } else {
442 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", 440 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
443 ioc->name, pEvReply)); 441 ioc->name, pEvReply));
@@ -678,19 +676,19 @@ int
678mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx) 676mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
679{ 677{
680 MPT_ADAPTER *ioc; 678 MPT_ADAPTER *ioc;
679 const struct pci_device_id *id;
681 680
682 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) {
681 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
683 return -EINVAL; 682 return -EINVAL;
684 }
685 683
686 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc; 684 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
687 685
688 /* call per pci device probe entry point */ 686 /* call per pci device probe entry point */
689 list_for_each_entry(ioc, &ioc_list, list) { 687 list_for_each_entry(ioc, &ioc_list, list) {
690 if(dd_cbfunc->probe) {
691 dd_cbfunc->probe(ioc->pcidev,
692 ioc->pcidev->driver->id_table);
693 }
688 id = ioc->pcidev->driver ?
689 ioc->pcidev->driver->id_table : NULL;
690 if (dd_cbfunc->probe)
691 dd_cbfunc->probe(ioc->pcidev, id);
694 } 692 }
695 693
696 return 0; 694 return 0;
@@ -1056,9 +1054,8 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1056 1054
1057 dinitprintk((MYIOC_s_INFO_FMT 1055 dinitprintk((MYIOC_s_INFO_FMT
1058 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n", 1056 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
1059 ioc->name, 1057 ioc->name, ioc->HostPageBuffer,
1060 ioc->HostPageBuffer, 1058 (u32)ioc->HostPageBuffer_dma,
1061 ioc->HostPageBuffer_dma,
1062 host_page_buffer_sz)); 1059 host_page_buffer_sz));
1063 ioc->alloc_total += host_page_buffer_sz; 1060 ioc->alloc_total += host_page_buffer_sz;
1064 ioc->HostPageBuffer_sz = host_page_buffer_sz; 1061 ioc->HostPageBuffer_sz = host_page_buffer_sz;
@@ -1380,6 +1377,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1380 printk(KERN_WARNING MYNAM 1377 printk(KERN_WARNING MYNAM
1381 ": WARNING - %s did not initialize properly! (%d)\n", 1378 ": WARNING - %s did not initialize properly! (%d)\n",
1382 ioc->name, r); 1379 ioc->name, r);
1380
1383 list_del(&ioc->list); 1381 list_del(&ioc->list);
1384 if (ioc->alt_ioc) 1382 if (ioc->alt_ioc)
1385 ioc->alt_ioc->alt_ioc = NULL; 1383 ioc->alt_ioc->alt_ioc = NULL;
@@ -1762,9 +1760,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1762 * chips (mpt_adapter_disable, 1760 * chips (mpt_adapter_disable,
1763 * mpt_diag_reset) 1761 * mpt_diag_reset)
1764 */ 1762 */
1765 ioc->cached_fw = NULL;
1766 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n", 1763 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
1767 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw)); 1764 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
1765 ioc->alt_ioc->cached_fw = NULL;
1768 } 1766 }
1769 } else { 1767 } else {
1770 printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); 1768 printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
@@ -1885,7 +1883,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1885 /* FIXME? Examine results here? */ 1883 /* FIXME? Examine results here? */
1886 } 1884 }
1887 1885
1888out:
1886 out:
1889 if ((ret != 0) && irq_allocated) { 1887 if ((ret != 0) && irq_allocated) {
1890 free_irq(ioc->pci_irq, ioc); 1888 free_irq(ioc->pci_irq, ioc);
1891 if (mpt_msi_enable) 1889 if (mpt_msi_enable)
@@ -2670,6 +2668,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2670 dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", 2668 dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
2671 ioc->name, count)); 2669 ioc->name, count));
2672 2670
2671 ioc->aen_event_read_flag=0;
2673 return r; 2672 return r;
2674} 2673}
2675 2674
@@ -2737,6 +2736,8 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
2737 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) { 2736 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
2738 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */ 2737 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
2739 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma; 2738 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
2739 ioc->alloc_total += size;
2740 ioc->alt_ioc->alloc_total -= size;
2740 } else { 2741 } else {
2741 if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) ) 2742 if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) )
2742 ioc->alloc_total += size; 2743 ioc->alloc_total += size;
@@ -3166,6 +3167,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
3166static int 3167static int
3167mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) 3168mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3168{ 3169{
3170 MPT_ADAPTER *iocp=NULL;
3169 u32 diag0val; 3171 u32 diag0val;
3170 u32 doorbell; 3172 u32 doorbell;
3171 int hard_reset_done = 0; 3173 int hard_reset_done = 0;
@@ -3301,17 +3303,23 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3301 /* FIXME? Examine results here? */ 3303 /* FIXME? Examine results here? */
3302 } 3304 }
3303 3305
3304 if (ioc->cached_fw) { 3306 if (ioc->cached_fw)
3307 iocp = ioc;
3308 else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
3309 iocp = ioc->alt_ioc;
3310 if (iocp) {
3305 /* If the DownloadBoot operation fails, the 3311 /* If the DownloadBoot operation fails, the
3306 * IOC will be left unusable. This is a fatal error 3312 * IOC will be left unusable. This is a fatal error
3307 * case. _diag_reset will return < 0 3313 * case. _diag_reset will return < 0
3308 */ 3314 */
3309 for (count = 0; count < 30; count ++) { 3315 for (count = 0; count < 30; count ++) {
3310 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3316 diag0val = CHIPREG_READ32(&iocp->chip->Diagnostic);
3311 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { 3317 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
3312 break; 3318 break;
3313 } 3319 }
3314 3320
3321 dprintk((MYIOC_s_INFO_FMT "cached_fw: diag0val=%x count=%d\n",
3322 iocp->name, diag0val, count));
3315 /* wait 1 sec */ 3323 /* wait 1 sec */
3316 if (sleepFlag == CAN_SLEEP) { 3324 if (sleepFlag == CAN_SLEEP) {
3317 msleep (1000); 3325 msleep (1000);
@@ -3320,7 +3328,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3320 } 3328 }
3321 } 3329 }
3322 if ((count = mpt_downloadboot(ioc, 3330 if ((count = mpt_downloadboot(ioc,
3323 (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) { 3331 (MpiFwHeader_t *)iocp->cached_fw, sleepFlag)) < 0) {
3324 printk(KERN_WARNING MYNAM 3332 printk(KERN_WARNING MYNAM
3325 ": firmware downloadboot failure (%d)!\n", count); 3333 ": firmware downloadboot failure (%d)!\n", count);
3326 } 3334 }
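
For reference, the owner selection this hunk adds can be read as a small helper: on a dual-IOC board the downloadboot image may have been cached by either adapter, so the reset poll and the boot must use whichever one holds it. The helper name is hypothetical; MPT_ADAPTER and cached_fw come from mptbase.h.

    static MPT_ADAPTER *mpt_fw_owner(MPT_ADAPTER *ioc)
    {
            if (ioc->cached_fw)
                    return ioc;             /* this IOC cached the image */
            if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
                    return ioc->alt_ioc;    /* partner IOC cached it */
            return NULL;                    /* nothing to downloadboot from */
    }
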
@@ -3907,18 +3915,18 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3907 3915
3908 if (sleepFlag == CAN_SLEEP) { 3916 if (sleepFlag == CAN_SLEEP) {
3909 while (--cntdn) { 3917 while (--cntdn) {
3918 msleep (1);
3910 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3919 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3911 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 3920 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
3912 break; 3921 break;
3913 msleep (1);
3914 count++; 3922 count++;
3915 } 3923 }
3916 } else { 3924 } else {
3917 while (--cntdn) { 3925 while (--cntdn) {
3926 mdelay (1);
3918 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3927 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3919 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 3928 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
3920 break; 3929 break;
3921 mdelay (1);
3922 count++; 3930 count++;
3923 } 3931 }
3924 } 3932 }
@@ -4883,6 +4891,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
4883 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma); 4891 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
4884 if (!pIoc4) 4892 if (!pIoc4)
4885 return; 4893 return;
4894 ioc->alloc_total += iocpage4sz;
4886 } else { 4895 } else {
4887 ioc4_dma = ioc->spi_data.IocPg4_dma; 4896 ioc4_dma = ioc->spi_data.IocPg4_dma;
4888 iocpage4sz = ioc->spi_data.IocPg4Sz; 4897 iocpage4sz = ioc->spi_data.IocPg4Sz;
@@ -4899,6 +4908,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
4899 } else { 4908 } else {
4900 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma); 4909 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
4901 ioc->spi_data.pIocPg4 = NULL; 4910 ioc->spi_data.pIocPg4 = NULL;
4911 ioc->alloc_total -= iocpage4sz;
4902 } 4912 }
4903} 4913}
4904 4914
@@ -5030,19 +5040,18 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
5030 EventAck_t *pAck; 5040 EventAck_t *pAck;
5031 5041
5032 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5042 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5033 printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK " 5043 dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
5034 "request frame for Event=%x EventContext=%x EventData=%x!\n", 5044 ioc->name,__FUNCTION__));
5035 ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext),
5036 le32_to_cpu(evnp->Data[0]));
5037 return -1; 5045 return -1;
5038 } 5046 }
5039 memset(pAck, 0, sizeof(*pAck));
5040 5047
5041 dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); 5048 devtverboseprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name));
5042 5049
5043 pAck->Function = MPI_FUNCTION_EVENT_ACK; 5050 pAck->Function = MPI_FUNCTION_EVENT_ACK;
5044 pAck->ChainOffset = 0; 5051 pAck->ChainOffset = 0;
5052 pAck->Reserved[0] = pAck->Reserved[1] = 0;
5045 pAck->MsgFlags = 0; 5053 pAck->MsgFlags = 0;
5054 pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0;
5046 pAck->Event = evnp->Event; 5055 pAck->Event = evnp->Event;
5047 pAck->EventContext = evnp->EventContext; 5056 pAck->EventContext = evnp->EventContext;
5048 5057
@@ -5704,9 +5713,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5704 break; 5713 break;
5705 case MPI_EVENT_EVENT_CHANGE: 5714 case MPI_EVENT_EVENT_CHANGE:
5706 if (evData0) 5715 if (evData0)
5707 ds = "Events(ON) Change"; 5716 ds = "Events ON";
5708 else 5717 else
5709 ds = "Events(OFF) Change"; 5718 ds = "Events OFF";
5710 break; 5719 break;
5711 case MPI_EVENT_INTEGRATED_RAID: 5720 case MPI_EVENT_INTEGRATED_RAID:
5712 { 5721 {
@@ -5777,8 +5786,27 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5777 break; 5786 break;
5778 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 5787 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
5779 snprintf(evStr, EVENT_DESCR_STR_SZ, 5788 snprintf(evStr, EVENT_DESCR_STR_SZ,
5780 "SAS Device Status Change: No Persistancy " 5789 "SAS Device Status Change: No Persistancy: id=%d", id);
5781 "Added: id=%d", id); 5790 break;
5791 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
5792 snprintf(evStr, EVENT_DESCR_STR_SZ,
5793 "SAS Device Status Change: Internal Device Reset : id=%d", id);
5794 break;
5795 case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
5796 snprintf(evStr, EVENT_DESCR_STR_SZ,
5797 "SAS Device Status Change: Internal Task Abort : id=%d", id);
5798 break;
5799 case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
5800 snprintf(evStr, EVENT_DESCR_STR_SZ,
5801 "SAS Device Status Change: Internal Abort Task Set : id=%d", id);
5802 break;
5803 case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
5804 snprintf(evStr, EVENT_DESCR_STR_SZ,
5805 "SAS Device Status Change: Internal Clear Task Set : id=%d", id);
5806 break;
5807 case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
5808 snprintf(evStr, EVENT_DESCR_STR_SZ,
5809 "SAS Device Status Change: Internal Query Task : id=%d", id);
5782 break; 5810 break;
5783 default: 5811 default:
5784 snprintf(evStr, EVENT_DESCR_STR_SZ, 5812 snprintf(evStr, EVENT_DESCR_STR_SZ,
@@ -6034,7 +6062,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6034 * @ioc: Pointer to MPT_ADAPTER structure 6062 * @ioc: Pointer to MPT_ADAPTER structure
6035 * @log_info: U32 LogInfo reply word from the IOC 6063 * @log_info: U32 LogInfo reply word from the IOC
6036 * 6064 *
6037 * Refer to lsi/fc_log.h. 6065 * Refer to lsi/mpi_log_fc.h.
6038 */ 6066 */
6039static void 6067static void
6040mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info) 6068mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
@@ -6131,8 +6159,10 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
6131 "Invalid SAS Address", /* 01h */ 6159 "Invalid SAS Address", /* 01h */
6132 NULL, /* 02h */ 6160 NULL, /* 02h */
6133 "Invalid Page", /* 03h */ 6161 "Invalid Page", /* 03h */
6134 NULL, /* 04h */ 6162 "Diag Message Error", /* 04h */
6135 "Task Terminated" /* 05h */ 6163 "Task Terminated", /* 05h */
6164 "Enclosure Management", /* 06h */
6165 "Target Mode" /* 07h */
6136 }; 6166 };
6137 static char *pl_code_str[] = { 6167 static char *pl_code_str[] = {
6138 NULL, /* 00h */ 6168 NULL, /* 00h */
@@ -6158,7 +6188,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
6158 "IO Executed", /* 14h */ 6188 "IO Executed", /* 14h */
6159 "Persistant Reservation Out Not Affiliation Owner", /* 15h */ 6189 "Persistant Reservation Out Not Affiliation Owner", /* 15h */
6160 "Open Transmit DMA Abort", /* 16h */ 6190 "Open Transmit DMA Abort", /* 16h */
6161 NULL, /* 17h */ 6191 "IO Device Missing Delay Retry", /* 17h */
6162 NULL, /* 18h */ 6192 NULL, /* 18h */
6163 NULL, /* 19h */ 6193 NULL, /* 19h */
6164 NULL, /* 1Ah */ 6194 NULL, /* 1Ah */
@@ -6238,7 +6268,7 @@ static void
6238mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf) 6268mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
6239{ 6269{
6240 u32 status = ioc_status & MPI_IOCSTATUS_MASK; 6270 u32 status = ioc_status & MPI_IOCSTATUS_MASK;
6241 char *desc = ""; 6271 char *desc = NULL;
6242 6272
6243 switch (status) { 6273 switch (status) {
6244 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */ 6274 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
@@ -6348,7 +6378,7 @@ mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
6348 desc = "Others"; 6378 desc = "Others";
6349 break; 6379 break;
6350 } 6380 }
6351 if (desc != "") 6381 if (desc != NULL)
6352 printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc); 6382 printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc);
6353} 6383}
6354 6384
@@ -6386,7 +6416,6 @@ EXPORT_SYMBOL(mpt_alloc_fw_memory);
6386EXPORT_SYMBOL(mpt_free_fw_memory); 6416EXPORT_SYMBOL(mpt_free_fw_memory);
6387EXPORT_SYMBOL(mptbase_sas_persist_operation); 6417EXPORT_SYMBOL(mptbase_sas_persist_operation);
6388 6418
6389
6390/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6419/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6391/* 6420/*
6392 * fusion_init - Fusion MPT base driver initialization routine. 6421 * fusion_init - Fusion MPT base driver initialization routine.
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index a5ce10b67d02..d4cb144ab402 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -75,8 +75,8 @@
75#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 75#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
76#endif 76#endif
77 77
78#define MPT_LINUX_VERSION_COMMON "3.04.00" 78#define MPT_LINUX_VERSION_COMMON "3.04.01"
79#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.00" 79#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.01"
80#define WHAT_MAGIC_STRING "@" "(" "#" ")" 80#define WHAT_MAGIC_STRING "@" "(" "#" ")"
81 81
82#define show_mptmod_ver(s,ver) \ 82#define show_mptmod_ver(s,ver) \
@@ -307,8 +307,8 @@ typedef struct _SYSIF_REGS
307 u32 HostIndex; /* 50 Host Index register */ 307 u32 HostIndex; /* 50 Host Index register */
308 u32 Reserved4[15]; /* 54-8F */ 308 u32 Reserved4[15]; /* 54-8F */
309 u32 Fubar; /* 90 For Fubar usage */ 309 u32 Fubar; /* 90 For Fubar usage */
310 u32 Reserved5[1050];/* 94-10F8 */ 310 u32 Reserved5[1050];/* 94-10F8 */
311 u32 Reset_1078; /* 10FC Reset 1078 */ 311 u32 Reset_1078; /* 10FC Reset 1078 */
312} SYSIF_REGS; 312} SYSIF_REGS;
313 313
314/* 314/*
@@ -363,6 +363,7 @@ typedef struct _VirtDevice {
363#define MPT_TARGET_FLAGS_VALID_56 0x10 363#define MPT_TARGET_FLAGS_VALID_56 0x10
364#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20 364#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20
365#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40 365#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40
366#define MPT_TARGET_FLAGS_LED_ON 0x80
366 367
367/* 368/*
368 * /proc/mpt interface 369 * /proc/mpt interface
@@ -634,7 +635,6 @@ typedef struct _MPT_ADAPTER
634 u16 handle; 635 u16 handle;
635 int sas_index; /* index refrencing */ 636 int sas_index; /* index refrencing */
636 MPT_SAS_MGMT sas_mgmt; 637 MPT_SAS_MGMT sas_mgmt;
637 int num_ports;
638 struct work_struct sas_persist_task; 638 struct work_struct sas_persist_task;
639 639
640 struct work_struct fc_setup_reset_work; 640 struct work_struct fc_setup_reset_work;
@@ -644,7 +644,6 @@ typedef struct _MPT_ADAPTER
644 struct work_struct fc_rescan_work; 644 struct work_struct fc_rescan_work;
645 char fc_rescan_work_q_name[KOBJ_NAME_LEN]; 645 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
646 struct workqueue_struct *fc_rescan_work_q; 646 struct workqueue_struct *fc_rescan_work_q;
647 u8 port_serial_number;
648} MPT_ADAPTER; 647} MPT_ADAPTER;
649 648
650/* 649/*
@@ -982,7 +981,7 @@ typedef struct _MPT_SCSI_HOST {
982 wait_queue_head_t scandv_waitq; 981 wait_queue_head_t scandv_waitq;
983 int scandv_wait_done; 982 int scandv_wait_done;
984 long last_queue_full; 983 long last_queue_full;
985 u8 mpt_pq_filter; 984 u16 tm_iocstatus;
986} MPT_SCSI_HOST; 985} MPT_SCSI_HOST;
987 986
988/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 987/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b4967bb8a7d6..30975ccd9947 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2332,7 +2332,7 @@ done_free_mem:
2332} 2332}
2333 2333
2334/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2334/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2335/* Prototype Routine for the HP HOST INFO command. 2335/* Prototype Routine for the HOST INFO command.
2336 * 2336 *
2337 * Outputs: None. 2337 * Outputs: None.
2338 * Return: 0 if successful 2338 * Return: 0 if successful
@@ -2568,7 +2568,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2568} 2568}
2569 2569
2570/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2570/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2571/* Prototype Routine for the HP TARGET INFO command. 2571/* Prototype Routine for the TARGET INFO command.
2572 * 2572 *
2573 * Outputs: None. 2573 * Outputs: None.
2574 * Return: 0 if successful 2574 * Return: 0 if successful
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index a2f8a97992e6..043941882c6e 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -354,9 +354,6 @@ struct mpt_ioctl_command32 {
354 354
355 355
356/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 356/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
357/*
358 * HP Specific IOCTL Defines and Structures
359 */
360 357
361#define CPQFCTS_IOC_MAGIC 'Z' 358#define CPQFCTS_IOC_MAGIC 'Z'
362#define HP_IOC_MAGIC 'Z' 359#define HP_IOC_MAGIC 'Z'
@@ -364,8 +361,6 @@ struct mpt_ioctl_command32 {
364#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t) 361#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t)
365#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t) 362#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t)
366 363
367/* All HP IOCTLs must include this header
368 */
369typedef struct _hp_header { 364typedef struct _hp_header {
370 unsigned int iocnum; 365 unsigned int iocnum;
371 unsigned int host; 366 unsigned int host;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index a8f2fa985455..90da7d63b08e 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -77,10 +77,6 @@ MODULE_DESCRIPTION(my_NAME);
77MODULE_LICENSE("GPL"); 77MODULE_LICENSE("GPL");
78 78
79/* Command line args */ 79/* Command line args */
80static int mpt_pq_filter = 0;
81module_param(mpt_pq_filter, int, 0);
82MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
83
84#define MPTFC_DEV_LOSS_TMO (60) 80#define MPTFC_DEV_LOSS_TMO (60)
85static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */ 81static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */
86module_param(mptfc_dev_loss_tmo, int, 0); 82module_param(mptfc_dev_loss_tmo, int, 0);
@@ -513,8 +509,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
513 509
514 if (vtarget->num_luns == 0) { 510 if (vtarget->num_luns == 0) {
515 vtarget->ioc_id = hd->ioc->id; 511 vtarget->ioc_id = hd->ioc->id;
516 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES | 512 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
517 MPT_TARGET_FLAGS_VALID_INQUIRY;
518 hd->Targets[sdev->id] = vtarget; 513 hd->Targets[sdev->id] = vtarget;
519 } 514 }
520 515
@@ -1129,13 +1124,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1129 hd->timer.data = (unsigned long) hd; 1124 hd->timer.data = (unsigned long) hd;
1130 hd->timer.function = mptscsih_timer_expired; 1125 hd->timer.function = mptscsih_timer_expired;
1131 1126
1132 hd->mpt_pq_filter = mpt_pq_filter;
1133
1134 ddvprintk((MYIOC_s_INFO_FMT
1135 "mpt_pq_filter %x\n",
1136 ioc->name,
1137 mpt_pq_filter));
1138
1139 init_waitqueue_head(&hd->scandv_waitq); 1127 init_waitqueue_head(&hd->scandv_waitq);
1140 hd->scandv_wait_done = 0; 1128 hd->scandv_wait_done = 0;
1141 hd->last_queue_full = 0; 1129 hd->last_queue_full = 0;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index f7bd8b11ed3b..f66f2203143a 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -67,20 +67,19 @@
67#define my_VERSION MPT_LINUX_VERSION_COMMON 67#define my_VERSION MPT_LINUX_VERSION_COMMON
68#define MYNAM "mptsas" 68#define MYNAM "mptsas"
69 69
70/*
71 * Reserved channel for integrated raid
72 */
73#define MPTSAS_RAID_CHANNEL 1
74
70MODULE_AUTHOR(MODULEAUTHOR); 75MODULE_AUTHOR(MODULEAUTHOR);
71MODULE_DESCRIPTION(my_NAME); 76MODULE_DESCRIPTION(my_NAME);
72MODULE_LICENSE("GPL"); 77MODULE_LICENSE("GPL");
73 78
74static int mpt_pq_filter;
75module_param(mpt_pq_filter, int, 0);
76MODULE_PARM_DESC(mpt_pq_filter,
77 "Enable peripheral qualifier filter: enable=1 "
78 "(default=0)");
79
80static int mpt_pt_clear; 79static int mpt_pt_clear;
81module_param(mpt_pt_clear, int, 0); 80module_param(mpt_pt_clear, int, 0);
82MODULE_PARM_DESC(mpt_pt_clear, 81MODULE_PARM_DESC(mpt_pt_clear,
83 "Clear persistency table: enable=1 " 82 " Clear persistency table: enable=1 "
84 "(default=MPTSCSIH_PT_CLEAR=0)"); 83 "(default=MPTSCSIH_PT_CLEAR=0)");
85 84
86static int mptsasDoneCtx = -1; 85static int mptsasDoneCtx = -1;
@@ -144,7 +143,6 @@ struct mptsas_devinfo {
144 * Specific details on ports, wide/narrow 143 * Specific details on ports, wide/narrow
145 */ 144 */
146struct mptsas_portinfo_details{ 145struct mptsas_portinfo_details{
147 u8 port_id; /* port number provided to transport */
148 u16 num_phys; /* number of phys belong to this port */ 146 u16 num_phys; /* number of phys belong to this port */
149 u64 phy_bitmask; /* TODO, extend support for 255 phys */ 147 u64 phy_bitmask; /* TODO, extend support for 255 phys */
150 struct sas_rphy *rphy; /* transport layer rphy object */ 148 struct sas_rphy *rphy; /* transport layer rphy object */
@@ -350,10 +348,10 @@ mptsas_port_delete(struct mptsas_portinfo_details * port_details)
350 port_info = port_details->port_info; 348 port_info = port_details->port_info;
351 phy_info = port_info->phy_info; 349 phy_info = port_info->phy_info;
352 350
353 dsaswideprintk((KERN_DEBUG "%s: [%p]: port=%02d num_phys=%02d " 351 dsaswideprintk((KERN_DEBUG "%s: [%p]: num_phys=%02d "
354 "bitmask=0x%016llX\n", 352 "bitmask=0x%016llX\n",
355 __FUNCTION__, port_details, port_details->port_id, 353 __FUNCTION__, port_details, port_details->num_phys,
356 port_details->num_phys, port_details->phy_bitmask)); 354 port_details->phy_bitmask));
357 355
358 for (i = 0; i < port_info->num_phys; i++, phy_info++) { 356 for (i = 0; i < port_info->num_phys; i++, phy_info++) {
359 if(phy_info->port_details != port_details) 357 if(phy_info->port_details != port_details)
@@ -462,9 +460,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
462 * phy be removed by firmware events. 460 * phy be removed by firmware events.
463 */ 461 */
464 dsaswideprintk((KERN_DEBUG 462 dsaswideprintk((KERN_DEBUG
465 "%s: [%p]: port=%d deleting phy = %d\n", 463 "%s: [%p]: deleting phy = %d\n",
466 __FUNCTION__, port_details, 464 __FUNCTION__, port_details, i));
467 port_details->port_id, i));
468 port_details->num_phys--; 465 port_details->num_phys--;
469 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id); 466 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
470 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 467 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
@@ -493,7 +490,6 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
493 goto out; 490 goto out;
494 port_details->num_phys = 1; 491 port_details->num_phys = 1;
495 port_details->port_info = port_info; 492 port_details->port_info = port_info;
496 port_details->port_id = ioc->port_serial_number++;
497 if (phy_info->phy_id < 64 ) 493 if (phy_info->phy_id < 64 )
498 port_details->phy_bitmask |= 494 port_details->phy_bitmask |=
499 (1 << phy_info->phy_id); 495 (1 << phy_info->phy_id);
@@ -525,12 +521,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
525 mptsas_get_port(phy_info_cmp); 521 mptsas_get_port(phy_info_cmp);
526 port_details->starget = 522 port_details->starget =
527 mptsas_get_starget(phy_info_cmp); 523 mptsas_get_starget(phy_info_cmp);
528 port_details->port_id =
529 phy_info_cmp->port_details->port_id;
530 port_details->num_phys = 524 port_details->num_phys =
531 phy_info_cmp->port_details->num_phys; 525 phy_info_cmp->port_details->num_phys;
532// port_info->port_serial_number--;
533 ioc->port_serial_number--;
534 if (!phy_info_cmp->port_details->num_phys) 526 if (!phy_info_cmp->port_details->num_phys)
535 kfree(phy_info_cmp->port_details); 527 kfree(phy_info_cmp->port_details);
536 } else 528 } else
@@ -554,11 +546,11 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
554 if (!port_details) 546 if (!port_details)
555 continue; 547 continue;
556 dsaswideprintk((KERN_DEBUG 548 dsaswideprintk((KERN_DEBUG
557 "%s: [%p]: phy_id=%02d port_id=%02d num_phys=%02d " 549 "%s: [%p]: phy_id=%02d num_phys=%02d "
558 "bitmask=0x%016llX\n", 550 "bitmask=0x%016llX\n",
559 __FUNCTION__, 551 __FUNCTION__,
560 port_details, i, port_details->port_id, 552 port_details, i, port_details->num_phys,
561 port_details->num_phys, port_details->phy_bitmask)); 553 port_details->phy_bitmask));
562 dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n", 554 dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n",
563 port_details->port, port_details->rphy)); 555 port_details->port, port_details->rphy));
564 } 556 }
@@ -651,16 +643,13 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
651static int 643static int
652mptsas_slave_configure(struct scsi_device *sdev) 644mptsas_slave_configure(struct scsi_device *sdev)
653{ 645{
654 struct Scsi_Host *host = sdev->host;
655 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
656 646
657 /* 647 if (sdev->channel == MPTSAS_RAID_CHANNEL)
658 * RAID volumes placed beyond the last expected port. 648 goto out;
659 * Ignore sending sas mode pages in that case.. 649
660 */ 650 sas_read_port_mode_page(sdev);
661 if (sdev->channel < hd->ioc->num_ports)
662 sas_read_port_mode_page(sdev);
663 651
652 out:
664 return mptscsih_slave_configure(sdev); 653 return mptscsih_slave_configure(sdev);
665} 654}
666 655
@@ -689,10 +678,7 @@ mptsas_target_alloc(struct scsi_target *starget)
689 678
690 hd->Targets[target_id] = vtarget; 679 hd->Targets[target_id] = vtarget;
691 680
692 /* 681 if (starget->channel == MPTSAS_RAID_CHANNEL)
693 * RAID volumes placed beyond the last expected port.
694 */
695 if (starget->channel == hd->ioc->num_ports)
696 goto out; 682 goto out;
697 683
698 rphy = dev_to_rphy(starget->dev.parent); 684 rphy = dev_to_rphy(starget->dev.parent);
@@ -743,7 +729,7 @@ mptsas_target_destroy(struct scsi_target *starget)
743 if (!starget->hostdata) 729 if (!starget->hostdata)
744 return; 730 return;
745 731
746 if (starget->channel == hd->ioc->num_ports) 732 if (starget->channel == MPTSAS_RAID_CHANNEL)
747 goto out; 733 goto out;
748 734
749 rphy = dev_to_rphy(starget->dev.parent); 735 rphy = dev_to_rphy(starget->dev.parent);
@@ -783,10 +769,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
783 starget = scsi_target(sdev); 769 starget = scsi_target(sdev);
784 vdev->vtarget = starget->hostdata; 770 vdev->vtarget = starget->hostdata;
785 771
786 /* 772 if (sdev->channel == MPTSAS_RAID_CHANNEL)
787 * RAID volumes placed beyond the last expected port.
788 */
789 if (sdev->channel == hd->ioc->num_ports)
790 goto out; 773 goto out;
791 774
792 rphy = dev_to_rphy(sdev->sdev_target->dev.parent); 775 rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
@@ -1608,11 +1591,7 @@ static int mptsas_probe_one_phy(struct device *dev,
1608 if (phy_info->sas_port_add_phy) { 1591 if (phy_info->sas_port_add_phy) {
1609 1592
1610 if (!port) { 1593 if (!port) {
1611 port = sas_port_alloc(dev, 1594 port = sas_port_alloc_num(dev);
1612 phy_info->port_details->port_id);
1613 dsaswideprintk((KERN_DEBUG
1614 "sas_port_alloc: port=%p dev=%p port_id=%d\n",
1615 port, dev, phy_info->port_details->port_id));
1616 if (!port) { 1595 if (!port) {
1617 error = -ENOMEM; 1596 error = -ENOMEM;
1618 goto out; 1597 goto out;
@@ -1625,6 +1604,9 @@ static int mptsas_probe_one_phy(struct device *dev,
1625 goto out; 1604 goto out;
1626 } 1605 }
1627 mptsas_set_port(phy_info, port); 1606 mptsas_set_port(phy_info, port);
1607 dsaswideprintk((KERN_DEBUG
1608 "sas_port_alloc: port=%p dev=%p port_id=%d\n",
1609 port, dev, port->port_identifier));
1628 } 1610 }
1629 dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n", 1611 dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n",
1630 phy_info->phy_id)); 1612 phy_info->phy_id));
@@ -1736,7 +1718,6 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
1736 hba = NULL; 1718 hba = NULL;
1737 } 1719 }
1738 mutex_unlock(&ioc->sas_topology_mutex); 1720 mutex_unlock(&ioc->sas_topology_mutex);
1739 ioc->num_ports = port_info->num_phys;
1740 1721
1741 for (i = 0; i < port_info->num_phys; i++) { 1722 for (i = 0; i < port_info->num_phys; i++) {
1742 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], 1723 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
@@ -1939,7 +1920,8 @@ mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
1939 expander_sas_address) 1920 expander_sas_address)
1940 continue; 1921 continue;
1941#ifdef MPT_DEBUG_SAS_WIDE 1922#ifdef MPT_DEBUG_SAS_WIDE
1942 dev_printk(KERN_DEBUG, &port->dev, "delete\n"); 1923 dev_printk(KERN_DEBUG, &port->dev,
1924 "delete port (%d)\n", port->port_identifier);
1943#endif 1925#endif
1944 sas_port_delete(port); 1926 sas_port_delete(port);
1945 mptsas_port_delete(phy_info->port_details); 1927 mptsas_port_delete(phy_info->port_details);
@@ -1984,7 +1966,7 @@ mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
1984 if (!ioc->raid_data.pIocPg2->NumActiveVolumes) 1966 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
1985 goto out; 1967 goto out;
1986 for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 1968 for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
1987 scsi_add_device(ioc->sh, ioc->num_ports, 1969 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
1988 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); 1970 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
1989 } 1971 }
1990 out: 1972 out:
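
The mptsas hunks above all replace the dynamically counted ioc->num_ports with the fixed MPTSAS_RAID_CHANNEL reserved for integrated-RAID volumes, so every channel test reduces to a plain compare. A sketch with a hypothetical helper name (the macro value is the one this patch defines):

    #include <scsi/scsi_device.h>

    #define MPTSAS_RAID_CHANNEL     1       /* reserved for integrated RAID */

    static int mptsas_is_raid_channel(struct scsi_device *sdev)
    {
            return sdev->channel == MPTSAS_RAID_CHANNEL;
    }
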
@@ -2185,7 +2167,8 @@ mptsas_hotplug_work(void *arg)
2185 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id); 2167 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
2186 2168
2187#ifdef MPT_DEBUG_SAS_WIDE 2169#ifdef MPT_DEBUG_SAS_WIDE
2188 dev_printk(KERN_DEBUG, &port->dev, "delete\n"); 2170 dev_printk(KERN_DEBUG, &port->dev,
2171 "delete port (%d)\n", port->port_identifier);
2189#endif 2172#endif
2190 sas_port_delete(port); 2173 sas_port_delete(port);
2191 mptsas_port_delete(phy_info->port_details); 2174 mptsas_port_delete(phy_info->port_details);
@@ -2289,35 +2272,26 @@ mptsas_hotplug_work(void *arg)
2289 mptsas_set_rphy(phy_info, rphy); 2272 mptsas_set_rphy(phy_info, rphy);
2290 break; 2273 break;
2291 case MPTSAS_ADD_RAID: 2274 case MPTSAS_ADD_RAID:
2292 sdev = scsi_device_lookup( 2275 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
2293 ioc->sh, 2276 ev->id, 0);
2294 ioc->num_ports,
2295 ev->id,
2296 0);
2297 if (sdev) { 2277 if (sdev) {
2298 scsi_device_put(sdev); 2278 scsi_device_put(sdev);
2299 break; 2279 break;
2300 } 2280 }
2301 printk(MYIOC_s_INFO_FMT 2281 printk(MYIOC_s_INFO_FMT
2302 "attaching raid volume, channel %d, id %d\n", 2282 "attaching raid volume, channel %d, id %d\n",
2303 ioc->name, ioc->num_ports, ev->id); 2283 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2304 scsi_add_device(ioc->sh, 2284 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
2305 ioc->num_ports,
2306 ev->id,
2307 0);
2308 mpt_findImVolumes(ioc); 2285 mpt_findImVolumes(ioc);
2309 break; 2286 break;
2310 case MPTSAS_DEL_RAID: 2287 case MPTSAS_DEL_RAID:
2311 sdev = scsi_device_lookup( 2288 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
2312 ioc->sh, 2289 ev->id, 0);
2313 ioc->num_ports,
2314 ev->id,
2315 0);
2316 if (!sdev) 2290 if (!sdev)
2317 break; 2291 break;
2318 printk(MYIOC_s_INFO_FMT 2292 printk(MYIOC_s_INFO_FMT
2319 "removing raid volume, channel %d, id %d\n", 2293 "removing raid volume, channel %d, id %d\n",
2320 ioc->name, ioc->num_ports, ev->id); 2294 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2321 vdevice = sdev->hostdata; 2295 vdevice = sdev->hostdata;
2322 vdevice->vtarget->deleted = 1; 2296 vdevice->vtarget->deleted = 1;
2323 mptsas_target_reset(ioc, vdevice->vtarget); 2297 mptsas_target_reset(ioc, vdevice->vtarget);
@@ -2723,7 +2697,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2723 hd->timer.data = (unsigned long) hd; 2697 hd->timer.data = (unsigned long) hd;
2724 hd->timer.function = mptscsih_timer_expired; 2698 hd->timer.function = mptscsih_timer_expired;
2725 2699
2726 hd->mpt_pq_filter = mpt_pq_filter;
2727 ioc->sas_data.ptClear = mpt_pt_clear; 2700 ioc->sas_data.ptClear = mpt_pt_clear;
2728 2701
2729 if (ioc->sas_data.ptClear==1) { 2702 if (ioc->sas_data.ptClear==1) {
@@ -2731,12 +2704,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2731 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT); 2704 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
2732 } 2705 }
2733 2706
2734 ddvprintk((MYIOC_s_INFO_FMT
2735 "mpt_pq_filter %x mpt_pq_filter %x\n",
2736 ioc->name,
2737 mpt_pq_filter,
2738 mpt_pq_filter));
2739
2740 init_waitqueue_head(&hd->scandv_waitq); 2707 init_waitqueue_head(&hd->scandv_waitq);
2741 hd->scandv_wait_done = 0; 2708 hd->scandv_wait_done = 0;
2742 hd->last_queue_full = 0; 2709 hd->last_queue_full = 0;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 8242b16e3168..30524dc54b16 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -66,6 +66,7 @@
66 66
67#include "mptbase.h" 67#include "mptbase.h"
68#include "mptscsih.h" 68#include "mptscsih.h"
69#include "lsi/mpi_log_sas.h"
69 70
70/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 71/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
71#define my_NAME "Fusion MPT SCSI Host driver" 72#define my_NAME "Fusion MPT SCSI Host driver"
@@ -127,7 +128,7 @@ static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
127static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); 128static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
128static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd); 129static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
129static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ); 130static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
130static u32 SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc); 131static int SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc);
131 132
132static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout); 133static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
133 134
@@ -497,6 +498,34 @@ nextSGEset:
497 return SUCCESS; 498 return SUCCESS;
498} /* mptscsih_AddSGE() */ 499} /* mptscsih_AddSGE() */
499 500
501static void
502mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
503 U32 SlotStatus)
504{
505 MPT_FRAME_HDR *mf;
506 SEPRequest_t *SEPMsg;
507
508 if (ioc->bus_type == FC)
509 return;
510
511 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
512 dfailprintk((MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
513 ioc->name,__FUNCTION__));
514 return;
515 }
516
517 SEPMsg = (SEPRequest_t *)mf;
518 SEPMsg->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
519 SEPMsg->Bus = vtarget->bus_id;
520 SEPMsg->TargetID = vtarget->target_id;
521 SEPMsg->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS;
522 SEPMsg->SlotStatus = SlotStatus;
523 devtverboseprintk((MYIOC_s_WARN_FMT
524 "Sending SEP cmd=%x id=%d bus=%d\n",
525 ioc->name, SlotStatus, SEPMsg->TargetID, SEPMsg->Bus));
526 mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
527}
528
500/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 529/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
501/* 530/*
502 * mptscsih_io_done - Main SCSI IO callback routine registered to 531 * mptscsih_io_done - Main SCSI IO callback routine registered to
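
Taken together with the copy_sense_data and io_done hunks later in this file, the new SEP helper drives slot LEDs: a predicted-fault sense code turns the LED on (the patch gates that path on IBM-branded controllers), and a later device-not-there completion turns it off. A condensed sketch of those two call sites, under hypothetical wrapper names, with the types from mptbase.h and the constants from the MPI headers:

    static void mpt_slot_led_on(MPT_ADAPTER *ioc, VirtTarget *vtarget)
    {
            mptscsih_issue_sep_command(ioc, vtarget,
                    MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
            vtarget->tflags |= MPT_TARGET_FLAGS_LED_ON;
    }

    static void mpt_slot_led_off(MPT_ADAPTER *ioc, VirtTarget *vtarget)
    {
            if (!(vtarget->tflags & MPT_TARGET_FLAGS_LED_ON))
                    return;
            mptscsih_issue_sep_command(ioc, vtarget,
                    MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED);
            vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
    }
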
@@ -520,6 +549,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
520 SCSIIORequest_t *pScsiReq; 549 SCSIIORequest_t *pScsiReq;
521 SCSIIOReply_t *pScsiReply; 550 SCSIIOReply_t *pScsiReply;
522 u16 req_idx, req_idx_MR; 551 u16 req_idx, req_idx_MR;
552 VirtDevice *vdev;
553 VirtTarget *vtarget;
523 554
524 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; 555 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
525 556
@@ -538,6 +569,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
538 } 569 }
539 570
540 sc = hd->ScsiLookup[req_idx]; 571 sc = hd->ScsiLookup[req_idx];
572 hd->ScsiLookup[req_idx] = NULL;
541 if (sc == NULL) { 573 if (sc == NULL) {
542 MPIHeader_t *hdr = (MPIHeader_t *)mf; 574 MPIHeader_t *hdr = (MPIHeader_t *)mf;
543 575
@@ -553,6 +585,12 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
553 return 1; 585 return 1;
554 } 586 }
555 587
588 if ((unsigned char *)mf != sc->host_scribble) {
589 mptscsih_freeChainBuffers(ioc, req_idx);
590 return 1;
591 }
592
593 sc->host_scribble = NULL;
556 sc->result = DID_OK << 16; /* Set default reply as OK */ 594 sc->result = DID_OK << 16; /* Set default reply as OK */
557 pScsiReq = (SCSIIORequest_t *) mf; 595 pScsiReq = (SCSIIORequest_t *) mf;
558 pScsiReply = (SCSIIOReply_t *) mr; 596 pScsiReply = (SCSIIOReply_t *) mr;
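
The host_scribble checks added here and in the flush/search paths enforce one ownership rule: queuecommand stores the message-frame pointer in sc->host_scribble, and a completion may only finish the command if its frame still matches. A minimal sketch of that test, with a hypothetical helper name:

    static int mpt_frame_owns_cmd(MPT_FRAME_HDR *mf, struct scsi_cmnd *sc)
    {
            /* a mismatch means the reply is stale; only the chain
             * buffers get freed and the command is left alone */
            return sc != NULL && (unsigned char *)mf == sc->host_scribble;
    }
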
@@ -640,10 +678,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
640 678
641 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) 679 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
642 hd->sel_timeout[pScsiReq->TargetID]++; 680 hd->sel_timeout[pScsiReq->TargetID]++;
681
682 vdev = sc->device->hostdata;
683 if (!vdev)
684 break;
685 vtarget = vdev->vtarget;
686 if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) {
687 mptscsih_issue_sep_command(ioc, vtarget,
688 MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED);
689 vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
690 }
643 break; 691 break;
644 692
645 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
646 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ 693 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
694 if ( ioc->bus_type == SAS ) {
695 u16 ioc_status = le16_to_cpu(pScsiReply->IOCStatus);
696 if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
697 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
698 log_info &=SAS_LOGINFO_MASK;
699 if (log_info == SAS_LOGINFO_NEXUS_LOSS) {
700 sc->result = (DID_BUS_BUSY << 16);
701 break;
702 }
703 }
704 }
705
706 /*
707 * Allow non-SAS & non-NEXUS_LOSS to drop into below code
708 */
709
710 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
647 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ 711 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
648 /* Linux handles an unsolicited DID_RESET better 712 /* Linux handles an unsolicited DID_RESET better
649 * than an unsolicited DID_ABORT. 713 * than an unsolicited DID_ABORT.
@@ -658,7 +722,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
658 sc->result=DID_SOFT_ERROR << 16; 722 sc->result=DID_SOFT_ERROR << 16;
659 else /* Sufficient data transfer occurred */ 723 else /* Sufficient data transfer occurred */
660 sc->result = (DID_OK << 16) | scsi_status; 724 sc->result = (DID_OK << 16) | scsi_status;
661 dreplyprintk((KERN_NOTICE 725 dreplyprintk((KERN_NOTICE
662 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id)); 726 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
663 break; 727 break;
664 728
@@ -784,8 +848,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
784 sc->request_bufflen, sc->sc_data_direction); 848 sc->request_bufflen, sc->sc_data_direction);
785 } 849 }
786 850
787 hd->ScsiLookup[req_idx] = NULL;
788
789 sc->scsi_done(sc); /* Issue the command callback */ 851 sc->scsi_done(sc); /* Issue the command callback */
790 852
791 /* Free Chain buffers */ 853 /* Free Chain buffers */
@@ -827,9 +889,17 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
827 dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n", 889 dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n",
828 mf, SCpnt)); 890 mf, SCpnt));
829 891
892 /* Free Chain buffers */
893 mptscsih_freeChainBuffers(ioc, ii);
894
895 /* Free Message frames */
896 mpt_free_msg_frame(ioc, mf);
897
898 if ((unsigned char *)mf != SCpnt->host_scribble)
899 continue;
900
830 /* Set status, free OS resources (SG DMA buffers) 901 /* Set status, free OS resources (SG DMA buffers)
831 * Do OS callback 902 * Do OS callback
832 * Free driver resources (chain, msg buffers)
833 */ 903 */
834 if (SCpnt->use_sg) { 904 if (SCpnt->use_sg) {
835 pci_unmap_sg(ioc->pcidev, 905 pci_unmap_sg(ioc->pcidev,
@@ -845,12 +915,6 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
845 SCpnt->result = DID_RESET << 16; 915 SCpnt->result = DID_RESET << 16;
846 SCpnt->host_scribble = NULL; 916 SCpnt->host_scribble = NULL;
847 917
848 /* Free Chain buffers */
849 mptscsih_freeChainBuffers(ioc, ii);
850
851 /* Free Message frames */
852 mpt_free_msg_frame(ioc, mf);
853
854 SCpnt->scsi_done(SCpnt); /* Issue the command callback */ 918 SCpnt->scsi_done(SCpnt); /* Issue the command callback */
855 } 919 }
856 } 920 }
@@ -887,10 +951,10 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
887 if ((sc = hd->ScsiLookup[ii]) != NULL) { 951 if ((sc = hd->ScsiLookup[ii]) != NULL) {
888 952
889 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii); 953 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
890 954 if (mf == NULL)
955 continue;
891 dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n", 956 dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n",
892 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1])); 957 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1]));
893
894 if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun))) 958 if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun)))
895 continue; 959 continue;
896 960
@@ -899,6 +963,8 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
899 hd->ScsiLookup[ii] = NULL; 963 hd->ScsiLookup[ii] = NULL;
900 mptscsih_freeChainBuffers(hd->ioc, ii); 964 mptscsih_freeChainBuffers(hd->ioc, ii);
901 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); 965 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
966 if ((unsigned char *)mf != sc->host_scribble)
967 continue;
902 if (sc->use_sg) { 968 if (sc->use_sg) {
903 pci_unmap_sg(hd->ioc->pcidev, 969 pci_unmap_sg(hd->ioc->pcidev,
904 (struct scatterlist *) sc->request_buffer, 970 (struct scatterlist *) sc->request_buffer,
@@ -1341,8 +1407,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1341 goto fail; 1407 goto fail;
1342 } 1408 }
1343 1409
1410 SCpnt->host_scribble = (unsigned char *)mf;
1344 hd->ScsiLookup[my_idx] = SCpnt; 1411 hd->ScsiLookup[my_idx] = SCpnt;
1345 SCpnt->host_scribble = NULL;
1346 1412
1347 mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf); 1413 mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf);
1348 dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n", 1414 dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
@@ -1529,6 +1595,12 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, in
1529 rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP); 1595 rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP);
1530 } 1596 }
1531 1597
1598 /*
1599 * Check IOCStatus from TM reply message
1600 */
1601 if (hd->tm_iocstatus != MPI_IOCSTATUS_SUCCESS)
1602 rc = FAILED;
1603
1532 dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc)); 1604 dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc));
1533 1605
1534 return rc; 1606 return rc;
@@ -1654,6 +1726,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1654 int scpnt_idx; 1726 int scpnt_idx;
1655 int retval; 1727 int retval;
1656 VirtDevice *vdev; 1728 VirtDevice *vdev;
1729 ulong sn = SCpnt->serial_number;
1657 1730
1658 /* If we can't locate our host adapter structure, return FAILED status. 1731 /* If we can't locate our host adapter structure, return FAILED status.
1659 */ 1732 */
@@ -1707,6 +1780,11 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1707 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, 1780 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun,
1708 ctx2abort, mptscsih_get_tm_timeout(hd->ioc)); 1781 ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
1709 1782
1783 if (SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx &&
1784 SCpnt->serial_number == sn) {
1785 retval = FAILED;
1786 }
1787
1710 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", 1788 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
1711 hd->ioc->name, 1789 hd->ioc->name,
1712 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1790 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
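
The serial-number comparison added above re-checks the outcome of the abort: if the command is still sitting in the same lookup slot with the same serial number after the task-management request returns, the abort is reported as FAILED. Expressed as a hypothetical predicate:

    static int mpt_abort_took_effect(struct scsi_cmnd *SCpnt, int scpnt_idx,
                                     ulong sn)
    {
            /* still present and unchanged -> the firmware never gave
             * the command back, so the abort did not take effect */
            return !(SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx &&
                     SCpnt->serial_number == sn);
    }
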
@@ -2023,6 +2101,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
2023 DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply); 2101 DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply);
2024 2102
2025 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; 2103 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2104 hd->tm_iocstatus = iocstatus;
2026 dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n", 2105 dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n",
2027 ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo))); 2106 ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo)));
2028 /* Error? (anything non-zero?) */ 2107 /* Error? (anything non-zero?) */
@@ -2401,6 +2480,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2401 ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12]; 2480 ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12];
2402 2481
2403 ioc->eventContext++; 2482 ioc->eventContext++;
2483 if (hd->ioc->pcidev->vendor ==
2484 PCI_VENDOR_ID_IBM) {
2485 mptscsih_issue_sep_command(hd->ioc,
2486 vdev->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
2487 vdev->vtarget->tflags |=
2488 MPT_TARGET_FLAGS_LED_ON;
2489 }
2404 } 2490 }
2405 } 2491 }
2406 } else { 2492 } else {
@@ -2409,7 +2495,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2409 } 2495 }
2410} 2496}
2411 2497
2412static u32 2498static int
2413SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc) 2499SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc)
2414{ 2500{
2415 MPT_SCSI_HOST *hd; 2501 MPT_SCSI_HOST *hd;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 0a1ff762205f..e4cc3dd5fc9f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -83,10 +83,6 @@ static int mpt_saf_te = MPTSCSIH_SAF_TE;
83module_param(mpt_saf_te, int, 0); 83module_param(mpt_saf_te, int, 0);
84MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)"); 84MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)");
85 85
86static int mpt_pq_filter = 0;
87module_param(mpt_pq_filter, int, 0);
88MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
89
90static void mptspi_write_offset(struct scsi_target *, int); 86static void mptspi_write_offset(struct scsi_target *, int);
91static void mptspi_write_width(struct scsi_target *, int); 87static void mptspi_write_width(struct scsi_target *, int);
92static int mptspi_write_spi_device_pg1(struct scsi_target *, 88static int mptspi_write_spi_device_pg1(struct scsi_target *,
@@ -1047,14 +1043,12 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1047 hd->timer.function = mptscsih_timer_expired; 1043 hd->timer.function = mptscsih_timer_expired;
1048 1044
1049 ioc->spi_data.Saf_Te = mpt_saf_te; 1045 ioc->spi_data.Saf_Te = mpt_saf_te;
1050 hd->mpt_pq_filter = mpt_pq_filter;
1051 1046
1052 hd->negoNvram = MPT_SCSICFG_USE_NVRAM; 1047 hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
1053 ddvprintk((MYIOC_s_INFO_FMT 1048 ddvprintk((MYIOC_s_INFO_FMT
1054 "saf_te %x mpt_pq_filter %x\n", 1049 "saf_te %x\n",
1055 ioc->name, 1050 ioc->name,
1056 mpt_saf_te, 1051 mpt_saf_te));
1057 mpt_pq_filter));
1058 ioc->spi_data.noQas = 0; 1052 ioc->spi_data.noQas = 0;
1059 1053
1060 init_waitqueue_head(&hd->scandv_waitq); 1054 init_waitqueue_head(&hd->scandv_waitq);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 36d511729f71..2146cf74425e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -132,6 +132,7 @@ static int __init dummy_init_module(void)
132 for (i = 0; i < numdummies && !err; i++) 132 for (i = 0; i < numdummies && !err; i++)
133 err = dummy_init_one(i); 133 err = dummy_init_one(i);
134 if (err) { 134 if (err) {
135 i--;
135 while (--i >= 0) 136 while (--i >= 0)
136 dummy_free_one(i); 137 dummy_free_one(i);
137 } 138 }
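
The one-line dummy.c fix (mirrored in ifb.c below) corrects an unwind off-by-one: the init loop leaves i one past the instance that failed, so without the extra decrement the teardown loop would also free the instance that was never created. A condensed restatement of dummy_init_module's loop under a hypothetical wrapper name:

    static int __init dummy_init_all(int numdummies)
    {
            int i, err = 0;

            for (i = 0; i < numdummies && !err; i++)
                    err = dummy_init_one(i);
            if (err) {
                    i--;                    /* skip the index that failed */
                    while (--i >= 0)        /* free only the created devices */
                            dummy_free_one(i);
            }
            return err;
    }
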
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f411bbb44f86..d304297c496c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -110,6 +110,9 @@ struct e1000_adapter;
110#define E1000_MIN_RXD 80 110#define E1000_MIN_RXD 80
111#define E1000_MAX_82544_RXD 4096 111#define E1000_MAX_82544_RXD 4096
112 112
113/* this is the size past which hardware will drop packets when setting LPE=0 */
114#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
115
113/* Supported Rx Buffer Sizes */ 116/* Supported Rx Buffer Sizes */
114#define E1000_RXBUFFER_128 128 /* Used for packet split */ 117#define E1000_RXBUFFER_128 128 /* Used for packet split */
115#define E1000_RXBUFFER_256 256 /* Used for packet split */ 118#define E1000_RXBUFFER_256 256 /* Used for packet split */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6d3d41934503..da62db897426 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "7.1.9-k2"DRIVERNAPI 39#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
40char e1000_driver_version[] = DRV_VERSION; 40char e1000_driver_version[] = DRV_VERSION;
41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -1068,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
1068 1068
1069 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 1069 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1070 1070
1071 adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; 1071 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1072 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; 1072 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
1073 hw->max_frame_size = netdev->mtu + 1073 hw->max_frame_size = netdev->mtu +
1074 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 1074 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
@@ -3148,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3148 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3148 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3149 3149
3150 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3150 /* adjust allocation if LPE protects us, and we aren't using SBP */
3151#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
3152 if (!adapter->hw.tbi_compatibility_on && 3151 if (!adapter->hw.tbi_compatibility_on &&
3153 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || 3152 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3154 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3153 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
@@ -3387,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3387 E1000_WRITE_REG(hw, IMC, ~0); 3386 E1000_WRITE_REG(hw, IMC, ~0);
3388 E1000_WRITE_FLUSH(hw); 3387 E1000_WRITE_FLUSH(hw);
3389 } 3388 }
3390 if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) 3389 if (likely(netif_rx_schedule_prep(netdev)))
3391 __netif_rx_schedule(&adapter->polling_netdev[0]); 3390 __netif_rx_schedule(netdev);
3392 else 3391 else
3393 e1000_irq_enable(adapter); 3392 e1000_irq_enable(adapter);
3394#else 3393#else
@@ -3431,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3431{ 3430{
3432 struct e1000_adapter *adapter; 3431 struct e1000_adapter *adapter;
3433 int work_to_do = min(*budget, poll_dev->quota); 3432 int work_to_do = min(*budget, poll_dev->quota);
3434 int tx_cleaned = 0, i = 0, work_done = 0; 3433 int tx_cleaned = 0, work_done = 0;
3435 3434
3436 /* Must NOT use netdev_priv macro here. */ 3435 /* Must NOT use netdev_priv macro here. */
3437 adapter = poll_dev->priv; 3436 adapter = poll_dev->priv;
3438 3437
3439 /* Keep link state information with original netdev */ 3438 /* Keep link state information with original netdev */
3440 if (!netif_carrier_ok(adapter->netdev)) 3439 if (!netif_carrier_ok(poll_dev))
3441 goto quit_polling; 3440 goto quit_polling;
3442 3441
3443 while (poll_dev != &adapter->polling_netdev[i]) { 3442 /* e1000_clean is called per-cpu. This lock protects
3444 i++; 3443 * tx_ring[0] from being cleaned by multiple cpus
3445 BUG_ON(i == adapter->num_rx_queues); 3444 * simultaneously. A failure obtaining the lock means
3445 * tx_ring[0] is currently being cleaned anyway. */
3446 if (spin_trylock(&adapter->tx_queue_lock)) {
3447 tx_cleaned = e1000_clean_tx_irq(adapter,
3448 &adapter->tx_ring[0]);
3449 spin_unlock(&adapter->tx_queue_lock);
3446 } 3450 }
3447 3451
3448 if (likely(adapter->num_tx_queues == 1)) { 3452 adapter->clean_rx(adapter, &adapter->rx_ring[0],
3449 /* e1000_clean is called per-cpu. This lock protects
3450 * tx_ring[0] from being cleaned by multiple cpus
3451 * simultaneously. A failure obtaining the lock means
3452 * tx_ring[0] is currently being cleaned anyway. */
3453 if (spin_trylock(&adapter->tx_queue_lock)) {
3454 tx_cleaned = e1000_clean_tx_irq(adapter,
3455 &adapter->tx_ring[0]);
3456 spin_unlock(&adapter->tx_queue_lock);
3457 }
3458 } else
3459 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
3460
3461 adapter->clean_rx(adapter, &adapter->rx_ring[i],
3462 &work_done, work_to_do); 3453 &work_done, work_to_do);
3463 3454
3464 *budget -= work_done; 3455 *budget -= work_done;
@@ -3466,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3466 3457
3467 /* If no Tx and not enough Rx work done, exit the polling mode */ 3458 /* If no Tx and not enough Rx work done, exit the polling mode */
3468 if ((!tx_cleaned && (work_done == 0)) || 3459 if ((!tx_cleaned && (work_done == 0)) ||
3469 !netif_running(adapter->netdev)) { 3460 !netif_running(poll_dev)) {
3470quit_polling: 3461quit_polling:
3471 netif_rx_complete(poll_dev); 3462 netif_rx_complete(poll_dev);
3472 e1000_irq_enable(adapter); 3463 e1000_irq_enable(adapter);
@@ -3681,6 +3672,9 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3681 3672
3682 length = le16_to_cpu(rx_desc->length); 3673 length = le16_to_cpu(rx_desc->length);
3683 3674
3675 /* adjust length to remove Ethernet CRC */
3676 length -= 4;
3677
3684 if (unlikely(!(status & E1000_RXD_STAT_EOP))) { 3678 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
3685 /* All receives must fit into a single buffer */ 3679 /* All receives must fit into a single buffer */
3686 E1000_DBG("%s: Receive packet consumed multiple" 3680 E1000_DBG("%s: Receive packet consumed multiple"
@@ -3885,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3885 pci_dma_sync_single_for_device(pdev, 3879 pci_dma_sync_single_for_device(pdev,
3886 ps_page_dma->ps_page_dma[0], 3880 ps_page_dma->ps_page_dma[0],
3887 PAGE_SIZE, PCI_DMA_FROMDEVICE); 3881 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3882 /* remove the CRC */
3883 l1 -= 4;
3888 skb_put(skb, l1); 3884 skb_put(skb, l1);
3889 length += l1;
3890 goto copydone; 3885 goto copydone;
3891 } /* if */ 3886 } /* if */
3892 } 3887 }
@@ -3905,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3905 skb->truesize += length; 3900 skb->truesize += length;
3906 } 3901 }
3907 3902
3903 /* strip the ethernet crc, problem is we're using pages now so
3904 * this whole operation can get a little cpu intensive */
3905 pskb_trim(skb, skb->len - 4);
3906
3908copydone: 3907copydone:
3909 e1000_rx_checksum(adapter, staterr, 3908 e1000_rx_checksum(adapter, staterr,
3910 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); 3909 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
@@ -4752,6 +4751,7 @@ static void
4752e1000_netpoll(struct net_device *netdev) 4751e1000_netpoll(struct net_device *netdev)
4753{ 4752{
4754 struct e1000_adapter *adapter = netdev_priv(netdev); 4753 struct e1000_adapter *adapter = netdev_priv(netdev);
4754
4755 disable_irq(adapter->pdev->irq); 4755 disable_irq(adapter->pdev->irq);
4756 e1000_intr(adapter->pdev->irq, netdev, NULL); 4756 e1000_intr(adapter->pdev->irq, netdev, NULL);
4757 e1000_clean_tx_irq(adapter, adapter->tx_ring); 4757 e1000_clean_tx_irq(adapter, adapter->tx_ring);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 3a42afab5036..43e3f33ed5e2 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -271,6 +271,7 @@ static int __init ifb_init_module(void)
271 for (i = 0; i < numifbs && !err; i++) 271 for (i = 0; i < numifbs && !err; i++)
272 err = ifb_init_one(i); 272 err = ifb_init_one(i);
273 if (err) { 273 if (err) {
274 i--;
274 while (--i >= 0) 275 while (--i >= 0)
275 ifb_free_one(i); 276 ifb_free_one(i);
276 } 277 }
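The ifb fix above steps i back before the unwind loop, so the interface whose ifb_init_one() just failed is not freed as well; only the instances that initialized successfully are torn down. The general shape of that error-unwind pattern, with placeholder init/free helpers:

/* Sketch of the unwind-on-partial-failure pattern; init_one()/free_one()
 * stand in for the real per-unit setup and teardown. */
static int init_one(int i);
static void free_one(int i);

static int init_all(int count)
{
        int i, err = 0;

        for (i = 0; i < count && !err; i++)
                err = init_one(i);
        if (err) {
                i--;                    /* back to the unit that failed      */
                while (--i >= 0)        /* free only the ones that succeeded */
                        free_one(i);
        }
        return err;
}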
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 07ca9480a6fe..c3e52c806b13 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -620,7 +620,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
620 return -ENXIO; 620 return -ENXIO;
621 } 621 }
622 dev_info(&mgp->pdev->dev, "handoff confirmed\n"); 622 dev_info(&mgp->pdev->dev, "handoff confirmed\n");
623 myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); 623 myri10ge_dummy_rdma(mgp, 1);
624 624
625 return 0; 625 return 0;
626} 626}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 82200bfaa8ed..7de9a07b2ac2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev,
516/* Chip internal frequency for clock calculations */ 516/* Chip internal frequency for clock calculations */
517static inline u32 hwkhz(const struct skge_hw *hw) 517static inline u32 hwkhz(const struct skge_hw *hw)
518{ 518{
519 if (hw->chip_id == CHIP_ID_GENESIS) 519 return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
520 return 53215; /* or: 53.125 MHz */
521 else
522 return 78215; /* or: 78.125 MHz */
523} 520}
524 521
525/* Chip HZ to microseconds */ 522/* Chip HZ to microseconds */
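The skge change above fixes two transposed digits (53215/78215 kHz where 53.125/78.125 MHz was meant) and folds the branch into a conditional expression. A kHz constant like this is typically used to convert interrupt-moderation timeouts between microseconds and clock ticks, roughly as below (a sketch, not the driver's own helpers):

/* Sketch: microsecond <-> clock-tick conversion around a chip clock given
 * in kHz, e.g. the corrected 53125/78125 values above.  Assumes the
 * intermediate product fits in 32 bits. */
#include <linux/types.h>

static inline u32 usec_to_ticks(u32 khz, u32 usec)
{
        return khz * usec / 1000;       /* kHz * us / 1000 = cycles */
}

static inline u32 ticks_to_usec(u32 khz, u32 ticks)
{
        return ticks * 1000 / khz;
}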
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d98f28c34e5c..de91609ca112 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.4" 53#define DRV_VERSION "1.5"
54#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
55 55
56/* 56/*
@@ -2204,9 +2204,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2204 int work_done = 0; 2204 int work_done = 0;
2205 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2205 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2206 2206
2207 if (!~status)
2208 goto out;
2209
2210 if (status & Y2_IS_HW_ERR) 2207 if (status & Y2_IS_HW_ERR)
2211 sky2_hw_intr(hw); 2208 sky2_hw_intr(hw);
2212 2209
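The dropped check at the top of sky2_poll() tested for an EISR value of all ones: !~status is true only when status == 0xffffffff, which is what an MMIO read typically returns when the PCI device is no longer responding. With the check gone, the poll routine always walks the status bits; the snippet below only illustrates what the removed test meant and is not sky2 code:

/* Sketch: an all-ones readback from a 32-bit register usually indicates an
 * aborted PCI read (device gone or held in reset). */
#include <linux/types.h>

static inline int mmio_looks_dead(u32 status)
{
        return !~status;        /* true only for status == 0xffffffff */
}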
@@ -2243,7 +2240,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2243 2240
2244 if (sky2_more_work(hw)) 2241 if (sky2_more_work(hw))
2245 return 1; 2242 return 1;
2246out: 2243
2247 netif_rx_complete(dev0); 2244 netif_rx_complete(dev0);
2248 2245
2249 sky2_read32(hw, B0_Y2_SP_LISR); 2246 sky2_read32(hw, B0_Y2_SP_LISR);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index fb1d5a8a45cf..647f62e9707d 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
84 * 84 *
85 * returns the content of the specified SMMIO register. 85 * returns the content of the specified SMMIO register.
86 */ 86 */
87static u32 87static inline u32
88spider_net_read_reg(struct spider_net_card *card, u32 reg) 88spider_net_read_reg(struct spider_net_card *card, u32 reg)
89{ 89{
90 u32 value; 90 u32 value;
@@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
101 * @reg: register to write to 101 * @reg: register to write to
102 * @value: value to write into the specified SMMIO register 102 * @value: value to write into the specified SMMIO register
103 */ 103 */
104static void 104static inline void
105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) 105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
106{ 106{
107 value = cpu_to_le32(value); 107 value = cpu_to_le32(value);
@@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev)
259 * 259 *
260 * returns the status as in the dmac_cmd_status field of the descriptor 260 * returns the status as in the dmac_cmd_status field of the descriptor
261 */ 261 */
262static enum spider_net_descr_status 262static inline int
263spider_net_get_descr_status(struct spider_net_descr *descr) 263spider_net_get_descr_status(struct spider_net_descr *descr)
264{ 264{
265 u32 cmd_status; 265 return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
266
267 cmd_status = descr->dmac_cmd_status;
268 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
269 /* no need to mask out any bits, as cmd_status is 32 bits wide only
270 * (and unsigned) */
271 return cmd_status;
272}
273
274/**
275 * spider_net_set_descr_status -- sets the status of a descriptor
276 * @descr: descriptor to change
277 * @status: status to set in the descriptor
278 *
279 * changes the status to the specified value. Doesn't change other bits
280 * in the status
281 */
282static void
283spider_net_set_descr_status(struct spider_net_descr *descr,
284 enum spider_net_descr_status status)
285{
286 u32 cmd_status;
287 /* read the status */
288 cmd_status = descr->dmac_cmd_status;
289 /* clean the upper 4 bits */
290 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
291 /* add the status to it */
292 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
293 /* and write it back */
294 descr->dmac_cmd_status = cmd_status;
295} 266}
296 267
297/** 268/**
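The rewritten spider_net_get_descr_status() above keeps the 4-bit status in place in bits 31..28 and masks it, instead of shifting it down and comparing against small enum values; that is why the spider_net_descr_status enum, the shift constant and the set_descr_status() helper all disappear. The two equivalent ways of reading such a field, sketched with the constants this patch introduces:

/* Sketch: extracting a status nibble stored in the top four bits of a
 * 32-bit command/status word. */
#include <linux/types.h>

#define IND_PROC_SHIFT  28
#define IND_PROC_MASK   0xF0000000

static inline u32 status_shifted(u32 cmd_status)        /* old style */
{
        return cmd_status >> IND_PROC_SHIFT;             /* 0x0 .. 0xF */
}

static inline u32 status_in_place(u32 cmd_status)        /* new style */
{
        return cmd_status & IND_PROC_MASK;       /* compare against 0xA0000000 etc. */
}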
@@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card,
328static int 299static int
329spider_net_init_chain(struct spider_net_card *card, 300spider_net_init_chain(struct spider_net_card *card,
330 struct spider_net_descr_chain *chain, 301 struct spider_net_descr_chain *chain,
331 struct spider_net_descr *start_descr, int no) 302 struct spider_net_descr *start_descr,
303 int direction, int no)
332{ 304{
333 int i; 305 int i;
334 struct spider_net_descr *descr; 306 struct spider_net_descr *descr;
335 dma_addr_t buf; 307 dma_addr_t buf;
336 308
337 atomic_set(&card->rx_chain_refill,0);
338
339 descr = start_descr; 309 descr = start_descr;
340 memset(descr, 0, sizeof(*descr) * no); 310 memset(descr, 0, sizeof(*descr) * no);
341 311
342 /* set up the hardware pointers in each descriptor */ 312 /* set up the hardware pointers in each descriptor */
343 for (i=0; i<no; i++, descr++) { 313 for (i=0; i<no; i++, descr++) {
344 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 314 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
345 315
346 buf = pci_map_single(card->pdev, descr, 316 buf = pci_map_single(card->pdev, descr,
347 SPIDER_NET_DESCR_SIZE, 317 SPIDER_NET_DESCR_SIZE,
348 PCI_DMA_BIDIRECTIONAL); 318 direction);
349 319
350 if (buf == DMA_ERROR_CODE) 320 if (buf == DMA_ERROR_CODE)
351 goto iommu_error; 321 goto iommu_error;
@@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card,
360 start_descr->prev = descr-1; 330 start_descr->prev = descr-1;
361 331
362 descr = start_descr; 332 descr = start_descr;
363 for (i=0; i < no; i++, descr++) { 333 if (direction == PCI_DMA_FROMDEVICE)
364 descr->next_descr_addr = descr->next->bus_addr; 334 for (i=0; i < no; i++, descr++)
365 } 335 descr->next_descr_addr = descr->next->bus_addr;
366 336
337 spin_lock_init(&chain->lock);
367 chain->head = start_descr; 338 chain->head = start_descr;
368 chain->tail = start_descr; 339 chain->tail = start_descr;
369 340
@@ -375,7 +346,7 @@ iommu_error:
375 if (descr->bus_addr) 346 if (descr->bus_addr)
376 pci_unmap_single(card->pdev, descr->bus_addr, 347 pci_unmap_single(card->pdev, descr->bus_addr,
377 SPIDER_NET_DESCR_SIZE, 348 SPIDER_NET_DESCR_SIZE,
378 PCI_DMA_BIDIRECTIONAL); 349 direction);
379 return -ENOMEM; 350 return -ENOMEM;
380} 351}
381 352
@@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
396 dev_kfree_skb(descr->skb); 367 dev_kfree_skb(descr->skb);
397 pci_unmap_single(card->pdev, descr->buf_addr, 368 pci_unmap_single(card->pdev, descr->buf_addr,
398 SPIDER_NET_MAX_FRAME, 369 SPIDER_NET_MAX_FRAME,
399 PCI_DMA_BIDIRECTIONAL); 370 PCI_DMA_FROMDEVICE);
400 } 371 }
401 descr = descr->next; 372 descr = descr->next;
402 } 373 }
@@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
446 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 417 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
447 /* io-mmu-map the skb */ 418 /* io-mmu-map the skb */
448 buf = pci_map_single(card->pdev, descr->skb->data, 419 buf = pci_map_single(card->pdev, descr->skb->data,
449 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); 420 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
450 descr->buf_addr = buf; 421 descr->buf_addr = buf;
451 if (buf == DMA_ERROR_CODE) { 422 if (buf == DMA_ERROR_CODE) {
452 dev_kfree_skb_any(descr->skb); 423 dev_kfree_skb_any(descr->skb);
453 if (netif_msg_rx_err(card) && net_ratelimit()) 424 if (netif_msg_rx_err(card) && net_ratelimit())
454 pr_err("Could not iommu-map rx buffer\n"); 425 pr_err("Could not iommu-map rx buffer\n");
455 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 426 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
456 } else { 427 } else {
457 descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED; 428 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
429 SPIDER_NET_DMAC_NOINTR_COMPLETE;
458 } 430 }
459 431
460 return error; 432 return error;
@@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
468 * chip by writing to the appropriate register. DMA is enabled in 440 * chip by writing to the appropriate register. DMA is enabled in
469 * spider_net_enable_rxdmac. 441 * spider_net_enable_rxdmac.
470 */ 442 */
471static void 443static inline void
472spider_net_enable_rxchtails(struct spider_net_card *card) 444spider_net_enable_rxchtails(struct spider_net_card *card)
473{ 445{
474 /* assume chain is aligned correctly */ 446 /* assume chain is aligned correctly */
@@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
483 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 455 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
484 * in the GDADMACCNTR register 456 * in the GDADMACCNTR register
485 */ 457 */
486static void 458static inline void
487spider_net_enable_rxdmac(struct spider_net_card *card) 459spider_net_enable_rxdmac(struct spider_net_card *card)
488{ 460{
489 wmb(); 461 wmb();
@@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
500static void 472static void
501spider_net_refill_rx_chain(struct spider_net_card *card) 473spider_net_refill_rx_chain(struct spider_net_card *card)
502{ 474{
503 struct spider_net_descr_chain *chain; 475 struct spider_net_descr_chain *chain = &card->rx_chain;
504 476 unsigned long flags;
505 chain = &card->rx_chain;
506 477
507 /* one context doing the refill (and a second context seeing that 478 /* one context doing the refill (and a second context seeing that
508 * and omitting it) is ok. If called by NAPI, we'll be called again 479 * and omitting it) is ok. If called by NAPI, we'll be called again
509 * as spider_net_decode_one_descr is called several times. If some 480 * as spider_net_decode_one_descr is called several times. If some
510 * interrupt calls us, the NAPI is about to clean up anyway. */ 481 * interrupt calls us, the NAPI is about to clean up anyway. */
511 if (atomic_inc_return(&card->rx_chain_refill) == 1) 482 if (!spin_trylock_irqsave(&chain->lock, flags))
512 while (spider_net_get_descr_status(chain->head) == 483 return;
513 SPIDER_NET_DESCR_NOT_IN_USE) { 484
514 if (spider_net_prepare_rx_descr(card, chain->head)) 485 while (spider_net_get_descr_status(chain->head) ==
515 break; 486 SPIDER_NET_DESCR_NOT_IN_USE) {
516 chain->head = chain->head->next; 487 if (spider_net_prepare_rx_descr(card, chain->head))
517 } 488 break;
489 chain->head = chain->head->next;
490 }
518 491
519 atomic_dec(&card->rx_chain_refill); 492 spin_unlock_irqrestore(&chain->lock, flags);
520} 493}
521 494
522/** 495/**
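spider_net_refill_rx_chain() above swaps the atomic rx_chain_refill counter for spin_trylock_irqsave() on the new per-chain lock: if another context already holds the lock, the refill is simply skipped, since that context will finish the job. A minimal sketch of the pattern with placeholder names:

/* Sketch of the "skip if someone else is already doing it" refill guard. */
#include <linux/spinlock.h>

struct sketch_chain {
        spinlock_t lock;
        /* descriptor pointers would live here */
};

static void sketch_refill(struct sketch_chain *chain)
{
        unsigned long flags;

        if (!spin_trylock_irqsave(&chain->lock, flags))
                return;                 /* another context is refilling */

        /* ... allocate and hand buffers to the hardware here ... */

        spin_unlock_irqrestore(&chain->lock, flags);
}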
@@ -554,111 +527,6 @@ error:
554} 527}
555 528
556/** 529/**
557 * spider_net_release_tx_descr - processes a used tx descriptor
558 * @card: card structure
559 * @descr: descriptor to release
560 *
561 * releases a used tx descriptor (unmapping, freeing of skb)
562 */
563static void
564spider_net_release_tx_descr(struct spider_net_card *card,
565 struct spider_net_descr *descr)
566{
567 struct sk_buff *skb;
568
569 /* unmap the skb */
570 skb = descr->skb;
571 pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
572 PCI_DMA_BIDIRECTIONAL);
573
574 dev_kfree_skb_any(skb);
575
576 /* set status to not used */
577 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
578}
579
580/**
581 * spider_net_release_tx_chain - processes sent tx descriptors
582 * @card: adapter structure
583 * @brutal: if set, don't care about whether descriptor seems to be in use
584 *
585 * returns 0 if the tx ring is empty, otherwise 1.
586 *
587 * spider_net_release_tx_chain releases the tx descriptors that spider has
588 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
589 * If some other context is calling this function, we return 1 so that we're
590 * scheduled again (if we were scheduled) and will not lose initiative.
591 */
592static int
593spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
594{
595 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
596 enum spider_net_descr_status status;
597
598 if (atomic_inc_return(&card->tx_chain_release) != 1) {
599 atomic_dec(&card->tx_chain_release);
600 return 1;
601 }
602
603 for (;;) {
604 status = spider_net_get_descr_status(tx_chain->tail);
605 switch (status) {
606 case SPIDER_NET_DESCR_CARDOWNED:
607 if (!brutal)
608 goto out;
609 /* fallthrough, if we release the descriptors
610 * brutally (then we don't care about
611 * SPIDER_NET_DESCR_CARDOWNED) */
612 case SPIDER_NET_DESCR_RESPONSE_ERROR:
613 case SPIDER_NET_DESCR_PROTECTION_ERROR:
614 case SPIDER_NET_DESCR_FORCE_END:
615 if (netif_msg_tx_err(card))
616 pr_err("%s: forcing end of tx descriptor "
617 "with status x%02x\n",
618 card->netdev->name, status);
619 card->netdev_stats.tx_dropped++;
620 break;
621
622 case SPIDER_NET_DESCR_COMPLETE:
623 card->netdev_stats.tx_packets++;
624 card->netdev_stats.tx_bytes +=
625 tx_chain->tail->skb->len;
626 break;
627
628 default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
629 goto out;
630 }
631 spider_net_release_tx_descr(card, tx_chain->tail);
632 tx_chain->tail = tx_chain->tail->next;
633 }
634out:
635 atomic_dec(&card->tx_chain_release);
636
637 netif_wake_queue(card->netdev);
638
639 if (status == SPIDER_NET_DESCR_CARDOWNED)
640 return 1;
641 return 0;
642}
643
644/**
645 * spider_net_cleanup_tx_ring - cleans up the TX ring
646 * @card: card structure
647 *
648 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
649 * interrupts to cleanup our TX ring) and returns sent packets to the stack
650 * by freeing them
651 */
652static void
653spider_net_cleanup_tx_ring(struct spider_net_card *card)
654{
655 if ( (spider_net_release_tx_chain(card, 0)) &&
656 (card->netdev->flags & IFF_UP) ) {
657 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
658 }
659}
660
661/**
662 * spider_net_get_multicast_hash - generates hash for multicast filter table 530 * spider_net_get_multicast_hash - generates hash for multicast filter table
663 * @addr: multicast address 531 * @addr: multicast address
664 * 532 *
@@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card)
761} 629}
762 630
763/** 631/**
764 * spider_net_stop - called upon ifconfig down
765 * @netdev: interface device structure
766 *
767 * always returns 0
768 */
769int
770spider_net_stop(struct net_device *netdev)
771{
772 struct spider_net_card *card = netdev_priv(netdev);
773
774 tasklet_kill(&card->rxram_full_tl);
775 netif_poll_disable(netdev);
776 netif_carrier_off(netdev);
777 netif_stop_queue(netdev);
778 del_timer_sync(&card->tx_timer);
779
780 /* disable/mask all interrupts */
781 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
782 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
783 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
784
785 /* free_irq(netdev->irq, netdev);*/
786 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
787
788 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
789 SPIDER_NET_DMA_TX_FEND_VALUE);
790
791 /* turn off DMA, force end */
792 spider_net_disable_rxdmac(card);
793
794 /* release chains */
795 spider_net_release_tx_chain(card, 1);
796
797 spider_net_free_chain(card, &card->tx_chain);
798 spider_net_free_chain(card, &card->rx_chain);
799
800 return 0;
801}
802
803/**
804 * spider_net_get_next_tx_descr - returns the next available tx descriptor
805 * @card: device structure to get descriptor from
806 *
807 * returns the address of the next descriptor, or NULL if not available.
808 */
809static struct spider_net_descr *
810spider_net_get_next_tx_descr(struct spider_net_card *card)
811{
812 /* check, if head points to not-in-use descr */
813 if ( spider_net_get_descr_status(card->tx_chain.head) ==
814 SPIDER_NET_DESCR_NOT_IN_USE ) {
815 return card->tx_chain.head;
816 } else {
817 return NULL;
818 }
819}
820
821/**
822 * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
823 * @descr: descriptor structure to fill out
824 * @skb: packet to consider
825 *
826 * fills out the command and status field of the descriptor structure,
827 * depending on hardware checksum settings.
828 */
829static void
830spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
831 struct sk_buff *skb)
832{
833 /* make sure the other fields in the descriptor are written */
834 wmb();
835
836 if (skb->ip_summed != CHECKSUM_HW) {
837 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
838 return;
839 }
840
841 /* is packet ip?
842 * if yes: tcp? udp? */
843 if (skb->protocol == htons(ETH_P_IP)) {
844 if (skb->nh.iph->protocol == IPPROTO_TCP)
845 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
846 else if (skb->nh.iph->protocol == IPPROTO_UDP)
847 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
848 else /* the stack should checksum non-tcp and non-udp
849 packets on its own: NETIF_F_IP_CSUM */
850 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
851 }
852}
853
854/**
855 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 632 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
856 * @card: card structure 633 * @card: card structure
857 * @descr: descriptor structure to fill out 634 * @descr: descriptor structure to fill out
@@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
864 */ 641 */
865static int 642static int
866spider_net_prepare_tx_descr(struct spider_net_card *card, 643spider_net_prepare_tx_descr(struct spider_net_card *card,
867 struct spider_net_descr *descr,
868 struct sk_buff *skb) 644 struct sk_buff *skb)
869{ 645{
646 struct spider_net_descr *descr = card->tx_chain.head;
870 dma_addr_t buf; 647 dma_addr_t buf;
871 648
872 buf = pci_map_single(card->pdev, skb->data, 649 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
873 skb->len, PCI_DMA_BIDIRECTIONAL);
874 if (buf == DMA_ERROR_CODE) { 650 if (buf == DMA_ERROR_CODE) {
875 if (netif_msg_tx_err(card) && net_ratelimit()) 651 if (netif_msg_tx_err(card) && net_ratelimit())
876 pr_err("could not iommu-map packet (%p, %i). " 652 pr_err("could not iommu-map packet (%p, %i). "
@@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
880 656
881 descr->buf_addr = buf; 657 descr->buf_addr = buf;
882 descr->buf_size = skb->len; 658 descr->buf_size = skb->len;
659 descr->next_descr_addr = 0;
883 descr->skb = skb; 660 descr->skb = skb;
884 descr->data_status = 0; 661 descr->data_status = 0;
885 662
886 spider_net_set_txdescr_cmdstat(descr,skb); 663 descr->dmac_cmd_status =
664 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
665 if (skb->protocol == htons(ETH_P_IP))
666 switch (skb->nh.iph->protocol) {
667 case IPPROTO_TCP:
668 descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
669 break;
670 case IPPROTO_UDP:
671 descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
672 break;
673 }
674
675 descr->prev->next_descr_addr = descr->bus_addr;
676
677 return 0;
678}
679
680/**
681 * spider_net_release_tx_descr - processes a used tx descriptor
682 * @card: card structure
683 * @descr: descriptor to release
684 *
685 * releases a used tx descriptor (unmapping, freeing of skb)
686 */
687static inline void
688spider_net_release_tx_descr(struct spider_net_card *card)
689{
690 struct spider_net_descr *descr = card->tx_chain.tail;
691 struct sk_buff *skb;
692
693 card->tx_chain.tail = card->tx_chain.tail->next;
694 descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
695
696 /* unmap the skb */
697 skb = descr->skb;
698 pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
699 PCI_DMA_TODEVICE);
700 dev_kfree_skb_any(skb);
701}
702
703/**
704 * spider_net_release_tx_chain - processes sent tx descriptors
705 * @card: adapter structure
706 * @brutal: if set, don't care about whether descriptor seems to be in use
707 *
708 * returns 0 if the tx ring is empty, otherwise 1.
709 *
710 * spider_net_release_tx_chain releases the tx descriptors that spider has
711 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
712 * If some other context is calling this function, we return 1 so that we're
713 * scheduled again (if we were scheduled) and will not lose initiative.
714 */
715static int
716spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
717{
718 struct spider_net_descr_chain *chain = &card->tx_chain;
719 int status;
720
721 spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
722
723 while (chain->tail != chain->head) {
724 status = spider_net_get_descr_status(chain->tail);
725 switch (status) {
726 case SPIDER_NET_DESCR_COMPLETE:
727 card->netdev_stats.tx_packets++;
728 card->netdev_stats.tx_bytes += chain->tail->skb->len;
729 break;
730
731 case SPIDER_NET_DESCR_CARDOWNED:
732 if (!brutal)
733 return 1;
734 /* fallthrough, if we release the descriptors
735 * brutally (then we don't care about
736 * SPIDER_NET_DESCR_CARDOWNED) */
737
738 case SPIDER_NET_DESCR_RESPONSE_ERROR:
739 case SPIDER_NET_DESCR_PROTECTION_ERROR:
740 case SPIDER_NET_DESCR_FORCE_END:
741 if (netif_msg_tx_err(card))
742 pr_err("%s: forcing end of tx descriptor "
743 "with status x%02x\n",
744 card->netdev->name, status);
745 card->netdev_stats.tx_errors++;
746 break;
747
748 default:
749 card->netdev_stats.tx_dropped++;
750 return 1;
751 }
752 spider_net_release_tx_descr(card);
753 }
887 754
888 return 0; 755 return 0;
889} 756}
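The new spider_net_prepare_tx_descr() above builds the command word incrementally: start from "card owned, no checksum" and OR in the TCP or UDP checksum bit depending on the IP protocol, instead of selecting one of three precombined command values as the deleted set_txdescr_cmdstat() did. A sketch of that flag selection, using the constants this patch adds to spider_net.h:

/* Sketch: pick hardware checksum-offload bits from the IP protocol. */
#include <linux/types.h>
#include <linux/in.h>

#define DESCR_CARDOWNED 0xA0000000
#define DMAC_NOCS       0x00040000
#define DMAC_TCP        0x00020000
#define DMAC_UDP        0x00030000

static u32 tx_cmd_status(int is_ipv4, unsigned int ip_proto)
{
        u32 cmd = DESCR_CARDOWNED | DMAC_NOCS;

        if (is_ipv4) {
                if (ip_proto == IPPROTO_TCP)
                        cmd |= DMAC_TCP;
                else if (ip_proto == IPPROTO_UDP)
                        cmd |= DMAC_UDP;
        }
        return cmd;
}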
@@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
896 * spider_net_kick_tx_dma writes the current tx chain head as start address 763 * spider_net_kick_tx_dma writes the current tx chain head as start address
897 * of the tx descriptor chain and enables the transmission DMA engine 764 * of the tx descriptor chain and enables the transmission DMA engine
898 */ 765 */
899static void 766static inline void
900spider_net_kick_tx_dma(struct spider_net_card *card, 767spider_net_kick_tx_dma(struct spider_net_card *card)
901 struct spider_net_descr *descr)
902{ 768{
903 /* this is the only descriptor in the output chain. 769 struct spider_net_descr *descr;
904 * Enable TX DMA */
905 770
906 spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 771 if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
907 descr->bus_addr); 772 SPIDER_NET_TX_DMA_EN)
773 goto out;
908 774
909 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 775 descr = card->tx_chain.tail;
910 SPIDER_NET_DMA_TX_VALUE); 776 for (;;) {
777 if (spider_net_get_descr_status(descr) ==
778 SPIDER_NET_DESCR_CARDOWNED) {
779 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
780 descr->bus_addr);
781 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
782 SPIDER_NET_DMA_TX_VALUE);
783 break;
784 }
785 if (descr == card->tx_chain.head)
786 break;
787 descr = descr->next;
788 }
789
790out:
791 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
911} 792}
912 793
913/** 794/**
@@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card,
915 * @skb: packet to send out 796 * @skb: packet to send out
916 * @netdev: interface device structure 797 * @netdev: interface device structure
917 * 798 *
918 * returns 0 on success, <0 on failure 799 * returns 0 on success, !0 on failure
919 */ 800 */
920static int 801static int
921spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) 802spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
922{ 803{
923 struct spider_net_card *card = netdev_priv(netdev); 804 struct spider_net_card *card = netdev_priv(netdev);
924 struct spider_net_descr *descr; 805 struct spider_net_descr_chain *chain = &card->tx_chain;
806 struct spider_net_descr *descr = chain->head;
807 unsigned long flags;
925 int result; 808 int result;
926 809
810 spin_lock_irqsave(&chain->lock, flags);
811
927 spider_net_release_tx_chain(card, 0); 812 spider_net_release_tx_chain(card, 0);
928 813
929 descr = spider_net_get_next_tx_descr(card); 814 if (chain->head->next == chain->tail->prev) {
815 card->netdev_stats.tx_dropped++;
816 result = NETDEV_TX_LOCKED;
817 goto out;
818 }
930 819
931 if (!descr) 820 if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
932 goto error; 821 result = NETDEV_TX_LOCKED;
822 goto out;
823 }
933 824
934 result = spider_net_prepare_tx_descr(card, descr, skb); 825 if (spider_net_prepare_tx_descr(card, skb) != 0) {
935 if (result) 826 card->netdev_stats.tx_dropped++;
936 goto error; 827 result = NETDEV_TX_BUSY;
828 goto out;
829 }
830
831 result = NETDEV_TX_OK;
937 832
833 spider_net_kick_tx_dma(card);
938 card->tx_chain.head = card->tx_chain.head->next; 834 card->tx_chain.head = card->tx_chain.head->next;
939 835
940 if (spider_net_get_descr_status(descr->prev) != 836out:
941 SPIDER_NET_DESCR_CARDOWNED) { 837 spin_unlock_irqrestore(&chain->lock, flags);
942 /* make sure the current descriptor is in memory. Then 838 netif_wake_queue(netdev);
943 * kicking it on again makes sense, if the previous is not 839 return result;
944 * card-owned anymore. Check the previous descriptor twice 840}
945 * to omit an mb() in heavy traffic cases */
946 mb();
947 if (spider_net_get_descr_status(descr->prev) !=
948 SPIDER_NET_DESCR_CARDOWNED)
949 spider_net_kick_tx_dma(card, descr);
950 }
951 841
952 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); 842/**
843 * spider_net_cleanup_tx_ring - cleans up the TX ring
844 * @card: card structure
845 *
846 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
847 * interrupts to cleanup our TX ring) and returns sent packets to the stack
848 * by freeing them
849 */
850static void
851spider_net_cleanup_tx_ring(struct spider_net_card *card)
852{
853 unsigned long flags;
953 854
954 return NETDEV_TX_OK; 855 spin_lock_irqsave(&card->tx_chain.lock, flags);
955 856
956error: 857 if ((spider_net_release_tx_chain(card, 0) != 0) &&
957 card->netdev_stats.tx_dropped++; 858 (card->netdev->flags & IFF_UP))
958 return NETDEV_TX_BUSY; 859 spider_net_kick_tx_dma(card);
860
861 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
959} 862}
960 863
961/** 864/**
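The reworked spider_net_xmit() above pairs with the NETIF_F_LLTX flag added at the end of this file's diff: the core no longer takes the device transmit lock for this driver, so the transmit path serializes itself on the tx_chain spinlock and reports contention or a full ring with NETDEV_TX_LOCKED / NETDEV_TX_BUSY. A stripped-down sketch of such a self-locking hard_start_xmit, with a placeholder private structure and queueing helper:

/* Sketch of a lockless-TX (NETIF_F_LLTX) transmit handler: the driver,
 * not the networking core, provides the serialization. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct sketch_priv {
        spinlock_t tx_lock;
        /* ring state would live here */
};

static int queue_frame(struct sketch_priv *priv, struct sk_buff *skb);

static int sketch_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct sketch_priv *priv = netdev_priv(netdev);
        unsigned long flags;
        int ret = NETDEV_TX_OK;

        spin_lock_irqsave(&priv->tx_lock, flags);
        if (queue_frame(priv, skb) != 0)
                ret = NETDEV_TX_BUSY;   /* ring full: the core requeues */
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        return ret;
}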
@@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1002 905
1003 /* unmap descriptor */ 906 /* unmap descriptor */
1004 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, 907 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
1005 PCI_DMA_BIDIRECTIONAL); 908 PCI_DMA_FROMDEVICE);
1006 909
1007 /* the cases we'll throw away the packet immediately */ 910 /* the cases we'll throw away the packet immediately */
1008 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { 911 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
@@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1067static int 970static int
1068spider_net_decode_one_descr(struct spider_net_card *card, int napi) 971spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1069{ 972{
1070 enum spider_net_descr_status status; 973 struct spider_net_descr_chain *chain = &card->rx_chain;
1071 struct spider_net_descr *descr; 974 struct spider_net_descr *descr = chain->tail;
1072 struct spider_net_descr_chain *chain; 975 int status;
1073 int result; 976 int result;
1074 977
1075 chain = &card->rx_chain;
1076 descr = chain->tail;
1077
1078 status = spider_net_get_descr_status(descr); 978 status = spider_net_get_descr_status(descr);
1079 979
1080 if (status == SPIDER_NET_DESCR_CARDOWNED) { 980 if (status == SPIDER_NET_DESCR_CARDOWNED) {
@@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1103 card->netdev->name, status); 1003 card->netdev->name, status);
1104 card->netdev_stats.rx_dropped++; 1004 card->netdev_stats.rx_dropped++;
1105 pci_unmap_single(card->pdev, descr->buf_addr, 1005 pci_unmap_single(card->pdev, descr->buf_addr,
1106 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); 1006 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1107 dev_kfree_skb_irq(descr->skb); 1007 dev_kfree_skb_irq(descr->skb);
1108 goto refill; 1008 goto refill;
1109 } 1009 }
@@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1119 /* ok, we've got a packet in descr */ 1019 /* ok, we've got a packet in descr */
1120 result = spider_net_pass_skb_up(descr, card, napi); 1020 result = spider_net_pass_skb_up(descr, card, napi);
1121refill: 1021refill:
1122 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 1022 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1123 /* change the descriptor state: */ 1023 /* change the descriptor state: */
1124 if (!napi) 1024 if (!napi)
1125 spider_net_refill_rx_chain(card); 1025 spider_net_refill_rx_chain(card);
@@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1291} 1191}
1292 1192
1293/** 1193/**
1294 * spider_net_enable_txdmac - enables a TX DMA controller
1295 * @card: card structure
1296 *
1297 * spider_net_enable_txdmac enables the TX DMA controller by setting the
1298 * descriptor chain tail address
1299 */
1300static void
1301spider_net_enable_txdmac(struct spider_net_card *card)
1302{
1303 /* assume chain is aligned correctly */
1304 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
1305 card->tx_chain.tail->bus_addr);
1306}
1307
1308/**
1309 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt 1194 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1310 * @card: card structure 1195 * @card: card structure
1311 * 1196 *
@@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card)
1653 { SPIDER_NET_GMRWOLCTRL, 0 }, 1538 { SPIDER_NET_GMRWOLCTRL, 0 },
1654 { SPIDER_NET_GTESTMD, 0x10000000 }, 1539 { SPIDER_NET_GTESTMD, 0x10000000 },
1655 { SPIDER_NET_GTTQMSK, 0x00400040 }, 1540 { SPIDER_NET_GTTQMSK, 0x00400040 },
1656 { SPIDER_NET_GTESTMD, 0 },
1657 1541
1658 { SPIDER_NET_GMACINTEN, 0 }, 1542 { SPIDER_NET_GMACINTEN, 0 },
1659 1543
@@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card)
1692 1576
1693 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); 1577 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1694 1578
1695 /* set chain tail address for TX chain */
1696 spider_net_enable_txdmac(card);
1697
1698 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, 1579 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1699 SPIDER_NET_LENLMT_VALUE); 1580 SPIDER_NET_LENLMT_VALUE);
1700 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 1581 spider_net_write_reg(card, SPIDER_NET_GMACMODE,
@@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card)
1709 SPIDER_NET_INT1_MASK_VALUE); 1590 SPIDER_NET_INT1_MASK_VALUE);
1710 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 1591 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1711 SPIDER_NET_INT2_MASK_VALUE); 1592 SPIDER_NET_INT2_MASK_VALUE);
1593
1594 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1595 SPIDER_NET_GDTDCEIDIS);
1712} 1596}
1713 1597
1714/** 1598/**
@@ -1728,10 +1612,12 @@ spider_net_open(struct net_device *netdev)
1728 1612
1729 result = -ENOMEM; 1613 result = -ENOMEM;
1730 if (spider_net_init_chain(card, &card->tx_chain, 1614 if (spider_net_init_chain(card, &card->tx_chain,
1731 card->descr, tx_descriptors)) 1615 card->descr,
1616 PCI_DMA_TODEVICE, tx_descriptors))
1732 goto alloc_tx_failed; 1617 goto alloc_tx_failed;
1733 if (spider_net_init_chain(card, &card->rx_chain, 1618 if (spider_net_init_chain(card, &card->rx_chain,
1734 card->descr + tx_descriptors, rx_descriptors)) 1619 card->descr + tx_descriptors,
1620 PCI_DMA_FROMDEVICE, rx_descriptors))
1735 goto alloc_rx_failed; 1621 goto alloc_rx_failed;
1736 1622
1737 /* allocate rx skbs */ 1623 /* allocate rx skbs */
@@ -1938,7 +1824,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1938 /* empty sequencer data */ 1824 /* empty sequencer data */
1939 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; 1825 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1940 sequencer++) { 1826 sequencer++) {
1941 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1827 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
1942 sequencer * 8, 0x0); 1828 sequencer * 8, 0x0);
1943 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { 1829 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1944 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1830 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
@@ -1955,6 +1841,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1955} 1841}
1956 1842
1957/** 1843/**
1844 * spider_net_stop - called upon ifconfig down
1845 * @netdev: interface device structure
1846 *
1847 * always returns 0
1848 */
1849int
1850spider_net_stop(struct net_device *netdev)
1851{
1852 struct spider_net_card *card = netdev_priv(netdev);
1853
1854 tasklet_kill(&card->rxram_full_tl);
1855 netif_poll_disable(netdev);
1856 netif_carrier_off(netdev);
1857 netif_stop_queue(netdev);
1858 del_timer_sync(&card->tx_timer);
1859
1860 /* disable/mask all interrupts */
1861 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1862 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1863 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
1864
1865 /* free_irq(netdev->irq, netdev);*/
1866 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
1867
1868 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1869 SPIDER_NET_DMA_TX_FEND_VALUE);
1870
1871 /* turn off DMA, force end */
1872 spider_net_disable_rxdmac(card);
1873
1874 /* release chains */
1875 if (spin_trylock(&card->tx_chain.lock)) {
1876 spider_net_release_tx_chain(card, 1);
1877 spin_unlock(&card->tx_chain.lock);
1878 }
1879
1880 spider_net_free_chain(card, &card->tx_chain);
1881 spider_net_free_chain(card, &card->rx_chain);
1882
1883 return 0;
1884}
1885
1886/**
1958 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout 1887 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
1959 * function (to be called not under interrupt status) 1888 * function (to be called not under interrupt status)
1960 * @data: data, is interface device structure 1889 * @data: data, is interface device structure
@@ -1982,7 +1911,7 @@ spider_net_tx_timeout_task(void *data)
1982 goto out; 1911 goto out;
1983 1912
1984 spider_net_open(netdev); 1913 spider_net_open(netdev);
1985 spider_net_kick_tx_dma(card, card->tx_chain.head); 1914 spider_net_kick_tx_dma(card);
1986 netif_device_attach(netdev); 1915 netif_device_attach(netdev);
1987 1916
1988out: 1917out:
@@ -2065,7 +1994,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
2065 1994
2066 pci_set_drvdata(card->pdev, netdev); 1995 pci_set_drvdata(card->pdev, netdev);
2067 1996
2068 atomic_set(&card->tx_chain_release,0);
2069 card->rxram_full_tl.data = (unsigned long) card; 1997 card->rxram_full_tl.data = (unsigned long) card;
2070 card->rxram_full_tl.func = 1998 card->rxram_full_tl.func =
2071 (void (*)(unsigned long)) spider_net_handle_rxram_full; 1999 (void (*)(unsigned long)) spider_net_handle_rxram_full;
@@ -2079,7 +2007,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
2079 2007
2080 spider_net_setup_netdev_ops(netdev); 2008 spider_net_setup_netdev_ops(netdev);
2081 2009
2082 netdev->features = NETIF_F_HW_CSUM; 2010 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
2083 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2011 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2084 * NETIF_F_HW_VLAN_FILTER */ 2012 * NETIF_F_HW_VLAN_FILTER */
2085 2013
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3b8d951cf73c..f6dcf180ae3d 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -208,7 +208,10 @@ extern char spider_net_driver_name[];
208#define SPIDER_NET_DMA_RX_VALUE 0x80000000 208#define SPIDER_NET_DMA_RX_VALUE 0x80000000
209#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 209#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
210/* to set TX_DMA_EN */ 210/* to set TX_DMA_EN */
211#define SPIDER_NET_DMA_TX_VALUE 0x80000000 211#define SPIDER_NET_TX_DMA_EN 0x80000000
212#define SPIDER_NET_GDTDCEIDIS 0x00000002
213#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
214 SPIDER_NET_GDTDCEIDIS
212#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 215#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
213 216
214/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ 217/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@@ -329,55 +332,23 @@ enum spider_net_int2_status {
329 (~SPIDER_NET_TXINT) & \ 332 (~SPIDER_NET_TXINT) & \
330 (~SPIDER_NET_RXINT) ) 333 (~SPIDER_NET_RXINT) )
331 334
332#define SPIDER_NET_GPREXEC 0x80000000 335#define SPIDER_NET_GPREXEC 0x80000000
333#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 336#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
334 337
335/* descriptor bits 338#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
336 * 339#define SPIDER_NET_DMAC_NOCS 0x00040000
337 * 1010 descriptor ready 340#define SPIDER_NET_DMAC_TCP 0x00020000
338 * 0 descr in middle of chain 341#define SPIDER_NET_DMAC_UDP 0x00030000
339 * 000 fixed to 0 342#define SPIDER_NET_TXDCEST 0x08000000
340 * 343
341 * 0 no interrupt on completion 344#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
342 * 000 fixed to 0 345#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
343 * 1 no ipsec processing 346#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
344 * 1 last descriptor for this frame 347#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
345 * 00 no checksum 348#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
346 * 10 tcp checksum 349#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
347 * 11 udp checksum 350#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
348 * 351#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
349 * 00 fixed to 0
350 * 0 fixed to 0
351 * 0 no interrupt on response errors
352 * 0 no interrupt on invalid descr
353 * 0 no interrupt on dma process termination
354 * 0 no interrupt on descr chain end
355 * 0 no interrupt on descr complete
356 *
357 * 000 fixed to 0
358 * 0 response error interrupt status
359 * 0 invalid descr status
360 * 0 dma termination status
361 * 0 descr chain end status
362 * 0 descr complete status */
363#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000
364#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000
365#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000
366#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28
367#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff
368
369/* descr ready, descr is in middle of chain, get interrupt on completion */
370#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
371
372enum spider_net_descr_status {
373 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
374 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
375 SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
376 SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */
377 SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
378 SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
379 SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
380};
381 352
382struct spider_net_descr { 353struct spider_net_descr {
383 /* as defined by the hardware */ 354 /* as defined by the hardware */
@@ -398,7 +369,7 @@ struct spider_net_descr {
398} __attribute__((aligned(32))); 369} __attribute__((aligned(32)));
399 370
400struct spider_net_descr_chain { 371struct spider_net_descr_chain {
401 /* we walk from tail to head */ 372 spinlock_t lock;
402 struct spider_net_descr *head; 373 struct spider_net_descr *head;
403 struct spider_net_descr *tail; 374 struct spider_net_descr *tail;
404}; 375};
@@ -453,8 +424,6 @@ struct spider_net_card {
453 424
454 struct spider_net_descr_chain tx_chain; 425 struct spider_net_descr_chain tx_chain;
455 struct spider_net_descr_chain rx_chain; 426 struct spider_net_descr_chain rx_chain;
456 atomic_t rx_chain_refill;
457 atomic_t tx_chain_release;
458 427
459 struct net_device_stats netdev_stats; 428 struct net_device_stats netdev_stats;
460 429
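One detail worth flagging in the header hunk above: SPIDER_NET_DMA_TX_VALUE is defined as an OR of two flags without surrounding parentheses. That is harmless in the plain register writes this driver performs, but it would expand badly inside a larger expression, so the defensive form would be:

/* Sketch: parenthesize an OR-of-flags macro so it is safe in any
 * expression context (names as in the header above). */
#define SPIDER_NET_TX_DMA_EN    0x80000000
#define SPIDER_NET_GDTDCEIDIS   0x00000002
#define SPIDER_NET_DMA_TX_VALUE (SPIDER_NET_TX_DMA_EN | \
                                 SPIDER_NET_GDTDCEIDIS)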
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 8673fd4c08c7..c6f5bc3c042f 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
3255} 3255}
3256 3256
3257static struct pci_device_id happymeal_pci_ids[] = { 3257static struct pci_device_id happymeal_pci_ids[] = {
3258 { 3258 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3259 .vendor = PCI_VENDOR_ID_SUN,
3260 .device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
3261 .subvendor = PCI_ANY_ID,
3262 .subdevice = PCI_ANY_ID,
3263 },
3264 { } /* Terminating entry */ 3259 { } /* Terminating entry */
3265}; 3260};
3266 3261
@@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = {
3275 3270
3276static int __init happy_meal_pci_init(void) 3271static int __init happy_meal_pci_init(void)
3277{ 3272{
3278 return pci_module_init(&hme_pci_driver); 3273 return pci_register_driver(&hme_pci_driver);
3279} 3274}
3280 3275
3281static void happy_meal_pci_exit(void) 3276static void happy_meal_pci_exit(void)
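The sunhme hunks above are straight modernization: the open-coded pci_device_id entry becomes the PCI_DEVICE() shorthand, which fills in vendor and device and defaults subvendor/subdevice to PCI_ANY_ID, and pci_module_init(), a legacy alias, becomes pci_register_driver(). Conceptually the shorthand expands to something like the following (a simplified sketch, not a copy of <linux/pci.h>):

/* Sketch of what the PCI_DEVICE(vend, dev) initializer shorthand provides. */
#define PCI_DEVICE_SKETCH(vend, dev)            \
        .vendor = (vend), .device = (dev),      \
        .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID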
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..0e3fdf7c6dd3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
1537{ 1537{
1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || 1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) { 1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
1540 memset(&sun4_sdev, 0, sizeof(sdev)); 1540 memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; 1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
1542 sun4_sdev.irqs[0] = 6; 1542 sun4_sdev.irqs[0] = 6;
1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); 1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)
1547 1547
1548static int __exit sunlance_sun4_remove(void) 1548static int __exit sunlance_sun4_remove(void)
1549{ 1549{
1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); 1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
1551 struct net_device *net_dev = lp->dev; 1551 struct net_device *net_dev = lp->dev;
1552 1552
1553 unregister_netdevice(net_dev); 1553 unregister_netdevice(net_dev);
1554 1554
1555 lance_free_hwresources(root_lance_dev); 1555 lance_free_hwresources(lp);
1556 1556
1557 free_netdev(net_dev); 1557 free_netdev(net_dev);
1558 1558
1559 dev_set_drvdata(&sun4_sdev->dev, NULL); 1559 dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
1560 1560
1561 return 0; 1561 return 0;
1562} 1562}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ce6f3be86da0..1b8138f641e3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.62" 71#define DRV_MODULE_VERSION "3.63"
72#define DRV_MODULE_RELDATE "June 30, 2006" 72#define DRV_MODULE_RELDATE "July 25, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3590static int tg3_init_hw(struct tg3 *, int); 3590static int tg3_init_hw(struct tg3 *, int);
3591static int tg3_halt(struct tg3 *, int, int); 3591static int tg3_halt(struct tg3 *, int, int);
3592 3592
3593/* Restart hardware after configuration changes, self-test, etc.
3594 * Invoked with tp->lock held.
3595 */
3596static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3597{
3598 int err;
3599
3600 err = tg3_init_hw(tp, reset_phy);
3601 if (err) {
3602 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3603 "aborting.\n", tp->dev->name);
3604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3605 tg3_full_unlock(tp);
3606 del_timer_sync(&tp->timer);
3607 tp->irq_sync = 0;
3608 netif_poll_enable(tp->dev);
3609 dev_close(tp->dev);
3610 tg3_full_lock(tp, 0);
3611 }
3612 return err;
3613}
3614
3593#ifdef CONFIG_NET_POLL_CONTROLLER 3615#ifdef CONFIG_NET_POLL_CONTROLLER
3594static void tg3_poll_controller(struct net_device *dev) 3616static void tg3_poll_controller(struct net_device *dev)
3595{ 3617{
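tg3_restart_hw() above centralizes the error handling for every "halt, reinit, restart" sequence in this driver: if tg3_init_hw() fails, it shuts the chip back down, stops the timer, re-enables polling and closes the device instead of carrying on with dead hardware. The remaining tg3 hunks convert the callers to the pattern sketched below (condensed, not actual driver code):

/* Sketch of the converted call sites: only restart the queues if the
 * reinitialization succeeded, and propagate the error otherwise. */
static int reconfigure(struct tg3 *tp)
{
        int err;

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_restart_hw(tp, 1);
        if (!err)
                tg3_netif_start(tp);

        return err;
}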
@@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data)
3630 } 3652 }
3631 3653
3632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3654 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3633 tg3_init_hw(tp, 1); 3655 if (tg3_init_hw(tp, 1))
3656 goto out;
3634 3657
3635 tg3_netif_start(tp); 3658 tg3_netif_start(tp);
3636 3659
3637 if (restart_timer) 3660 if (restart_timer)
3638 mod_timer(&tp->timer, jiffies + 1); 3661 mod_timer(&tp->timer, jiffies + 1);
3639 3662
3663out:
3640 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; 3664 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3641 3665
3642 tg3_full_unlock(tp); 3666 tg3_full_unlock(tp);
@@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4124static int tg3_change_mtu(struct net_device *dev, int new_mtu) 4148static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4125{ 4149{
4126 struct tg3 *tp = netdev_priv(dev); 4150 struct tg3 *tp = netdev_priv(dev);
4151 int err;
4127 4152
4128 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) 4153 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4129 return -EINVAL; 4154 return -EINVAL;
@@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4144 4169
4145 tg3_set_mtu(dev, tp, new_mtu); 4170 tg3_set_mtu(dev, tp, new_mtu);
4146 4171
4147 tg3_init_hw(tp, 0); 4172 err = tg3_restart_hw(tp, 0);
4148 4173
4149 tg3_netif_start(tp); 4174 if (!err)
4175 tg3_netif_start(tp);
4150 4176
4151 tg3_full_unlock(tp); 4177 tg3_full_unlock(tp);
4152 4178
4153 return 0; 4179 return err;
4154} 4180}
4155 4181
4156/* Free up pending packets in all rx/tx rings. 4182/* Free up pending packets in all rx/tx rings.
@@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp)
4232 * end up in the driver. tp->{tx,}lock are held and thus 4258 * end up in the driver. tp->{tx,}lock are held and thus
4233 * we may not sleep. 4259 * we may not sleep.
4234 */ 4260 */
4235static void tg3_init_rings(struct tg3 *tp) 4261static int tg3_init_rings(struct tg3 *tp)
4236{ 4262{
4237 u32 i; 4263 u32 i;
4238 4264
@@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp)
4281 4307
4282 /* Now allocate fresh SKBs for each rx ring. */ 4308 /* Now allocate fresh SKBs for each rx ring. */
4283 for (i = 0; i < tp->rx_pending; i++) { 4309 for (i = 0; i < tp->rx_pending; i++) {
4284 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, 4310 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4285 -1, i) < 0) 4311 printk(KERN_WARNING PFX
4312 "%s: Using a smaller RX standard ring, "
4313 "only %d out of %d buffers were allocated "
4314 "successfully.\n",
4315 tp->dev->name, i, tp->rx_pending);
4316 if (i == 0)
4317 return -ENOMEM;
4318 tp->rx_pending = i;
4286 break; 4319 break;
4320 }
4287 } 4321 }
4288 4322
4289 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 4323 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4290 for (i = 0; i < tp->rx_jumbo_pending; i++) { 4324 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4291 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, 4325 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4292 -1, i) < 0) 4326 -1, i) < 0) {
4327 printk(KERN_WARNING PFX
4328 "%s: Using a smaller RX jumbo ring, "
4329 "only %d out of %d buffers were "
4330 "allocated successfully.\n",
4331 tp->dev->name, i, tp->rx_jumbo_pending);
4332 if (i == 0) {
4333 tg3_free_rings(tp);
4334 return -ENOMEM;
4335 }
4336 tp->rx_jumbo_pending = i;
4293 break; 4337 break;
4338 }
4294 } 4339 }
4295 } 4340 }
4341 return 0;
4296} 4342}
4297 4343
4298/* 4344/*
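tg3_init_rings() above now degrades gracefully on allocation failure: if only some RX (or jumbo RX) buffers could be allocated, it logs a warning, shrinks the pending count to what actually succeeded, and returns -ENOMEM only when nothing was allocated at all. The shape of that pattern, with a placeholder ring and allocator:

/* Sketch: run with a smaller ring if only part of it could be populated,
 * failing only when no buffer at all was allocated. */
#include <linux/errno.h>
#include <linux/kernel.h>

struct sketch_ring {
        int pending;
        /* buffer bookkeeping would live here */
};

static int alloc_buf(struct sketch_ring *ring, int idx);

static int fill_ring(struct sketch_ring *ring, int wanted)
{
        int i;

        for (i = 0; i < wanted; i++)
                if (alloc_buf(ring, i) < 0)
                        break;

        if (i == 0)
                return -ENOMEM;         /* nothing usable */
        if (i < wanted)
                printk(KERN_WARNING "ring shrunk to %d of %d buffers\n",
                       i, wanted);

        ring->pending = i;              /* run with what we have */
        return 0;
}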
@@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5815{ 5861{
5816 struct tg3 *tp = netdev_priv(dev); 5862 struct tg3 *tp = netdev_priv(dev);
5817 struct sockaddr *addr = p; 5863 struct sockaddr *addr = p;
5864 int err = 0;
5818 5865
5819 if (!is_valid_ether_addr(addr->sa_data)) 5866 if (!is_valid_ether_addr(addr->sa_data))
5820 return -EINVAL; 5867 return -EINVAL;
@@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5832 tg3_full_lock(tp, 1); 5879 tg3_full_lock(tp, 1);
5833 5880
5834 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5881 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5835 tg3_init_hw(tp, 0); 5882 err = tg3_restart_hw(tp, 0);
5836 5883 if (!err)
5837 tg3_netif_start(tp); 5884 tg3_netif_start(tp);
5838 tg3_full_unlock(tp); 5885 tg3_full_unlock(tp);
5839 } else { 5886 } else {
5840 spin_lock_bh(&tp->lock); 5887 spin_lock_bh(&tp->lock);
@@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5842 spin_unlock_bh(&tp->lock); 5889 spin_unlock_bh(&tp->lock);
5843 } 5890 }
5844 5891
5845 return 0; 5892 return err;
5846} 5893}
5847 5894
5848/* tp->lock is held. */ 5895/* tp->lock is held. */
@@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5942 * can only do this after the hardware has been 5989 * can only do this after the hardware has been
5943 * successfully reset. 5990 * successfully reset.
5944 */ 5991 */
5945 tg3_init_rings(tp); 5992 err = tg3_init_rings(tp);
5993 if (err)
5994 return err;
5946 5995
5947 /* This value is determined during the probe time DMA 5996 /* This value is determined during the probe time DMA
5948 * engine test, tg3_test_dma. 5997 * engine test, tg3_test_dma.
@@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
7956static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 8005static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7957{ 8006{
7958 struct tg3 *tp = netdev_priv(dev); 8007 struct tg3 *tp = netdev_priv(dev);
7959 int irq_sync = 0; 8008 int irq_sync = 0, err = 0;
7960 8009
7961 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || 8010 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7962 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || 8011 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
7980 8029
7981 if (netif_running(dev)) { 8030 if (netif_running(dev)) {
7982 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8031 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7983 tg3_init_hw(tp, 1); 8032 err = tg3_restart_hw(tp, 1);
7984 tg3_netif_start(tp); 8033 if (!err)
8034 tg3_netif_start(tp);
7985 } 8035 }
7986 8036
7987 tg3_full_unlock(tp); 8037 tg3_full_unlock(tp);
7988 8038
7989 return 0; 8039 return err;
7990} 8040}
7991 8041
7992static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8042static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8001static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8051static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8002{ 8052{
8003 struct tg3 *tp = netdev_priv(dev); 8053 struct tg3 *tp = netdev_priv(dev);
8004 int irq_sync = 0; 8054 int irq_sync = 0, err = 0;
8005 8055
8006 if (netif_running(dev)) { 8056 if (netif_running(dev)) {
8007 tg3_netif_stop(tp); 8057 tg3_netif_stop(tp);
@@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8025 8075
8026 if (netif_running(dev)) { 8076 if (netif_running(dev)) {
8027 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8077 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8028 tg3_init_hw(tp, 1); 8078 err = tg3_restart_hw(tp, 1);
8029 tg3_netif_start(tp); 8079 if (!err)
8080 tg3_netif_start(tp);
8030 } 8081 }
8031 8082
8032 tg3_full_unlock(tp); 8083 tg3_full_unlock(tp);
8033 8084
8034 return 0; 8085 return err;
8035} 8086}
8036 8087
8037static u32 tg3_get_rx_csum(struct net_device *dev) 8088static u32 tg3_get_rx_csum(struct net_device *dev)
@@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp)
8666 if (!netif_running(tp->dev)) 8717 if (!netif_running(tp->dev))
8667 return TG3_LOOPBACK_FAILED; 8718 return TG3_LOOPBACK_FAILED;
8668 8719
8669 tg3_reset_hw(tp, 1); 8720 err = tg3_reset_hw(tp, 1);
8721 if (err)
8722 return TG3_LOOPBACK_FAILED;
8670 8723
8671 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8724 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8672 err |= TG3_MAC_LOOPBACK_FAILED; 8725 err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8740 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8793 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8741 if (netif_running(dev)) { 8794 if (netif_running(dev)) {
8742 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8795 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8743 tg3_init_hw(tp, 1); 8796 if (!tg3_restart_hw(tp, 1))
8744 tg3_netif_start(tp); 8797 tg3_netif_start(tp);
8745 } 8798 }
8746 8799
8747 tg3_full_unlock(tp); 8800 tg3_full_unlock(tp);
@@ -11699,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11699 tg3_full_lock(tp, 0); 11752 tg3_full_lock(tp, 0);
11700 11753
11701 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11754 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11702 tg3_init_hw(tp, 1); 11755 if (tg3_restart_hw(tp, 1))
11756 goto out;
11703 11757
11704 tp->timer.expires = jiffies + tp->timer_offset; 11758 tp->timer.expires = jiffies + tp->timer_offset;
11705 add_timer(&tp->timer); 11759 add_timer(&tp->timer);
@@ -11707,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11707 netif_device_attach(dev); 11761 netif_device_attach(dev);
11708 tg3_netif_start(tp); 11762 tg3_netif_start(tp);
11709 11763
11764out:
11710 tg3_full_unlock(tp); 11765 tg3_full_unlock(tp);
11711 } 11766 }
11712 11767
@@ -11733,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev)
11733 tg3_full_lock(tp, 0); 11788 tg3_full_lock(tp, 0);
11734 11789
11735 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11790 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11736 tg3_init_hw(tp, 1); 11791 err = tg3_restart_hw(tp, 1);
11792 if (err)
11793 goto out;
11737 11794
11738 tp->timer.expires = jiffies + tp->timer_offset; 11795 tp->timer.expires = jiffies + tp->timer_offset;
11739 add_timer(&tp->timer); 11796 add_timer(&tp->timer);
11740 11797
11741 tg3_netif_start(tp); 11798 tg3_netif_start(tp);
11742 11799
11800out:
11743 tg3_full_unlock(tp); 11801 tg3_full_unlock(tp);
11744 11802
11745 return 0; 11803 return err;
11746} 11804}
11747 11805
11748static struct pci_driver tg3_driver = { 11806static struct pci_driver tg3_driver = {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f5b0078eb4ad..aa9cd92f46b2 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs)
2742 2742
2743 if (PHYSR0 & PHYSR0_SPDG) 2743 if (PHYSR0 & PHYSR0_SPDG)
2744 status |= VELOCITY_SPEED_1000; 2744 status |= VELOCITY_SPEED_1000;
2745 if (PHYSR0 & PHYSR0_SPD10) 2745 else if (PHYSR0 & PHYSR0_SPD10)
2746 status |= VELOCITY_SPEED_10; 2746 status |= VELOCITY_SPEED_10;
2747 else 2747 else
2748 status |= VELOCITY_SPEED_100; 2748 status |= VELOCITY_SPEED_100;
@@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
2851 u32 status; 2851 u32 status;
2852 status = check_connection_type(vptr->mac_regs); 2852 status = check_connection_type(vptr->mac_regs);
2853 2853
2854 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; 2854 cmd->supported = SUPPORTED_TP |
2855 if (status & VELOCITY_SPEED_100) 2855 SUPPORTED_Autoneg |
2856 SUPPORTED_10baseT_Half |
2857 SUPPORTED_10baseT_Full |
2858 SUPPORTED_100baseT_Half |
2859 SUPPORTED_100baseT_Full |
2860 SUPPORTED_1000baseT_Half |
2861 SUPPORTED_1000baseT_Full;
2862 if (status & VELOCITY_SPEED_1000)
2863 cmd->speed = SPEED_1000;
2864 else if (status & VELOCITY_SPEED_100)
2856 cmd->speed = SPEED_100; 2865 cmd->speed = SPEED_100;
2857 else 2866 else
2858 cmd->speed = SPEED_10; 2867 cmd->speed = SPEED_10;
@@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev)
2896{ 2905{
2897 struct velocity_info *vptr = netdev_priv(dev); 2906 struct velocity_info *vptr = netdev_priv(dev);
2898 struct mac_regs __iomem * regs = vptr->mac_regs; 2907 struct mac_regs __iomem * regs = vptr->mac_regs;
2899 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1; 2908 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2900} 2909}
2901 2910
2902static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2911static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
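In the via-velocity hunks, check_connection_type() turns the 10 Mb/s test into an else-if so a gigabit link no longer also picks up the 100 Mb/s flag through the trailing else, velocity_get_settings() learns to report SPEED_1000, and velocity_get_link() now returns 1 rather than 0 when PHYSR0_LINKGD indicates a good link. A small self-contained sketch of the corrected speed decoding (the bit values here are illustrative, not the real PHYSR0 layout):

    #include <stdio.h>

    #define SPDG  0x08    /* illustrative: gigabit detected */
    #define SPD10 0x04    /* illustrative: 10 Mb/s detected */

    /* Mirrors the fixed if / else-if / else chain: exactly one speed wins. */
    static int decode_speed(unsigned char physr0)
    {
        if (physr0 & SPDG)
            return 1000;
        else if (physr0 & SPD10)
            return 10;
        else
            return 100;
    }

    int main(void)
    {
        printf("gigabit=%d, ten=%d, hundred=%d\n",
               decode_speed(SPDG), decode_speed(SPD10), decode_speed(0));
        return 0;
    }

With the if/else-if/else chain exactly one branch fires, which is what the ethtool path relies on when it maps the status bits to SPEED_1000/100/10.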
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 2c09ec908a3f..435e91ec4620 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -197,7 +197,6 @@ static int c101_open(struct net_device *dev)
197 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); 197 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
198 198
199 set_carrier(port); 199 set_carrier(port);
200 printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));
201 200
202 /* enable MSCI1 CDCD interrupt */ 201 /* enable MSCI1 CDCD interrupt */
203 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); 202 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
@@ -449,4 +448,5 @@ module_exit(c101_cleanup);
449MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 448MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
450MODULE_DESCRIPTION("Moxa C101 serial port driver"); 449MODULE_DESCRIPTION("Moxa C101 serial port driver");
451MODULE_LICENSE("GPL v2"); 450MODULE_LICENSE("GPL v2");
452module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */ 451module_param(hw, charp, 0444);
452MODULE_PARM_DESC(hw, "irq,ram:irq,...");
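The c101.c tail hunk (and the matching n2.c hunk further down) moves the format hint for the hw= option out of a source comment into MODULE_PARM_DESC(), so modinfo can display it. A minimal kernel-module fragment illustrating the idiom; this is kernel code, not a standalone program, and it reuses the driver's existing hw parameter name:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static char *hw;                          /* e.g. "irq,ram:irq,..." */

    module_param(hw, charp, 0444);            /* readable under /sys/module/.../parameters */
    MODULE_PARM_DESC(hw, "irq,ram:irq,...");  /* shown by `modinfo <module>` */
    MODULE_LICENSE("GPL v2");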
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b81263eaede0..fbaab5bf71eb 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
107 dev->hard_header = NULL; 107 dev->hard_header = NULL;
108 dev->type = ARPHRD_PPP; 108 dev->type = ARPHRD_PPP;
109 dev->addr_len = 0; 109 dev->addr_len = 0;
110 netif_dormant_off(dev);
110 return 0; 111 return 0;
111 } 112 }
112 113
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 9456d31cb1c1..f15aa6ba77f1 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -82,6 +82,7 @@ int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr)
82 dev->type = ARPHRD_RAWHDLC; 82 dev->type = ARPHRD_RAWHDLC;
83 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 83 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
84 dev->addr_len = 0; 84 dev->addr_len = 0;
85 netif_dormant_off(dev);
85 return 0; 86 return 0;
86 } 87 }
87 88
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index b1285cc8fee6..d1884987f94e 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -100,6 +100,7 @@ int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
100 dev->tx_queue_len = old_qlen; 100 dev->tx_queue_len = old_qlen;
101 memcpy(dev->dev_addr, "\x00\x01", 2); 101 memcpy(dev->dev_addr, "\x00\x01", 2);
102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); 102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
103 netif_dormant_off(dev);
103 return 0; 104 return 0;
104 } 105 }
105 106
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 07e5eef1fe0f..a867fb411f89 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -212,6 +212,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr)
212 dev->hard_header = NULL; 212 dev->hard_header = NULL;
213 dev->type = ARPHRD_X25; 213 dev->type = ARPHRD_X25;
214 dev->addr_len = 0; 214 dev->addr_len = 0;
215 netif_dormant_off(dev);
215 return 0; 216 return 0;
216 } 217 }
217 218
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index e013b817cab8..dcf46add3adf 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -564,4 +564,5 @@ module_exit(n2_cleanup);
564MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 564MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
565MODULE_DESCRIPTION("RISCom/N2 serial port driver"); 565MODULE_DESCRIPTION("RISCom/N2 serial port driver");
566MODULE_LICENSE("GPL v2"); 566MODULE_LICENSE("GPL v2");
567module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */ 567module_param(hw, charp, 0444);
568MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fa9d2c4edc93..2e8ac995d56f 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -447,6 +447,7 @@ config AIRO_CS
447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) 448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
449 select CRYPTO 449 select CRYPTO
450 select CRYPTO_AES
450 ---help--- 451 ---help---
451 This is the standard Linux driver to support Cisco/Aironet PCMCIA 452 This is the standard Linux driver to support Cisco/Aironet PCMCIA
452 802.11 wireless cards. This driver is the same as the Aironet 453 802.11 wireless cards. This driver is the same as the Aironet
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 3889f79e7128..df317c1e12a8 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3701 } 3701 }
3702 if (sec->flags & SEC_AUTH_MODE) { 3702 if (sec->flags & SEC_AUTH_MODE) {
3703 secinfo->auth_mode = sec->auth_mode; 3703 secinfo->auth_mode = sec->auth_mode;
3704 dprintk(", .auth_mode = %d\n", sec->auth_mode); 3704 dprintk(", .auth_mode = %d", sec->auth_mode);
3705 } 3705 }
3706 dprintk("\n"); 3706 dprintk("\n");
3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && 3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d6ed5781b93a..317ace7f9aae 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2875 if (orinoco_lock(priv, &flags) != 0) 2875 if (orinoco_lock(priv, &flags) != 0)
2876 return -EBUSY; 2876 return -EBUSY;
2877 2877
2878 if (erq->pointer) { 2878 if (erq->length > 0) {
2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) 2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
2880 index = priv->tx_key; 2880 index = priv->tx_key;
2881 2881
@@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2918 if (erq->flags & IW_ENCODE_RESTRICTED) 2918 if (erq->flags & IW_ENCODE_RESTRICTED)
2919 restricted = 1; 2919 restricted = 1;
2920 2920
2921 if (erq->pointer) { 2921 if (erq->pointer && erq->length > 0) {
2922 priv->keys[index].len = cpu_to_le16(xlen); 2922 priv->keys[index].len = cpu_to_le16(xlen);
2923 memset(priv->keys[index].data, 0, 2923 memset(priv->keys[index].data, 0,
2924 sizeof(priv->keys[index].data)); 2924 sizeof(priv->keys[index].data));
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 662ecc8a33ff..c52e9bcf8d02 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface,
1820 zd->dev->name); 1820 zd->dev->name);
1821 1821
1822 usb_set_intfdata(interface, zd); 1822 usb_set_intfdata(interface, zd);
1823 zd1201_enable(zd); /* zd1201 likes to startup enabled, */
1824 zd1201_disable(zd); /* interfering with all the wifis in range */
1823 return 0; 1825 return 0;
1824 1826
1825err_net: 1827err_net:
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 4cd879cb9bdd..1140302ff11d 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -304,6 +304,7 @@ static int __init xpram_setup_sizes(unsigned long pages)
304{ 304{
305 unsigned long mem_needed; 305 unsigned long mem_needed;
306 unsigned long mem_auto; 306 unsigned long mem_auto;
307 unsigned long long size;
307 int mem_auto_no; 308 int mem_auto_no;
308 int i; 309 int i;
309 310
@@ -321,9 +322,19 @@ static int __init xpram_setup_sizes(unsigned long pages)
321 mem_needed = 0; 322 mem_needed = 0;
322 mem_auto_no = 0; 323 mem_auto_no = 0;
323 for (i = 0; i < xpram_devs; i++) { 324 for (i = 0; i < xpram_devs; i++) {
324 if (sizes[i]) 325 if (sizes[i]) {
325 xpram_sizes[i] = 326 size = simple_strtoull(sizes[i], &sizes[i], 0);
326 (memparse(sizes[i], &sizes[i]) + 3) & -4UL; 327 switch (sizes[i][0]) {
328 case 'g':
329 case 'G':
330 size <<= 20;
331 break;
332 case 'm':
333 case 'M':
334 size <<= 10;
335 }
336 xpram_sizes[i] = (size + 3) & -4UL;
337 }
327 if (xpram_sizes[i]) 338 if (xpram_sizes[i])
328 mem_needed += xpram_sizes[i]; 339 mem_needed += xpram_sizes[i];
329 else 340 else
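The xpram hunk replaces memparse() with simple_strtoull() plus explicit suffix handling: the per-device sizes appear to be kept in kilobyte units (hence the 2^20 and 2^10 scale factors for G and M, where memparse() would have scaled to bytes), and the intermediate value is widened to unsigned long long before the usual round-up to a multiple of 4. A userspace sketch of the same parsing, with strtoull() standing in for simple_strtoull():

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse "<n>[G|M]" into kilobyte units and round up to a multiple of 4,
     * mirroring the new xpram_setup_sizes() logic. */
    static unsigned long long parse_size_kb(const char *s)
    {
        char *end;
        unsigned long long size = strtoull(s, &end, 0);

        switch (*end) {
        case 'g':
        case 'G':
            size <<= 20;     /* gigabytes expressed in KB units */
            break;
        case 'm':
        case 'M':
            size <<= 10;     /* megabytes expressed in KB units */
        }
        return (size + 3) & ~3ULL;   /* same rounding as the driver's (size + 3) & -4UL */
    }

    int main(void)
    {
        printf("2G -> %llu KB, 512M -> %llu KB, 1000 -> %llu KB\n",
               parse_size_kb("2G"), parse_size_kb("512M"), parse_size_kb("1000"));
        return 0;
    }

strtoull() with base 0 accepts decimal, octal and hex input, matching simple_strtoull()'s behaviour.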
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 95e285b2e25c..7a84014f2037 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1106,10 +1106,10 @@ raw3270_delete_device(struct raw3270 *rp)
1106 1106
1107 /* Remove from device chain. */ 1107 /* Remove from device chain. */
1108 mutex_lock(&raw3270_mutex); 1108 mutex_lock(&raw3270_mutex);
1109 if (rp->clttydev) 1109 if (rp->clttydev && !IS_ERR(rp->clttydev))
1110 class_device_destroy(class3270, 1110 class_device_destroy(class3270,
1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1112 if (rp->cltubdev) 1112 if (rp->cltubdev && !IS_ERR(rp->cltubdev))
1113 class_device_destroy(class3270, 1113 class_device_destroy(class3270,
1114 MKDEV(IBM_FS3270_MAJOR, rp->minor)); 1114 MKDEV(IBM_FS3270_MAJOR, rp->minor));
1115 list_del_init(&rp->list); 1115 list_del_init(&rp->list);
@@ -1173,21 +1173,37 @@ static struct attribute_group raw3270_attr_group = {
1173 .attrs = raw3270_attrs, 1173 .attrs = raw3270_attrs,
1174}; 1174};
1175 1175
1176static void 1176static int raw3270_create_attributes(struct raw3270 *rp)
1177raw3270_create_attributes(struct raw3270 *rp)
1178{ 1177{
1179 //FIXME: check return code 1178 int rc;
1180 sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1179
1181 rp->clttydev = 1180 rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1182 class_device_create(class3270, NULL, 1181 if (rc)
1183 MKDEV(IBM_TTY3270_MAJOR, rp->minor), 1182 goto out;
1184 &rp->cdev->dev, "tty%s", 1183
1185 rp->cdev->dev.bus_id); 1184 rp->clttydev = class_device_create(class3270, NULL,
1186 rp->cltubdev = 1185 MKDEV(IBM_TTY3270_MAJOR, rp->minor),
1187 class_device_create(class3270, NULL, 1186 &rp->cdev->dev, "tty%s",
1188 MKDEV(IBM_FS3270_MAJOR, rp->minor), 1187 rp->cdev->dev.bus_id);
1189 &rp->cdev->dev, "tub%s", 1188 if (IS_ERR(rp->clttydev)) {
1190 rp->cdev->dev.bus_id); 1189 rc = PTR_ERR(rp->clttydev);
1190 goto out_ttydev;
1191 }
1192
1193 rp->cltubdev = class_device_create(class3270, NULL,
1194 MKDEV(IBM_FS3270_MAJOR, rp->minor),
1195 &rp->cdev->dev, "tub%s",
1196 rp->cdev->dev.bus_id);
1197 if (!IS_ERR(rp->cltubdev))
1198 goto out;
1199
1200 rc = PTR_ERR(rp->cltubdev);
1201 class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1202
1203out_ttydev:
1204 sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1205out:
1206 return rc;
1191} 1207}
1192 1208
1193/* 1209/*
@@ -1255,7 +1271,9 @@ raw3270_set_online (struct ccw_device *cdev)
1255 rc = raw3270_reset_device(rp); 1271 rc = raw3270_reset_device(rp);
1256 if (rc) 1272 if (rc)
1257 goto failure; 1273 goto failure;
1258 raw3270_create_attributes(rp); 1274 rc = raw3270_create_attributes(rp);
1275 if (rc)
1276 goto failure;
1259 set_bit(RAW3270_FLAGS_READY, &rp->flags); 1277 set_bit(RAW3270_FLAGS_READY, &rp->flags);
1260 mutex_lock(&raw3270_mutex); 1278 mutex_lock(&raw3270_mutex);
1261 list_for_each_entry(np, &raw3270_notifier, list) 1279 list_for_each_entry(np, &raw3270_notifier, list)
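raw3270_create_attributes() now returns an error code: sysfs_create_group() and both class_device_create() calls are checked, anything already created is torn down in reverse order on failure, and raw3270_set_online() propagates the result instead of ignoring it. A self-contained sketch of the same create-then-unwind shape with stand-in resources (the stand-ins return 0 or a negative error; the real class_device_create() returns an ERR_PTR instead):

    #include <stdio.h>

    static int  create_group(int fail)  { return fail ? -12 /* -ENOMEM */ : 0; }
    static void remove_group(void)      { puts("attribute group removed"); }
    static int  create_tty(int fail)    { return fail ? -12 : 0; }
    static void destroy_tty(void)       { puts("tty class device destroyed"); }
    static int  create_tub(int fail)    { return fail ? -12 : 0; }

    /* Create three resources; on failure, tear down only what was created,
     * in reverse order, and hand the error back to the caller. */
    static int create_attributes(int fail_group, int fail_tty, int fail_tub)
    {
        int rc;

        rc = create_group(fail_group);
        if (rc)
            goto out;

        rc = create_tty(fail_tty);
        if (rc)
            goto out_group;

        rc = create_tub(fail_tub);
        if (!rc)
            goto out;

        destroy_tty();
    out_group:
        remove_group();
    out:
        return rc;
    }

    int main(void)
    {
        printf("all ok -> %d\n", create_attributes(0, 0, 0));
        printf("tub fails -> %d\n", create_attributes(0, 0, 1));
        return 0;
    }

The same shape recurs in the tape_class.c, tape_core.c and ctcmain.c hunks below: each newly checked creation step gets a matching cleanup on the error path.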
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index a5c68e60fcf4..643b6d078563 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -76,14 +76,22 @@ struct tape_class_device *register_tape_dev(
76 device, 76 device,
77 "%s", tcd->device_name 77 "%s", tcd->device_name
78 ); 78 );
79 sysfs_create_link( 79 rc = PTR_ERR(tcd->class_device);
80 if (rc)
81 goto fail_with_cdev;
82 rc = sysfs_create_link(
80 &device->kobj, 83 &device->kobj,
81 &tcd->class_device->kobj, 84 &tcd->class_device->kobj,
82 tcd->mode_name 85 tcd->mode_name
83 ); 86 );
87 if (rc)
88 goto fail_with_class_device;
84 89
85 return tcd; 90 return tcd;
86 91
92fail_with_class_device:
93 class_device_destroy(tape_class, tcd->char_device->dev);
94
87fail_with_cdev: 95fail_with_cdev:
88 cdev_del(tcd->char_device); 96 cdev_del(tcd->char_device);
89 97
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 122b4d8965c3..2826aed91043 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -543,20 +543,24 @@ int
543tape_generic_probe(struct ccw_device *cdev) 543tape_generic_probe(struct ccw_device *cdev)
544{ 544{
545 struct tape_device *device; 545 struct tape_device *device;
546 int ret;
546 547
547 device = tape_alloc_device(); 548 device = tape_alloc_device();
548 if (IS_ERR(device)) 549 if (IS_ERR(device))
549 return -ENODEV; 550 return -ENODEV;
550 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); 551 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
552 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
553 if (ret) {
554 tape_put_device(device);
555 PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id);
556 return ret;
557 }
551 cdev->dev.driver_data = device; 558 cdev->dev.driver_data = device;
559 cdev->handler = __tape_do_irq;
552 device->cdev = cdev; 560 device->cdev = cdev;
553 device->cdev_id = busid_to_int(cdev->dev.bus_id); 561 device->cdev_id = busid_to_int(cdev->dev.bus_id);
554 cdev->handler = __tape_do_irq; 562 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
555 563 return ret;
556 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
557 sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
558
559 return 0;
560} 564}
561 565
562static inline void 566static inline void
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index f26a2ee3aad8..3cba6c9fab11 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root,
152 struct ccwgroup_device *gdev; 152 struct ccwgroup_device *gdev;
153 int i; 153 int i;
154 int rc; 154 int rc;
155 int del_drvdata;
156 155
157 if (argc > 256) /* disallow dumb users */ 156 if (argc > 256) /* disallow dumb users */
158 return -EINVAL; 157 return -EINVAL;
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root,
163 162
164 atomic_set(&gdev->onoff, 0); 163 atomic_set(&gdev->onoff, 0);
165 164
166 del_drvdata = 0;
167 for (i = 0; i < argc; i++) { 165 for (i = 0; i < argc; i++) {
168 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 166 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
169 167
@@ -180,10 +178,8 @@ ccwgroup_create(struct device *root,
180 rc = -EINVAL; 178 rc = -EINVAL;
181 goto free_dev; 179 goto free_dev;
182 } 180 }
183 }
184 for (i = 0; i < argc; i++)
185 gdev->cdev[i]->dev.driver_data = gdev; 181 gdev->cdev[i]->dev.driver_data = gdev;
186 del_drvdata = 1; 182 }
187 183
188 gdev->creator_id = creator_id; 184 gdev->creator_id = creator_id;
189 gdev->count = argc; 185 gdev->count = argc;
@@ -226,9 +222,9 @@ error:
226free_dev: 222free_dev:
227 for (i = 0; i < argc; i++) 223 for (i = 0; i < argc; i++)
228 if (gdev->cdev[i]) { 224 if (gdev->cdev[i]) {
229 put_device(&gdev->cdev[i]->dev); 225 if (gdev->cdev[i]->dev.driver_data == gdev)
230 if (del_drvdata)
231 gdev->cdev[i]->dev.driver_data = NULL; 226 gdev->cdev[i]->dev.driver_data = NULL;
227 put_device(&gdev->cdev[i]->dev);
232 } 228 }
233 kfree(gdev); 229 kfree(gdev);
234 return rc; 230 return rc;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 0df3af1f08de..828b2d334f0a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1068,6 +1068,7 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
1068 if (count) { 1068 if (count) {
1069 interval = cmb_data->last_update - 1069 interval = cmb_data->last_update -
1070 cdev->private->cmb_start_time; 1070 cdev->private->cmb_start_time;
1071 interval = (interval * 1000) >> 12;
1071 interval /= count; 1072 interval /= count;
1072 } else 1073 } else
1073 interval = -1; 1074 interval = -1;
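The cmf.c hunk converts the measured interval from S/390 TOD-clock units to nanoseconds before dividing by the sample count; assuming the usual TOD format of 4096 clock units per microsecond (bit 51 being the microsecond bit), nanoseconds come out as (tod * 1000) >> 12. A one-line arithmetic check of that conversion:

    #include <stdio.h>

    /* TOD-to-nanoseconds conversion used above: assumes 4096 TOD units per
     * microsecond, so multiply by 1000 and shift right by 12. */
    static unsigned long long tod_to_ns(unsigned long long tod)
    {
        return (tod * 1000) >> 12;
    }

    int main(void)
    {
        /* 4096 TOD units should come out as one microsecond (1000 ns). */
        printf("%llu ns\n", tod_to_ns(4096));
        return 0;
    }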
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ac6e0c7e43d9..7a39e0b0386c 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
152 if (cdev->private->iretry) { 152 if (cdev->private->iretry) {
153 cdev->private->iretry--; 153 cdev->private->iretry--;
154 ret = cio_halt(sch); 154 ret = cio_halt(sch);
155 return (ret == 0) ? -EBUSY : ret; 155 if (ret != -EBUSY)
156 return (ret == 0) ? -EBUSY : ret;
156 } 157 }
157 /* halt io unsuccessful. */ 158 /* halt io unsuccessful. */
158 cdev->private->iretry = 255; /* 255 clear retries. */ 159 cdev->private->iretry = 255; /* 255 clear retries. */
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 20c8eb16f464..8a4b58120146 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -2686,9 +2686,17 @@ static struct attribute_group ctc_attr_group = {
2686static int 2686static int
2687ctc_add_attributes(struct device *dev) 2687ctc_add_attributes(struct device *dev)
2688{ 2688{
2689 device_create_file(dev, &dev_attr_loglevel); 2689 int rc;
2690 device_create_file(dev, &dev_attr_stats); 2690
2691 return 0; 2691 rc = device_create_file(dev, &dev_attr_loglevel);
2692 if (rc)
2693 goto out;
2694 rc = device_create_file(dev, &dev_attr_stats);
2695 if (!rc)
2696 goto out;
2697 device_remove_file(dev, &dev_attr_loglevel);
2698out:
2699 return rc;
2692} 2700}
2693 2701
2694static void 2702static void
@@ -2901,7 +2909,12 @@ ctc_new_device(struct ccwgroup_device *cgdev)
2901 goto out; 2909 goto out;
2902 } 2910 }
2903 2911
2904 ctc_add_attributes(&cgdev->dev); 2912 if (ctc_add_attributes(&cgdev->dev)) {
2913 ctc_netdev_unregister(dev);
2914 dev->priv = NULL;
2915 ctc_free_netdevice(dev, 1);
2916 goto out;
2917 }
2905 2918
2906 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name)); 2919 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2907 2920
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 103c41470bd2..5fff1f93973a 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -8451,10 +8451,11 @@ __qeth_reboot_event_card(struct device *dev, void *data)
8451static int 8451static int
8452qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) 8452qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8453{ 8453{
8454 int ret;
8454 8455
8455 driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, 8456 ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
8456 __qeth_reboot_event_card); 8457 __qeth_reboot_event_card);
8457 return NOTIFY_DONE; 8458 return ret ? NOTIFY_BAD : NOTIFY_DONE;
8458} 8459}
8459 8460
8460 8461
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 16b59773c0bb..935952ef88f1 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -233,7 +233,7 @@ static void __init build_one_sbus(struct device_node *dp, int num_sbus)
233 sbus->ofdev.node = dp; 233 sbus->ofdev.node = dp;
234 sbus->ofdev.dev.parent = NULL; 234 sbus->ofdev.dev.parent = NULL;
235 sbus->ofdev.dev.bus = &sbus_bus_type; 235 sbus->ofdev.dev.bus = &sbus_bus_type;
236 strcpy(sbus->ofdev.dev.bus_id, dp->path_component_name); 236 sprintf(sbus->ofdev.dev.bus_id, "sbus%d", num_sbus);
237 237
238 if (of_device_register(&sbus->ofdev) != 0) 238 if (of_device_register(&sbus->ofdev) != 0)
239 printk(KERN_DEBUG "sbus: device registration error for %s!\n", 239 printk(KERN_DEBUG "sbus: device registration error for %s!\n",
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
index c690c2b89e41..acf292736b4e 100644
--- a/drivers/scsi/53c7xx.c
+++ b/drivers/scsi/53c7xx.c
@@ -3451,12 +3451,12 @@ create_cmd (Scsi_Cmnd *cmd) {
3451 for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4, 3451 for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
3452 cmd_dataout += 4, ++i) { 3452 cmd_dataout += 4, ++i) {
3453 u32 vbuf = cmd->use_sg 3453 u32 vbuf = cmd->use_sg
3454 ? (u32)page_address(((struct scatterlist *)cmd->buffer)[i].page)+ 3454 ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
3455 ((struct scatterlist *)cmd->buffer)[i].offset 3455 ((struct scatterlist *)cmd->request_buffer)[i].offset
3456 : (u32)(cmd->request_buffer); 3456 : (u32)(cmd->request_buffer);
3457 u32 bbuf = virt_to_bus((void *)vbuf); 3457 u32 bbuf = virt_to_bus((void *)vbuf);
3458 u32 count = cmd->use_sg ? 3458 u32 count = cmd->use_sg ?
3459 ((struct scatterlist *)cmd->buffer)[i].length : 3459 ((struct scatterlist *)cmd->request_buffer)[i].length :
3460 cmd->request_bufflen; 3460 cmd->request_bufflen;
3461 3461
3462 /* 3462 /*
@@ -5417,7 +5417,7 @@ insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
5417 5417
5418 if ((buffers = cmd->use_sg)) { 5418 if ((buffers = cmd->use_sg)) {
5419 for (offset = 0, 5419 for (offset = 0,
5420 segment = (struct scatterlist *) cmd->buffer; 5420 segment = (struct scatterlist *) cmd->request_buffer;
5421 buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) && 5421 buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
5422 (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length))))); 5422 (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
5423 --buffers, offset += segment->length, ++segment) 5423 --buffers, offset += segment->length, ++segment)
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 8a4659e94105..bdc6bb262bce 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -911,7 +911,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
911 sp->SCp.ptr = 911 sp->SCp.ptr =
912 (char *) virt_to_phys(sp->request_buffer); 912 (char *) virt_to_phys(sp->request_buffer);
913 } else { 913 } else {
914 sp->SCp.buffer = (struct scatterlist *) sp->buffer; 914 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
915 sp->SCp.buffers_residual = sp->use_sg - 1; 915 sp->SCp.buffers_residual = sp->use_sg - 1;
916 sp->SCp.this_residual = sp->SCp.buffer->length; 916 sp->SCp.this_residual = sp->SCp.buffer->length;
917 if (esp->dma_mmu_get_scsi_sgl) 917 if (esp->dma_mmu_get_scsi_sgl)
@@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
2152 */ 2152 */
2153static int esp_should_clear_sync(Scsi_Cmnd *sp) 2153static int esp_should_clear_sync(Scsi_Cmnd *sp)
2154{ 2154{
2155 unchar cmd1 = sp->cmnd[0]; 2155 unchar cmd = sp->cmnd[0];
2156 unchar cmd2 = sp->data_cmnd[0];
2157 2156
2158 /* These cases are for spinning up a disk and 2157 /* These cases are for spinning up a disk and
2159 * waiting for that spinup to complete. 2158 * waiting for that spinup to complete.
2160 */ 2159 */
2161 if(cmd1 == START_STOP || 2160 if(cmd == START_STOP)
2162 cmd2 == START_STOP)
2163 return 0; 2161 return 0;
2164 2162
2165 if(cmd1 == TEST_UNIT_READY || 2163 if(cmd == TEST_UNIT_READY)
2166 cmd2 == TEST_UNIT_READY)
2167 return 0; 2164 return 0;
2168 2165
2169 /* One more special case for SCSI tape drives, 2166 /* One more special case for SCSI tape drives,
2170 * this is what is used to probe the device for 2167 * this is what is used to probe the device for
2171 * completion of a rewind or tape load operation. 2168 * completion of a rewind or tape load operation.
2172 */ 2169 */
2173 if(sp->device->type == TYPE_TAPE) { 2170 if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
2174 if(cmd1 == MODE_SENSE || 2171 return 0;
2175 cmd2 == MODE_SENSE)
2176 return 0;
2177 }
2178 2172
2179 return 1; 2173 return 1;
2180} 2174}
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index a06f547e87f7..d05681f9d81a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
114MODULE_LICENSE("GPL"); 114MODULE_LICENSE("GPL");
115module_param(NCR_D700, charp, 0); 115module_param(NCR_D700, charp, 0);
116 116
117static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = 117static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; 118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
119 119
120#ifdef MODULE 120#ifdef MODULE
@@ -173,7 +173,7 @@ struct NCR_D700_private {
173 char pad; 173 char pad;
174}; 174};
175 175
176static int 176static int __devinit
177NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, 177NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
178 int slot, u32 region, int differential) 178 int slot, u32 region, int differential)
179{ 179{
@@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data, struct pt_regs *regs)
243 * essentially connectecd to the MCA bus independently, it is easier 243 * essentially connectecd to the MCA bus independently, it is easier
244 * to set them up as two separate host adapters, rather than one 244 * to set them up as two separate host adapters, rather than one
245 * adapter with two channels */ 245 * adapter with two channels */
246static int 246static int __devinit
247NCR_D700_probe(struct device *dev) 247NCR_D700_probe(struct device *dev)
248{ 248{
249 struct NCR_D700_private *p; 249 struct NCR_D700_private *p;
@@ -329,7 +329,7 @@ NCR_D700_probe(struct device *dev)
329 for (i = 0; i < 2; i++) { 329 for (i = 0; i < 2; i++) {
330 int err; 330 int err;
331 331
332 if ((err = NCR_D700_probe_one(p, i, slot, irq, 332 if ((err = NCR_D700_probe_one(p, i, irq, slot,
333 offset_addr + (0x80 * i), 333 offset_addr + (0x80 * i),
334 differential)) != 0) 334 differential)) != 0)
335 printk("D700: SIOP%d: probe failed, error = %d\n", 335 printk("D700: SIOP%d: probe failed, error = %d\n",
@@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev)
349 return 0; 349 return 0;
350} 350}
351 351
352static void 352static void __devexit
353NCR_D700_remove_one(struct Scsi_Host *host) 353NCR_D700_remove_one(struct Scsi_Host *host)
354{ 354{
355 scsi_remove_host(host); 355 scsi_remove_host(host);
@@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host)
359 release_region(host->base, 64); 359 release_region(host->base, 64);
360} 360}
361 361
362static int 362static int __devexit
363NCR_D700_remove(struct device *dev) 363NCR_D700_remove(struct device *dev)
364{ 364{
365 struct NCR_D700_private *p = dev_get_drvdata(dev); 365 struct NCR_D700_private *p = dev_get_drvdata(dev);
@@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = {
380 .name = "NCR_D700", 380 .name = "NCR_D700",
381 .bus = &mca_bus_type, 381 .bus = &mca_bus_type,
382 .probe = NCR_D700_probe, 382 .probe = NCR_D700_probe,
383 .remove = NCR_D700_remove, 383 .remove = __devexit_p(NCR_D700_remove),
384 }, 384 },
385}; 385};
386 386
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 36e63f82d9f8..f974869ea323 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -551,6 +551,11 @@ struct aha152x_hostdata {
551struct aha152x_scdata { 551struct aha152x_scdata {
552 Scsi_Cmnd *next; /* next sc in queue */ 552 Scsi_Cmnd *next; /* next sc in queue */
553 struct semaphore *sem; /* semaphore to block on */ 553 struct semaphore *sem; /* semaphore to block on */
554 unsigned char cmd_len;
555 unsigned char cmnd[MAX_COMMAND_SIZE];
556 unsigned short use_sg;
557 unsigned request_bufflen;
558 void *request_buffer;
554}; 559};
555 560
556 561
@@ -1006,11 +1011,20 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
1006 return FAILED; 1011 return FAILED;
1007 } 1012 }
1008 } else { 1013 } else {
1014 struct aha152x_scdata *sc;
1015
1009 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); 1016 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
1010 if(SCpnt->host_scribble==0) { 1017 if(SCpnt->host_scribble==0) {
1011 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); 1018 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
1012 return FAILED; 1019 return FAILED;
1013 } 1020 }
1021
1022 sc = SCDATA(SCpnt);
1023 memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd));
1024 sc->request_buffer = SCpnt->request_buffer;
1025 sc->request_bufflen = SCpnt->request_bufflen;
1026 sc->use_sg = SCpnt->use_sg;
1027 sc->cmd_len = SCpnt->cmd_len;
1014 } 1028 }
1015 1029
1016 SCNEXT(SCpnt) = NULL; 1030 SCNEXT(SCpnt) = NULL;
@@ -1165,6 +1179,10 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1165 DECLARE_MUTEX_LOCKED(sem); 1179 DECLARE_MUTEX_LOCKED(sem);
1166 struct timer_list timer; 1180 struct timer_list timer;
1167 int ret, issued, disconnected; 1181 int ret, issued, disconnected;
1182 unsigned char old_cmd_len = SCpnt->cmd_len;
1183 unsigned short old_use_sg = SCpnt->use_sg;
1184 void *old_buffer = SCpnt->request_buffer;
1185 unsigned old_bufflen = SCpnt->request_bufflen;
1168 unsigned long flags; 1186 unsigned long flags;
1169 1187
1170#if defined(AHA152X_DEBUG) 1188#if defined(AHA152X_DEBUG)
@@ -1198,11 +1216,11 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1198 add_timer(&timer); 1216 add_timer(&timer);
1199 down(&sem); 1217 down(&sem);
1200 del_timer(&timer); 1218 del_timer(&timer);
1201 1219
1202 SCpnt->cmd_len = SCpnt->old_cmd_len; 1220 SCpnt->cmd_len = old_cmd_len;
1203 SCpnt->use_sg = SCpnt->old_use_sg; 1221 SCpnt->use_sg = old_use_sg;
1204 SCpnt->request_buffer = SCpnt->buffer; 1222 SCpnt->request_buffer = old_buffer;
1205 SCpnt->request_bufflen = SCpnt->bufflen; 1223 SCpnt->request_bufflen = old_bufflen;
1206 1224
1207 DO_LOCK(flags); 1225 DO_LOCK(flags);
1208 1226
@@ -1565,6 +1583,9 @@ static void busfree_run(struct Scsi_Host *shpnt)
1565#endif 1583#endif
1566 1584
1567 if(DONE_SC->SCp.phase & check_condition) { 1585 if(DONE_SC->SCp.phase & check_condition) {
1586 struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
1587 struct aha152x_scdata *sc = SCDATA(cmd);
1588
1568#if 0 1589#if 0
1569 if(HOSTDATA(shpnt)->debug & debug_eh) { 1590 if(HOSTDATA(shpnt)->debug & debug_eh) {
1570 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); 1591 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC));
@@ -1573,13 +1594,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
1573#endif 1594#endif
1574 1595
1575 /* restore old command */ 1596 /* restore old command */
1576 memcpy((void *) DONE_SC->cmnd, (void *) DONE_SC->data_cmnd, sizeof(DONE_SC->data_cmnd)); 1597 memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd));
1577 DONE_SC->request_buffer = DONE_SC->buffer; 1598 cmd->request_buffer = sc->request_buffer;
1578 DONE_SC->request_bufflen = DONE_SC->bufflen; 1599 cmd->request_bufflen = sc->request_bufflen;
1579 DONE_SC->use_sg = DONE_SC->old_use_sg; 1600 cmd->use_sg = sc->use_sg;
1580 DONE_SC->cmd_len = DONE_SC->old_cmd_len; 1601 cmd->cmd_len = sc->cmd_len;
1581 1602
1582 DONE_SC->SCp.Status = 0x02; 1603 cmd->SCp.Status = 0x02;
1583 1604
1584 HOSTDATA(shpnt)->commands--; 1605 HOSTDATA(shpnt)->commands--;
1585 if (!HOSTDATA(shpnt)->commands) 1606 if (!HOSTDATA(shpnt)->commands)
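The aha152x hunks cope with the removal of the old Scsi_Cmnd backup fields (buffer, bufflen, old_use_sg, old_cmd_len, data_cmnd): the command bytes and buffer description are now snapshotted into the driver's own per-command aha152x_scdata when the command is queued, restored from that snapshot once the automatic REQUEST SENSE completes in busfree_run(), and saved on the stack across the reset command in aha152x_device_reset(). A self-contained sketch of the snapshot/restore idea with simplified structures (use_sg is omitted for brevity):

    #include <stdio.h>
    #include <string.h>

    #define CMD_SIZE 16

    /* Simplified stand-ins for struct scsi_cmnd and the driver-private data
     * that aha152x keeps behind cmd->host_scribble. */
    struct cmd {
        unsigned char cmnd[CMD_SIZE];
        unsigned char cmd_len;
        void *request_buffer;
        unsigned request_bufflen;
    };

    struct scdata {
        unsigned char cmnd[CMD_SIZE];
        unsigned char cmd_len;
        void *request_buffer;
        unsigned request_bufflen;
    };

    static void save_cmd(struct scdata *sc, const struct cmd *c)
    {
        memcpy(sc->cmnd, c->cmnd, sizeof(sc->cmnd));
        sc->cmd_len = c->cmd_len;
        sc->request_buffer = c->request_buffer;
        sc->request_bufflen = c->request_bufflen;
    }

    static void restore_cmd(struct cmd *c, const struct scdata *sc)
    {
        memcpy(c->cmnd, sc->cmnd, sizeof(sc->cmnd));
        c->cmd_len = sc->cmd_len;
        c->request_buffer = sc->request_buffer;
        c->request_bufflen = sc->request_bufflen;
    }

    int main(void)
    {
        char buf[64];
        struct cmd c = { { 0x28 /* READ(10) */ }, 10, buf, sizeof(buf) };
        struct scdata sc;

        save_cmd(&sc, &c);      /* before overwriting with REQUEST SENSE */
        c.cmnd[0] = 0x03;       /* REQUEST SENSE */
        c.cmd_len = 6;
        restore_cmd(&c, &sc);   /* after the sense data has arrived */
        printf("restored opcode 0x%02x, len %u\n", c.cmnd[0], (unsigned)c.cmd_len);
        return 0;
    }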
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index a1e8ca758594..653818d2f802 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -7289,7 +7289,7 @@ ahd_reset_cmds_pending(struct ahd_softc *ahd)
7289 ahd->flags &= ~AHD_UPDATE_PEND_CMDS; 7289 ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
7290} 7290}
7291 7291
7292void 7292static void
7293ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) 7293ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
7294{ 7294{
7295 cam_status ostat; 7295 cam_status ostat;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index b244c7124179..998999c0a972 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -243,25 +243,6 @@ ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
243static uint32_t aic79xx_no_reset; 243static uint32_t aic79xx_no_reset;
244 244
245/* 245/*
246 * Certain PCI motherboards will scan PCI devices from highest to lowest,
247 * others scan from lowest to highest, and they tend to do all kinds of
248 * strange things when they come into contact with PCI bridge chips. The
249 * net result of all this is that the PCI card that is actually used to boot
250 * the machine is very hard to detect. Most motherboards go from lowest
251 * PCI slot number to highest, and the first SCSI controller found is the
252 * one you boot from. The only exceptions to this are when a controller
253 * has its BIOS disabled. So, we by default sort all of our SCSI controllers
254 * from lowest PCI slot number to highest PCI slot number. We also force
255 * all controllers with their BIOS disabled to the end of the list. This
256 * works on *almost* all computers. Where it doesn't work, we have this
257 * option. Setting this option to non-0 will reverse the order of the sort
258 * to highest first, then lowest, but will still leave cards with their BIOS
259 * disabled at the very end. That should fix everyone up unless there are
260 * really strange cirumstances.
261 */
262static uint32_t aic79xx_reverse_scan;
263
264/*
265 * Should we force EXTENDED translation on a controller. 246 * Should we force EXTENDED translation on a controller.
266 * 0 == Use whatever is in the SEEPROM or default to off 247 * 0 == Use whatever is in the SEEPROM or default to off
267 * 1 == Use whatever is in the SEEPROM or default to on 248 * 1 == Use whatever is in the SEEPROM or default to on
@@ -350,7 +331,6 @@ MODULE_PARM_DESC(aic79xx,
350" periodically to prevent tag starvation.\n" 331" periodically to prevent tag starvation.\n"
351" This may be required by some older disk\n" 332" This may be required by some older disk\n"
352" or drives/RAID arrays.\n" 333" or drives/RAID arrays.\n"
353" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
354" tag_info:<tag_str> Set per-target tag depth\n" 334" tag_info:<tag_str> Set per-target tag depth\n"
355" global_tag_depth:<int> Global tag depth for all targets on all buses\n" 335" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
356" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" 336" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
@@ -1031,7 +1011,6 @@ aic79xx_setup(char *s)
1031#ifdef AHD_DEBUG 1011#ifdef AHD_DEBUG
1032 { "debug", &ahd_debug }, 1012 { "debug", &ahd_debug },
1033#endif 1013#endif
1034 { "reverse_scan", &aic79xx_reverse_scan },
1035 { "periodic_otag", &aic79xx_periodic_otag }, 1014 { "periodic_otag", &aic79xx_periodic_otag },
1036 { "pci_parity", &aic79xx_pci_parity }, 1015 { "pci_parity", &aic79xx_pci_parity },
1037 { "seltime", &aic79xx_seltime }, 1016 { "seltime", &aic79xx_seltime },
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index debf3e2a0798..aa4be8a31415 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -353,7 +353,6 @@ MODULE_PARM_DESC(aic7xxx,
353" periodically to prevent tag starvation.\n" 353" periodically to prevent tag starvation.\n"
354" This may be required by some older disk\n" 354" This may be required by some older disk\n"
355" drives or RAID arrays.\n" 355" drives or RAID arrays.\n"
356" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
357" tag_info:<tag_str> Set per-target tag depth\n" 356" tag_info:<tag_str> Set per-target tag depth\n"
358" global_tag_depth:<int> Global tag depth for every target\n" 357" global_tag_depth:<int> Global tag depth for every target\n"
359" on every bus\n" 358" on every bus\n"
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 3e1053f111dc..4cf7afc31cc7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
2427 info->stats.aborts += 1; 2427 info->stats.aborts += 1;
2428 2428
2429 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); 2429 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no);
2430 __scsi_print_command(SCpnt->data_cmnd); 2430 __scsi_print_command(SCpnt->cmnd);
2431 2431
2432 print_debug_list(); 2432 print_debug_list();
2433 fas216_dumpstate(info); 2433 fas216_dumpstate(info);
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 94b1261a259d..19745a31072b 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -105,9 +105,6 @@ enum {
105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */ 105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ 106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ 107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108 PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
109 /* ICH6/7 use different scheme for map value */
110 PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
111 108
112 /* combined mode. if set, PATA is channel 0. 109 /* combined mode. if set, PATA is channel 0.
113 * if clear, PATA is channel 1. 110 * if clear, PATA is channel 1.
@@ -126,6 +123,7 @@ enum {
126 ich6_sata = 4, 123 ich6_sata = 4,
127 ich6_sata_ahci = 5, 124 ich6_sata_ahci = 5,
128 ich6m_sata_ahci = 6, 125 ich6m_sata_ahci = 6,
126 ich8_sata_ahci = 7,
129 127
130 /* constants for mapping table */ 128 /* constants for mapping table */
131 P0 = 0, /* port 0 */ 129 P0 = 0, /* port 0 */
@@ -141,11 +139,19 @@ enum {
141 139
142struct piix_map_db { 140struct piix_map_db {
143 const u32 mask; 141 const u32 mask;
142 const u16 port_enable;
143 const int present_shift;
144 const int map[][4]; 144 const int map[][4];
145}; 145};
146 146
147struct piix_host_priv {
148 const int *map;
149 const struct piix_map_db *map_db;
150};
151
147static int piix_init_one (struct pci_dev *pdev, 152static int piix_init_one (struct pci_dev *pdev,
148 const struct pci_device_id *ent); 153 const struct pci_device_id *ent);
154static void piix_host_stop(struct ata_host_set *host_set);
149static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 155static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
150static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 156static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
151static void piix_pata_error_handler(struct ata_port *ap); 157static void piix_pata_error_handler(struct ata_port *ap);
@@ -186,11 +192,11 @@ static const struct pci_device_id piix_pci_tbl[] = {
186 /* Enterprise Southbridge 2 (where's the datasheet?) */ 192 /* Enterprise Southbridge 2 (where's the datasheet?) */
187 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 193 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
188 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ 194 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
189 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 195 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
190 /* SATA Controller 2 IDE (ICH8, ditto) */ 196 /* SATA Controller 2 IDE (ICH8, ditto) */
191 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 197 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
192 /* Mobile SATA Controller IDE (ICH8M, ditto) */ 198 /* Mobile SATA Controller IDE (ICH8M, ditto) */
193 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, 199 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
194 200
195 { } /* terminate list */ 201 { } /* terminate list */
196}; 202};
@@ -254,7 +260,7 @@ static const struct ata_port_operations piix_pata_ops = {
254 260
255 .port_start = ata_port_start, 261 .port_start = ata_port_start,
256 .port_stop = ata_port_stop, 262 .port_stop = ata_port_stop,
257 .host_stop = ata_host_stop, 263 .host_stop = piix_host_stop,
258}; 264};
259 265
260static const struct ata_port_operations piix_sata_ops = { 266static const struct ata_port_operations piix_sata_ops = {
@@ -284,11 +290,13 @@ static const struct ata_port_operations piix_sata_ops = {
284 290
285 .port_start = ata_port_start, 291 .port_start = ata_port_start,
286 .port_stop = ata_port_stop, 292 .port_stop = ata_port_stop,
287 .host_stop = ata_host_stop, 293 .host_stop = piix_host_stop,
288}; 294};
289 295
290static struct piix_map_db ich5_map_db = { 296static const struct piix_map_db ich5_map_db = {
291 .mask = 0x7, 297 .mask = 0x7,
298 .port_enable = 0x3,
299 .present_shift = 4,
292 .map = { 300 .map = {
293 /* PM PS SM SS MAP */ 301 /* PM PS SM SS MAP */
294 { P0, NA, P1, NA }, /* 000b */ 302 { P0, NA, P1, NA }, /* 000b */
@@ -302,8 +310,10 @@ static struct piix_map_db ich5_map_db = {
302 }, 310 },
303}; 311};
304 312
305static struct piix_map_db ich6_map_db = { 313static const struct piix_map_db ich6_map_db = {
306 .mask = 0x3, 314 .mask = 0x3,
315 .port_enable = 0xf,
316 .present_shift = 4,
307 .map = { 317 .map = {
308 /* PM PS SM SS MAP */ 318 /* PM PS SM SS MAP */
309 { P0, P2, P1, P3 }, /* 00b */ 319 { P0, P2, P1, P3 }, /* 00b */
@@ -313,8 +323,10 @@ static struct piix_map_db ich6_map_db = {
313 }, 323 },
314}; 324};
315 325
316static struct piix_map_db ich6m_map_db = { 326static const struct piix_map_db ich6m_map_db = {
317 .mask = 0x3, 327 .mask = 0x3,
328 .port_enable = 0x5,
329 .present_shift = 4,
318 .map = { 330 .map = {
319 /* PM PS SM SS MAP */ 331 /* PM PS SM SS MAP */
320 { P0, P2, RV, RV }, /* 00b */ 332 { P0, P2, RV, RV }, /* 00b */
@@ -324,6 +336,28 @@ static struct piix_map_db ich6m_map_db = {
324 }, 336 },
325}; 337};
326 338
339static const struct piix_map_db ich8_map_db = {
340 .mask = 0x3,
341 .port_enable = 0x3,
342 .present_shift = 8,
343 .map = {
344 /* PM PS SM SS MAP */
345 { P0, NA, P1, NA }, /* 00b (hardwired) */
346 { RV, RV, RV, RV },
347 { RV, RV, RV, RV }, /* 10b (never) */
348 { RV, RV, RV, RV },
349 },
350};
351
352static const struct piix_map_db *piix_map_db_table[] = {
353 [ich5_sata] = &ich5_map_db,
354 [esb_sata] = &ich5_map_db,
355 [ich6_sata] = &ich6_map_db,
356 [ich6_sata_ahci] = &ich6_map_db,
357 [ich6m_sata_ahci] = &ich6m_map_db,
358 [ich8_sata_ahci] = &ich8_map_db,
359};
360
327static struct ata_port_info piix_port_info[] = { 361static struct ata_port_info piix_port_info[] = {
328 /* piix4_pata */ 362 /* piix4_pata */
329 { 363 {
@@ -356,63 +390,69 @@ static struct ata_port_info piix_port_info[] = {
356 /* ich5_sata */ 390 /* ich5_sata */
357 { 391 {
358 .sht = &piix_sht, 392 .sht = &piix_sht,
359 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | 393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
360 PIIX_FLAG_CHECKINTR,
361 .pio_mask = 0x1f, /* pio0-4 */ 394 .pio_mask = 0x1f, /* pio0-4 */
362 .mwdma_mask = 0x07, /* mwdma0-2 */ 395 .mwdma_mask = 0x07, /* mwdma0-2 */
363 .udma_mask = 0x7f, /* udma0-6 */ 396 .udma_mask = 0x7f, /* udma0-6 */
364 .port_ops = &piix_sata_ops, 397 .port_ops = &piix_sata_ops,
365 .private_data = &ich5_map_db,
366 }, 398 },
367 399
368 /* i6300esb_sata */ 400 /* i6300esb_sata */
369 { 401 {
370 .sht = &piix_sht, 402 .sht = &piix_sht,
371 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | 403 .host_flags = ATA_FLAG_SATA |
372 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, 404 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
373 .pio_mask = 0x1f, /* pio0-4 */ 405 .pio_mask = 0x1f, /* pio0-4 */
374 .mwdma_mask = 0x07, /* mwdma0-2 */ 406 .mwdma_mask = 0x07, /* mwdma0-2 */
375 .udma_mask = 0x7f, /* udma0-6 */ 407 .udma_mask = 0x7f, /* udma0-6 */
376 .port_ops = &piix_sata_ops, 408 .port_ops = &piix_sata_ops,
377 .private_data = &ich5_map_db,
378 }, 409 },
379 410
380 /* ich6_sata */ 411 /* ich6_sata */
381 { 412 {
382 .sht = &piix_sht, 413 .sht = &piix_sht,
383 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 414 .host_flags = ATA_FLAG_SATA |
384 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, 415 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
385 .pio_mask = 0x1f, /* pio0-4 */ 416 .pio_mask = 0x1f, /* pio0-4 */
386 .mwdma_mask = 0x07, /* mwdma0-2 */ 417 .mwdma_mask = 0x07, /* mwdma0-2 */
387 .udma_mask = 0x7f, /* udma0-6 */ 418 .udma_mask = 0x7f, /* udma0-6 */
388 .port_ops = &piix_sata_ops, 419 .port_ops = &piix_sata_ops,
389 .private_data = &ich6_map_db,
390 }, 420 },
391 421
392 /* ich6_sata_ahci */ 422 /* ich6_sata_ahci */
393 { 423 {
394 .sht = &piix_sht, 424 .sht = &piix_sht,
395 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 425 .host_flags = ATA_FLAG_SATA |
396 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 426 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
397 PIIX_FLAG_AHCI, 427 PIIX_FLAG_AHCI,
398 .pio_mask = 0x1f, /* pio0-4 */ 428 .pio_mask = 0x1f, /* pio0-4 */
399 .mwdma_mask = 0x07, /* mwdma0-2 */ 429 .mwdma_mask = 0x07, /* mwdma0-2 */
400 .udma_mask = 0x7f, /* udma0-6 */ 430 .udma_mask = 0x7f, /* udma0-6 */
401 .port_ops = &piix_sata_ops, 431 .port_ops = &piix_sata_ops,
402 .private_data = &ich6_map_db,
403 }, 432 },
404 433
405 /* ich6m_sata_ahci */ 434 /* ich6m_sata_ahci */
406 { 435 {
407 .sht = &piix_sht, 436 .sht = &piix_sht,
408 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 437 .host_flags = ATA_FLAG_SATA |
438 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
439 PIIX_FLAG_AHCI,
440 .pio_mask = 0x1f, /* pio0-4 */
441 .mwdma_mask = 0x07, /* mwdma0-2 */
442 .udma_mask = 0x7f, /* udma0-6 */
443 .port_ops = &piix_sata_ops,
444 },
445
446 /* ich8_sata_ahci */
447 {
448 .sht = &piix_sht,
449 .host_flags = ATA_FLAG_SATA |
409 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 450 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
410 PIIX_FLAG_AHCI, 451 PIIX_FLAG_AHCI,
411 .pio_mask = 0x1f, /* pio0-4 */ 452 .pio_mask = 0x1f, /* pio0-4 */
412 .mwdma_mask = 0x07, /* mwdma0-2 */ 453 .mwdma_mask = 0x07, /* mwdma0-2 */
413 .udma_mask = 0x7f, /* udma0-6 */ 454 .udma_mask = 0x7f, /* udma0-6 */
414 .port_ops = &piix_sata_ops, 455 .port_ops = &piix_sata_ops,
415 .private_data = &ich6m_map_db,
416 }, 456 },
417}; 457};
418 458
@@ -508,46 +548,29 @@ static void piix_pata_error_handler(struct ata_port *ap)
508static int piix_sata_prereset(struct ata_port *ap) 548static int piix_sata_prereset(struct ata_port *ap)
509{ 549{
510 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 550 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
511 const unsigned int *map = ap->host_set->private_data; 551 struct piix_host_priv *hpriv = ap->host_set->private_data;
552 const unsigned int *map = hpriv->map;
512 int base = 2 * ap->hard_port_no; 553 int base = 2 * ap->hard_port_no;
513 unsigned int present_mask = 0; 554 unsigned int present = 0;
514 int port, i; 555 int port, i;
515 u8 pcs; 556 u16 pcs;
516 557
517 pci_read_config_byte(pdev, ICH5_PCS, &pcs); 558 pci_read_config_word(pdev, ICH5_PCS, &pcs);
518 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base); 559 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
519 560
520 /* enable all ports on this ap and wait for them to settle */
521 for (i = 0; i < 2; i++) {
522 port = map[base + i];
523 if (port >= 0)
524 pcs |= 1 << port;
525 }
526
527 pci_write_config_byte(pdev, ICH5_PCS, pcs);
528 msleep(100);
529
530 /* let's see which devices are present */
531 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
532
533 for (i = 0; i < 2; i++) { 561 for (i = 0; i < 2; i++) {
534 port = map[base + i]; 562 port = map[base + i];
535 if (port < 0) 563 if (port < 0)
536 continue; 564 continue;
537 if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port)) 565 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
538 present_mask |= 1 << i; 566 (pcs & 1 << (hpriv->map_db->present_shift + port)))
539 else 567 present = 1;
540 pcs &= ~(1 << port);
541 } 568 }
542 569
543 /* disable offline ports on non-AHCI controllers */
544 if (!(ap->flags & PIIX_FLAG_AHCI))
545 pci_write_config_byte(pdev, ICH5_PCS, pcs);
546
547 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", 570 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
548 ap->id, pcs, present_mask); 571 ap->id, pcs, present_mask);
549 572
550 if (!present_mask) { 573 if (!present) {
551 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); 574 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
552 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 575 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
553 return 0; 576 return 0;
@@ -761,10 +784,27 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
761 return no_piix_dma; 784 return no_piix_dma;
762} 785}
763 786
787static void __devinit piix_init_pcs(struct pci_dev *pdev,
788 const struct piix_map_db *map_db)
789{
790 u16 pcs, new_pcs;
791
792 pci_read_config_word(pdev, ICH5_PCS, &pcs);
793
794 new_pcs = pcs | map_db->port_enable;
795
796 if (new_pcs != pcs) {
797 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
798 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
799 msleep(150);
800 }
801}
802
764static void __devinit piix_init_sata_map(struct pci_dev *pdev, 803static void __devinit piix_init_sata_map(struct pci_dev *pdev,
765 struct ata_port_info *pinfo) 804 struct ata_port_info *pinfo,
805 const struct piix_map_db *map_db)
766{ 806{
767 struct piix_map_db *map_db = pinfo[0].private_data; 807 struct piix_host_priv *hpriv = pinfo[0].private_data;
768 const unsigned int *map; 808 const unsigned int *map;
769 int i, invalid_map = 0; 809 int i, invalid_map = 0;
770 u8 map_value; 810 u8 map_value;
@@ -805,8 +845,8 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
805 dev_printk(KERN_ERR, &pdev->dev, 845 dev_printk(KERN_ERR, &pdev->dev,
806 "invalid MAP value %u\n", map_value); 846 "invalid MAP value %u\n", map_value);
807 847
808 pinfo[0].private_data = (void *)map; 848 hpriv->map = map;
809 pinfo[1].private_data = (void *)map; 849 hpriv->map_db = map_db;
810} 850}
811 851
812/** 852/**
@@ -829,6 +869,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
829 static int printed_version; 869 static int printed_version;
830 struct ata_port_info port_info[2]; 870 struct ata_port_info port_info[2];
831 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; 871 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
872 struct piix_host_priv *hpriv;
832 unsigned long host_flags; 873 unsigned long host_flags;
833 874
834 if (!printed_version++) 875 if (!printed_version++)
@@ -839,8 +880,14 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
839 if (!in_module_init) 880 if (!in_module_init)
840 return -ENODEV; 881 return -ENODEV;
841 882
883 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
884 if (!hpriv)
885 return -ENOMEM;
886
842 port_info[0] = piix_port_info[ent->driver_data]; 887 port_info[0] = piix_port_info[ent->driver_data];
843 port_info[1] = piix_port_info[ent->driver_data]; 888 port_info[1] = piix_port_info[ent->driver_data];
889 port_info[0].private_data = hpriv;
890 port_info[1].private_data = hpriv;
844 891
845 host_flags = port_info[0].host_flags; 892 host_flags = port_info[0].host_flags;
846 893
@@ -855,8 +902,11 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
855 } 902 }
856 903
857 /* Initialize SATA map */ 904 /* Initialize SATA map */
858 if (host_flags & ATA_FLAG_SATA) 905 if (host_flags & ATA_FLAG_SATA) {
859 piix_init_sata_map(pdev, port_info); 906 piix_init_sata_map(pdev, port_info,
907 piix_map_db_table[ent->driver_data]);
908 piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]);
909 }
860 910
861 /* On ICH5, some BIOSen disable the interrupt using the 911 /* On ICH5, some BIOSen disable the interrupt using the
862 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. 912 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -879,6 +929,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
879 return ata_pci_init_one(pdev, ppinfo, 2); 929 return ata_pci_init_one(pdev, ppinfo, 2);
880} 930}
881 931
932static void piix_host_stop(struct ata_host_set *host_set)
933{
934 if (host_set->next == NULL)
935 kfree(host_set->private_data);
936 ata_host_stop(host_set);
937}
938
882static int __init piix_init(void) 939static int __init piix_init(void)
883{ 940{
884 int rc; 941 int rc;
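The ata_piix rework moves port enabling out of the prereset path into a one-shot piix_init_pcs() that ORs the map's port_enable bits into the PCS register at init time, and presence detection now tests bit (present_shift + port), where present_shift is 4 for the ICH5/ICH6 families and 8 for the new ICH8 entry; a per-host piix_host_priv carries the map and map_db and is freed from piix_host_stop(). A self-contained sketch of the presence test (the map contents and register value are made up for the example):

    #include <stdio.h>

    /* Mirror of the new presence test: for each of the two ports mapped onto
     * this ATA channel, look at bit (present_shift + port) of the PCS word. */
    static int sata_present(unsigned short pcs, const int *map, int base,
                            int present_shift)
    {
        int i;

        for (i = 0; i < 2; i++) {
            int port = map[base + i];

            if (port < 0)                 /* NA / reserved slot */
                continue;
            if (pcs & (1u << (present_shift + port)))
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* Hypothetical ICH8-style map: primary channel = port 0, secondary = port 1. */
        static const int map[4] = { 0, -1, 1, -1 };

        /* Present bits start at bit 8 on ICH8; pretend only port 1 has a device. */
        unsigned short pcs = 1u << (8 + 1);

        printf("channel 0: %d, channel 1: %d\n",
               sata_present(pcs, map, 0, 8), sata_present(pcs, map, 2, 8));
        return 0;
    }

Enabling the ports once at init also removes the enable/settle/re-read sequence the old prereset code performed on every reset.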
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 007a14e5c3fd..e397129c90d1 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -507,7 +507,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
507 */ 507 */
508 508
509 if (cmd->use_sg) { 509 if (cmd->use_sg) {
510 cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 510 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
511 cmd->SCp.buffers_residual = cmd->use_sg - 1; 511 cmd->SCp.buffers_residual = cmd->use_sg - 1;
512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+ 512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+
513 cmd->SCp.buffer->offset; 513 cmd->SCp.buffer->offset;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index dddd2acce76f..61f6024b61ba 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -5,6 +5,7 @@
5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) 5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
6 * by D. Gilbert and aeb (20020609) 6 * by D. Gilbert and aeb (20020609)
7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025
8 * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
8 */ 9 */
9 10
10#include <linux/blkdev.h> 11#include <linux/blkdev.h>
@@ -36,55 +37,56 @@ static const char * cdb_byte0_names[] = {
36/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", 37/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
37/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, 38/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL,
38 "Reasssign Blocks", 39 "Reasssign Blocks",
39/* 08-0d */ "Read (6)", NULL, "Write (6)", "Seek (6)", NULL, NULL, 40/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL,
40/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", 41/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
41/* 13-16 */ "Verify (6)", "Recover Buffered Data", "Mode Select (6)", 42/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)",
42 "Reserve (6)", 43 "Reserve(6)",
43/* 17-1a */ "Release (6)", "Copy", "Erase", "Mode Sense (6)", 44/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)",
44/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", 45/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic",
45/* 1e-1f */ "Prevent/Allow Medium Removal", NULL, 46/* 1e-1f */ "Prevent/Allow Medium Removal", NULL,
46/* 20-22 */ NULL, NULL, NULL, 47/* 20-22 */ NULL, NULL, NULL,
47/* 23-28 */ "Read Format Capacities", "Set Window", 48/* 23-28 */ "Read Format Capacities", "Set Window",
48 "Read Capacity (10)", NULL, NULL, "Read (10)", 49 "Read Capacity(10)", NULL, NULL, "Read(10)",
49/* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase (10)", 50/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)",
50 "Read updated block", 51 "Read updated block",
51/* 2e-31 */ "Write Verify (10)", "Verify (10)", "Search High", "Search Equal", 52/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal",
52/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", 53/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position",
53/* 35-37 */ "Synchronize Cache (10)", "Lock/Unlock Cache (10)", 54/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)",
54 "Read Defect Data(10)", 55 "Read Defect Data(10)",
55/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", 56/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer",
56 "Read Buffer", 57 "Read Buffer",
57/* 3d-3f */ "Update Block", "Read Long (10)", "Write Long (10)", 58/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
58/* 40-41 */ "Change Definition", "Write Same (10)", 59/* 40-41 */ "Change Definition", "Write Same(10)",
59/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", 60/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support",
60 "Play audio (10)", "Get configuration", "Play audio msf", 61 "Play audio(10)", "Get configuration", "Play audio msf",
61 "Play audio track/index", 62 "Play audio track/index",
62/* 49-4f */ "Play track relative (10)", "Get event status notification", 63/* 49-4f */ "Play track relative(10)", "Get event status notification",
63 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", 64 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
64 NULL, 65 NULL,
65/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", 66/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info",
66 "Reserve track", "Send OPC info", "Mode Select (10)", 67 "Reserve track", "Send OPC info", "Mode Select(10)",
67/* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue", 68/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue",
68 "Mode Sense (10)", "Close track/session", 69 "Mode Sense(10)", "Close track/session",
69/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", 70/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
70 "Persistent reserve out", 71 "Persistent reserve out",
71/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 72/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
72/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 73/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
73/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 74/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
74/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", 75/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length",
75/* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy", 76/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
76 "Receive copy results", 77 "Receive copy results",
77/* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out", 78/* 85-89 */ "ATA command pass through(16)", "Access control in",
78 "Read (16)", "Memory Export Out (16)", 79 "Access control out", "Read(16)", "Memory Export Out(16)",
79/* 8a-8f */ "Write (16)", NULL, "Read attributes", "Write attributes", 80/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes",
80 "Write and verify (16)", "Verify (16)", 81 "Write and verify(16)", "Verify(16)",
81/* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)", 82/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
82 "Lock/unlock cache (16)", "Write same (16)", NULL, 83 "Lock/unlock cache(16)", "Write same(16)", NULL,
83/* 95-99 */ NULL, NULL, NULL, NULL, NULL, 84/* 95-99 */ NULL, NULL, NULL, NULL, NULL,
84/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in (16)", 85/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)",
85 "Service action out (16)", 86 "Service action out(16)",
86/* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance in", 87/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank",
87 "Maintenance out", "Move medium/play audio(12)", 88 "Security protocol in", "Maintenance in", "Maintenance out",
89 "Move medium/play audio(12)",
88/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", 90/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)",
89 "Play track relative(12)", 91 "Play track relative(12)",
90/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", 92/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance",
@@ -92,12 +94,12 @@ static const char * cdb_byte0_names[] = {
92/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", 94/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)",
93/* b2-b4 */ "Search data low(12)", "Set limits(12)", 95/* b2-b4 */ "Search data low(12)", "Set limits(12)",
94 "Read element status attached", 96 "Read element status attached",
95/* b5-b6 */ "Request volume element address", "Send volume tag, set streaming", 97/* b5-b6 */ "Security protocol out", "Send volume tag, set streaming",
96/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", 98/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf",
97/* ba-bc */ "Redundancy group (in), Scan", 99/* ba-bc */ "Redundancy group (in), Scan",
98 "Redundancy group (out), Set cd-rom speed", "Spare in, Play cd", 100 "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd",
99/* bd-bf */ "Spare out, Mechanism status", "Volume set in, Read cd", 101/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd",
100 "Volume set out, Send DVD structure", 102 "Volume set (out), Send DVD structure",
101}; 103};
102 104
103struct value_name_pair { 105struct value_name_pair {
@@ -112,6 +114,7 @@ static const struct value_name_pair maint_in_arr[] = {
112 {0xc, "Report supported operation codes"}, 114 {0xc, "Report supported operation codes"},
113 {0xd, "Report supported task management functions"}, 115 {0xd, "Report supported task management functions"},
114 {0xe, "Report priority"}, 116 {0xe, "Report priority"},
117 {0xf, "Report timestamp"},
115}; 118};
116#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) 119#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
117 120
@@ -120,6 +123,7 @@ static const struct value_name_pair maint_out_arr[] = {
120 {0xa, "Set target port groups"}, 123 {0xa, "Set target port groups"},
121 {0xb, "Change aliases"}, 124 {0xb, "Change aliases"},
122 {0xe, "Set priority"}, 125 {0xe, "Set priority"},
 126 {0xf, "Set timestamp"},
123}; 127};
124#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) 128#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
125 129
@@ -427,6 +431,7 @@ static struct error_info additional[] =
427 {0x001A, "Rewind operation in progress"}, 431 {0x001A, "Rewind operation in progress"},
428 {0x001B, "Set capacity operation in progress"}, 432 {0x001B, "Set capacity operation in progress"},
429 {0x001C, "Verify operation in progress"}, 433 {0x001C, "Verify operation in progress"},
434 {0x001D, "ATA pass through information available"},
430 435
431 {0x0100, "No index/sector signal"}, 436 {0x0100, "No index/sector signal"},
432 437
@@ -438,7 +443,7 @@ static struct error_info additional[] =
438 443
439 {0x0400, "Logical unit not ready, cause not reportable"}, 444 {0x0400, "Logical unit not ready, cause not reportable"},
440 {0x0401, "Logical unit is in process of becoming ready"}, 445 {0x0401, "Logical unit is in process of becoming ready"},
441 {0x0402, "Logical unit not ready, initializing cmd. required"}, 446 {0x0402, "Logical unit not ready, initializing command required"},
442 {0x0403, "Logical unit not ready, manual intervention required"}, 447 {0x0403, "Logical unit not ready, manual intervention required"},
443 {0x0404, "Logical unit not ready, format in progress"}, 448 {0x0404, "Logical unit not ready, format in progress"},
444 {0x0405, "Logical unit not ready, rebuild in progress"}, 449 {0x0405, "Logical unit not ready, rebuild in progress"},
@@ -478,6 +483,9 @@ static struct error_info additional[] =
478 {0x0B00, "Warning"}, 483 {0x0B00, "Warning"},
479 {0x0B01, "Warning - specified temperature exceeded"}, 484 {0x0B01, "Warning - specified temperature exceeded"},
480 {0x0B02, "Warning - enclosure degraded"}, 485 {0x0B02, "Warning - enclosure degraded"},
486 {0x0B03, "Warning - background self-test failed"},
487 {0x0B04, "Warning - background pre-scan detected medium error"},
488 {0x0B05, "Warning - background medium scan detected medium error"},
481 489
482 {0x0C00, "Write error"}, 490 {0x0C00, "Write error"},
483 {0x0C01, "Write error - recovered with auto reallocation"}, 491 {0x0C01, "Write error - recovered with auto reallocation"},
@@ -493,6 +501,7 @@ static struct error_info additional[] =
493 {0x0C0B, "Auxiliary memory write error"}, 501 {0x0C0B, "Auxiliary memory write error"},
494 {0x0C0C, "Write error - unexpected unsolicited data"}, 502 {0x0C0C, "Write error - unexpected unsolicited data"},
495 {0x0C0D, "Write error - not enough unsolicited data"}, 503 {0x0C0D, "Write error - not enough unsolicited data"},
504 {0x0C0F, "Defects in error window"},
496 505
497 {0x0D00, "Error detected by third party temporary initiator"}, 506 {0x0D00, "Error detected by third party temporary initiator"},
498 {0x0D01, "Third party device failure"}, 507 {0x0D01, "Third party device failure"},
@@ -504,11 +513,12 @@ static struct error_info additional[] =
504 {0x0E00, "Invalid information unit"}, 513 {0x0E00, "Invalid information unit"},
505 {0x0E01, "Information unit too short"}, 514 {0x0E01, "Information unit too short"},
506 {0x0E02, "Information unit too long"}, 515 {0x0E02, "Information unit too long"},
516 {0x0E03, "Invalid field in command information unit"},
507 517
508 {0x1000, "Id CRC or ECC error"}, 518 {0x1000, "Id CRC or ECC error"},
509 {0x1001, "Data block guard check failed"}, 519 {0x1001, "Logical block guard check failed"},
510 {0x1002, "Data block application tag check failed"}, 520 {0x1002, "Logical block application tag check failed"},
511 {0x1003, "Data block reference tag check failed"}, 521 {0x1003, "Logical block reference tag check failed"},
512 522
513 {0x1100, "Unrecovered read error"}, 523 {0x1100, "Unrecovered read error"},
514 {0x1101, "Read retries exhausted"}, 524 {0x1101, "Read retries exhausted"},
@@ -530,6 +540,7 @@ static struct error_info additional[] =
530 {0x1111, "Read error - loss of streaming"}, 540 {0x1111, "Read error - loss of streaming"},
531 {0x1112, "Auxiliary memory read error"}, 541 {0x1112, "Auxiliary memory read error"},
532 {0x1113, "Read error - failed retransmission request"}, 542 {0x1113, "Read error - failed retransmission request"},
543 {0x1114, "Read error - lba marked bad by application client"},
533 544
534 {0x1200, "Address mark not found for id field"}, 545 {0x1200, "Address mark not found for id field"},
535 546
@@ -610,11 +621,14 @@ static struct error_info additional[] =
610 {0x2100, "Logical block address out of range"}, 621 {0x2100, "Logical block address out of range"},
611 {0x2101, "Invalid element address"}, 622 {0x2101, "Invalid element address"},
612 {0x2102, "Invalid address for write"}, 623 {0x2102, "Invalid address for write"},
624 {0x2103, "Invalid write crossing layer jump"},
613 625
614 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, 626 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
615 627
616 {0x2400, "Invalid field in cdb"}, 628 {0x2400, "Invalid field in cdb"},
617 {0x2401, "CDB decryption error"}, 629 {0x2401, "CDB decryption error"},
630 {0x2402, "Obsolete"},
631 {0x2403, "Obsolete"},
618 {0x2404, "Security audit value frozen"}, 632 {0x2404, "Security audit value frozen"},
619 {0x2405, "Security working key frozen"}, 633 {0x2405, "Security working key frozen"},
620 {0x2406, "Nonce not unique"}, 634 {0x2406, "Nonce not unique"},
@@ -637,7 +651,10 @@ static struct error_info additional[] =
637 {0x260C, "Invalid operation for copy source or destination"}, 651 {0x260C, "Invalid operation for copy source or destination"},
638 {0x260D, "Copy segment granularity violation"}, 652 {0x260D, "Copy segment granularity violation"},
639 {0x260E, "Invalid parameter while port is enabled"}, 653 {0x260E, "Invalid parameter while port is enabled"},
640 {0x260F, "Invalid data-out buffer integrity"}, 654 {0x260F, "Invalid data-out buffer integrity check value"},
655 {0x2610, "Data decryption key fail limit reached"},
656 {0x2611, "Incomplete key-associated data set"},
657 {0x2612, "Vendor specific key reference not found"},
641 658
642 {0x2700, "Write protected"}, 659 {0x2700, "Write protected"},
643 {0x2701, "Hardware write protected"}, 660 {0x2701, "Hardware write protected"},
@@ -649,6 +666,7 @@ static struct error_info additional[] =
649 666
650 {0x2800, "Not ready to ready change, medium may have changed"}, 667 {0x2800, "Not ready to ready change, medium may have changed"},
651 {0x2801, "Import or export element accessed"}, 668 {0x2801, "Import or export element accessed"},
669 {0x2802, "Format-layer may have changed"},
652 670
653 {0x2900, "Power on, reset, or bus device reset occurred"}, 671 {0x2900, "Power on, reset, or bus device reset occurred"},
654 {0x2901, "Power on occurred"}, 672 {0x2901, "Power on occurred"},
@@ -669,6 +687,11 @@ static struct error_info additional[] =
669 {0x2A07, "Implicit asymmetric access state transition failed"}, 687 {0x2A07, "Implicit asymmetric access state transition failed"},
670 {0x2A08, "Priority changed"}, 688 {0x2A08, "Priority changed"},
671 {0x2A09, "Capacity data has changed"}, 689 {0x2A09, "Capacity data has changed"},
690 {0x2A10, "Timestamp changed"},
691 {0x2A11, "Data encryption parameters changed by another i_t nexus"},
692 {0x2A12, "Data encryption parameters changed by vendor specific "
693 "event"},
694 {0x2A13, "Data encryption key instance counter has changed"},
672 695
673 {0x2B00, "Copy cannot execute since host cannot disconnect"}, 696 {0x2B00, "Copy cannot execute since host cannot disconnect"},
674 697
@@ -690,6 +713,7 @@ static struct error_info additional[] =
690 {0x2E00, "Insufficient time for operation"}, 713 {0x2E00, "Insufficient time for operation"},
691 714
692 {0x2F00, "Commands cleared by another initiator"}, 715 {0x2F00, "Commands cleared by another initiator"},
716 {0x2F01, "Commands cleared by power loss notification"},
693 717
694 {0x3000, "Incompatible medium installed"}, 718 {0x3000, "Incompatible medium installed"},
695 {0x3001, "Cannot read medium - unknown format"}, 719 {0x3001, "Cannot read medium - unknown format"},
@@ -702,7 +726,8 @@ static struct error_info additional[] =
702 {0x3008, "Cannot write - application code mismatch"}, 726 {0x3008, "Cannot write - application code mismatch"},
703 {0x3009, "Current session not fixated for append"}, 727 {0x3009, "Current session not fixated for append"},
704 {0x300A, "Cleaning request rejected"}, 728 {0x300A, "Cleaning request rejected"},
705 {0x300C, "WORM medium, overwrite attempted"}, 729 {0x300C, "WORM medium - overwrite attempted"},
730 {0x300D, "WORM medium - integrity check"},
706 {0x3010, "Medium not formatted"}, 731 {0x3010, "Medium not formatted"},
707 732
708 {0x3100, "Medium format corrupted"}, 733 {0x3100, "Medium format corrupted"},
@@ -790,6 +815,9 @@ static struct error_info additional[] =
790 {0x3F0F, "Echo buffer overwritten"}, 815 {0x3F0F, "Echo buffer overwritten"},
791 {0x3F10, "Medium loadable"}, 816 {0x3F10, "Medium loadable"},
792 {0x3F11, "Medium auxiliary memory accessible"}, 817 {0x3F11, "Medium auxiliary memory accessible"},
818 {0x3F12, "iSCSI IP address added"},
819 {0x3F13, "iSCSI IP address removed"},
820 {0x3F14, "iSCSI IP address changed"},
793/* 821/*
794 * {0x40NN, "Ram failure"}, 822 * {0x40NN, "Ram failure"},
795 * {0x40NN, "Diagnostic failure on component nn"}, 823 * {0x40NN, "Diagnostic failure on component nn"},
@@ -799,6 +827,7 @@ static struct error_info additional[] =
799 {0x4300, "Message error"}, 827 {0x4300, "Message error"},
800 828
801 {0x4400, "Internal target failure"}, 829 {0x4400, "Internal target failure"},
830 {0x4471, "ATA device failed set features"},
802 831
803 {0x4500, "Select or reselect failure"}, 832 {0x4500, "Select or reselect failure"},
804 833
@@ -807,9 +836,10 @@ static struct error_info additional[] =
807 {0x4700, "Scsi parity error"}, 836 {0x4700, "Scsi parity error"},
808 {0x4701, "Data phase CRC error detected"}, 837 {0x4701, "Data phase CRC error detected"},
809 {0x4702, "Scsi parity error detected during st data phase"}, 838 {0x4702, "Scsi parity error detected during st data phase"},
810 {0x4703, "Information unit CRC error detected"}, 839 {0x4703, "Information unit iuCRC error detected"},
811 {0x4704, "Asynchronous information protection error detected"}, 840 {0x4704, "Asynchronous information protection error detected"},
812 {0x4705, "Protocol service CRC error"}, 841 {0x4705, "Protocol service CRC error"},
842 {0x4706, "Phy test function in progress"},
813 {0x477f, "Some commands cleared by iSCSI Protocol event"}, 843 {0x477f, "Some commands cleared by iSCSI Protocol event"},
814 844
815 {0x4800, "Initiator detected error message received"}, 845 {0x4800, "Initiator detected error message received"},
@@ -844,6 +874,8 @@ static struct error_info additional[] =
844 {0x5300, "Media load or eject failed"}, 874 {0x5300, "Media load or eject failed"},
845 {0x5301, "Unload tape failure"}, 875 {0x5301, "Unload tape failure"},
846 {0x5302, "Medium removal prevented"}, 876 {0x5302, "Medium removal prevented"},
877 {0x5303, "Medium removal prevented by data transfer element"},
878 {0x5304, "Medium thread or unthread failure"},
847 879
848 {0x5400, "Scsi to host system interface failure"}, 880 {0x5400, "Scsi to host system interface failure"},
849 881
@@ -855,6 +887,7 @@ static struct error_info additional[] =
855 {0x5505, "Insufficient access control resources"}, 887 {0x5505, "Insufficient access control resources"},
856 {0x5506, "Auxiliary memory out of space"}, 888 {0x5506, "Auxiliary memory out of space"},
857 {0x5507, "Quota error"}, 889 {0x5507, "Quota error"},
890 {0x5508, "Maximum number of supplemental decryption keys exceeded"},
858 891
859 {0x5700, "Unable to recover table-of-contents"}, 892 {0x5700, "Unable to recover table-of-contents"},
860 893
@@ -1004,6 +1037,7 @@ static struct error_info additional[] =
1004 {0x6708, "Assign failure occurred"}, 1037 {0x6708, "Assign failure occurred"},
1005 {0x6709, "Multiply assigned logical unit"}, 1038 {0x6709, "Multiply assigned logical unit"},
1006 {0x670A, "Set target port groups command failed"}, 1039 {0x670A, "Set target port groups command failed"},
1040 {0x670B, "ATA device feature not enabled"},
1007 1041
1008 {0x6800, "Logical unit not configured"}, 1042 {0x6800, "Logical unit not configured"},
1009 1043
@@ -1030,6 +1064,8 @@ static struct error_info additional[] =
1030 {0x6F03, "Read of scrambled sector without authentication"}, 1064 {0x6F03, "Read of scrambled sector without authentication"},
1031 {0x6F04, "Media region code is mismatched to logical unit region"}, 1065 {0x6F04, "Media region code is mismatched to logical unit region"},
1032 {0x6F05, "Drive region must be permanent/region reset count error"}, 1066 {0x6F05, "Drive region must be permanent/region reset count error"},
1067 {0x6F06, "Insufficient block count for binding nonce recording"},
1068 {0x6F07, "Conflict in binding nonce recording"},
1033/* 1069/*
1034 * {0x70NN, "Decompression exception short algorithm id of nn"}, 1070 * {0x70NN, "Decompression exception short algorithm id of nn"},
1035 */ 1071 */
@@ -1041,6 +1077,8 @@ static struct error_info additional[] =
1041 {0x7203, "Session fixation error - incomplete track in session"}, 1077 {0x7203, "Session fixation error - incomplete track in session"},
1042 {0x7204, "Empty or partially written reserved track"}, 1078 {0x7204, "Empty or partially written reserved track"},
1043 {0x7205, "No more track reservations allowed"}, 1079 {0x7205, "No more track reservations allowed"},
1080 {0x7206, "RMZ extension is not allowed"},
1081 {0x7207, "No more test zone extensions are allowed"},
1044 1082
1045 {0x7300, "Cd control error"}, 1083 {0x7300, "Cd control error"},
1046 {0x7301, "Power calibration area almost full"}, 1084 {0x7301, "Power calibration area almost full"},
@@ -1049,6 +1087,18 @@ static struct error_info additional[] =
1049 {0x7304, "Program memory area update failure"}, 1087 {0x7304, "Program memory area update failure"},
1050 {0x7305, "Program memory area is full"}, 1088 {0x7305, "Program memory area is full"},
1051 {0x7306, "RMA/PMA is almost full"}, 1089 {0x7306, "RMA/PMA is almost full"},
1090 {0x7310, "Current power calibration area almost full"},
1091 {0x7311, "Current power calibration area is full"},
1092 {0x7317, "RDZ is full"},
1093
1094 {0x7400, "Security error"},
1095 {0x7401, "Unable to decrypt data"},
1096 {0x7402, "Unencrypted data encountered while decrypting"},
1097 {0x7403, "Incorrect data encryption key"},
1098 {0x7404, "Cryptographic integrity validation failed"},
1099 {0x7405, "Error decrypting data"},
1100 {0x7471, "Logical unit access not authorized"},
1101
1052 {0, NULL} 1102 {0, NULL}
1053}; 1103};
1054 1104
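
The constants.c changes above only extend lookup tables: SPC-4 opcode names in cdb_byte0_names[], extra MAINTENANCE IN/OUT service actions, and new additional-sense-code strings; the code that consumes them is untouched. As a rough, self-contained sketch of how such a value/name table is scanned (lookup_sa_name is an invented helper name, and the demo array is a small excerpt of maint_in_arr):

#include <stdio.h>

struct value_name_pair {
	int value;
	const char *name;
};

static const struct value_name_pair maint_in_demo[] = {
	{0xc, "Report supported operation codes"},
	{0xd, "Report supported task management functions"},
	{0xe, "Report priority"},
	{0xf, "Report timestamp"},		/* added by this patch */
};

/* Linear scan; NULL tells the caller to fall back to printing the
 * raw service action value.
 */
static const char *lookup_sa_name(const struct value_name_pair *arr,
				  int arr_sz, int sa)
{
	int k;

	for (k = 0; k < arr_sz; k++)
		if (arr[k].value == sa)
			return arr[k].name;
	return NULL;
}

int main(void)
{
	int sz = (int)(sizeof(maint_in_demo) / sizeof(maint_in_demo[0]));
	const char *n = lookup_sa_name(maint_in_demo, sz, 0xf);

	printf("MAINTENANCE IN, service action 0xf: %s\n", n ? n : "unknown");
	return 0;
}
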
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 10573c24a50b..98bd22714d0d 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1397,7 +1397,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1397 sp->SCp.ptr = NULL; 1397 sp->SCp.ptr = NULL;
1398 } 1398 }
1399 } else { 1399 } else {
1400 sp->SCp.buffer = (struct scatterlist *) sp->buffer; 1400 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
1401 sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, 1401 sp->SCp.buffers_residual = sbus_map_sg(esp->sdev,
1402 sp->SCp.buffer, 1402 sp->SCp.buffer,
1403 sp->use_sg, 1403 sp->use_sg,
@@ -1410,7 +1410,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1410static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) 1410static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1411{ 1411{
1412 if (sp->use_sg) { 1412 if (sp->use_sg) {
1413 sbus_unmap_sg(esp->sdev, sp->buffer, sp->use_sg, 1413 sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg,
1414 sp->sc_data_direction); 1414 sp->sc_data_direction);
1415 } else if (sp->request_bufflen) { 1415 } else if (sp->request_bufflen) {
1416 sbus_unmap_single(esp->sdev, 1416 sbus_unmap_single(esp->sdev,
@@ -2754,18 +2754,15 @@ static int esp_do_data_finale(struct esp *esp)
2754 */ 2754 */
2755static int esp_should_clear_sync(struct scsi_cmnd *sp) 2755static int esp_should_clear_sync(struct scsi_cmnd *sp)
2756{ 2756{
2757 u8 cmd1 = sp->cmnd[0]; 2757 u8 cmd = sp->cmnd[0];
2758 u8 cmd2 = sp->data_cmnd[0];
2759 2758
2760 /* These cases are for spinning up a disk and 2759 /* These cases are for spinning up a disk and
2761 * waiting for that spinup to complete. 2760 * waiting for that spinup to complete.
2762 */ 2761 */
2763 if (cmd1 == START_STOP || 2762 if (cmd == START_STOP)
2764 cmd2 == START_STOP)
2765 return 0; 2763 return 0;
2766 2764
2767 if (cmd1 == TEST_UNIT_READY || 2765 if (cmd == TEST_UNIT_READY)
2768 cmd2 == TEST_UNIT_READY)
2769 return 0; 2766 return 0;
2770 2767
2771 /* One more special case for SCSI tape drives, 2768 /* One more special case for SCSI tape drives,
@@ -2773,8 +2770,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp)
2773 * completion of a rewind or tape load operation. 2770 * completion of a rewind or tape load operation.
2774 */ 2771 */
2775 if (sp->device->type == TYPE_TAPE) { 2772 if (sp->device->type == TYPE_TAPE) {
2776 if (cmd1 == MODE_SENSE || 2773 if (cmd == MODE_SENSE)
2777 cmd2 == MODE_SENSE)
2778 return 0; 2774 return 0;
2779 } 2775 }
2780 2776
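
Since data_cmnd[] is just a saved copy of cmnd[] by the time esp_should_clear_sync() runs, the hunk above drops the duplicate check and consults only the live opcode. A small sketch mirroring the simplified test (standard SCSI opcode values; what the caller does with the result is unchanged by the patch):

#include <stdbool.h>

#define START_STOP		0x1b
#define TEST_UNIT_READY		0x00
#define MODE_SENSE		0x1a	/* 6-byte MODE SENSE */

bool should_clear_sync(unsigned char opcode, bool is_tape)
{
	/* spin-up / ready polling: give the target time to respond */
	if (opcode == START_STOP || opcode == TEST_UNIT_READY)
		return false;

	/* tape drives poll rewind/load completion with MODE SENSE */
	if (is_tape && opcode == MODE_SENSE)
		return false;

	return true;
}
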
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 7eed0b098171..6aeb5f003c3c 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -81,7 +81,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
81 int rc; 81 int rc;
82 82
83 single_host_data = hostdata; 83 single_host_data = hostdata;
84 rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0); 84 rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);
85 if (rc < 0) { 85 if (rc < 0) {
86 printk("viopath_open failed with rc %d in open_event_path\n", 86 printk("viopath_open failed with rc %d in open_event_path\n",
87 rc); 87 rc);
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 242b8873b333..ed22b96580c6 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -238,6 +238,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
238 if (rc == 2) { 238 if (rc == 2) {
239 /* Adapter is good, but other end is not ready */ 239 /* Adapter is good, but other end is not ready */
240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); 240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
241 retrc = 0;
241 } else if (rc != 0) { 242 } else if (rc != 0) {
242 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); 243 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
243 goto reg_crq_failed; 244 goto reg_crq_failed;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 3fd8a96f2af3..bfac4441d89f 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -257,7 +257,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
258{ 258{
259 int sz = sp->use_sg - 1; 259 int sz = sp->use_sg - 1;
260 struct scatterlist *sg = (struct scatterlist *)sp->buffer; 260 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
261 261
262 while(sz >= 0) { 262 while(sz >= 0) {
263 vdma_free(sg[sz].dma_address); 263 vdma_free(sg[sz].dma_address);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 4b6aa30f4d68..29f59345305d 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -764,12 +764,27 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
764 unsigned int action) 764 unsigned int action)
765{ 765{
766 unsigned long flags; 766 unsigned long flags;
767 struct ata_eh_info *ehi = &ap->eh_info;
768 struct ata_eh_context *ehc = &ap->eh_context;
767 769
768 spin_lock_irqsave(ap->lock, flags); 770 spin_lock_irqsave(ap->lock, flags);
769 771
770 ata_eh_clear_action(dev, &ap->eh_info, action); 772 /* Reset is represented by combination of actions and EHI
773 * flags. Suck in all related bits before clearing eh_info to
774 * avoid losing requested action.
775 */
776 if (action & ATA_EH_RESET_MASK) {
777 ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
778 ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;
779
780 /* make sure all reset actions are cleared & clear EHI flags */
781 action |= ATA_EH_RESET_MASK;
782 ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
783 }
784
785 ata_eh_clear_action(dev, ehi, action);
771 786
772 if (!(ap->eh_context.i.flags & ATA_EHI_QUIET)) 787 if (!(ehc->i.flags & ATA_EHI_QUIET))
773 ap->pflags |= ATA_PFLAG_RECOVERED; 788 ap->pflags |= ATA_PFLAG_RECOVERED;
774 789
775 spin_unlock_irqrestore(ap->lock, flags); 790 spin_unlock_irqrestore(ap->lock, flags);
@@ -790,6 +805,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
790static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, 805static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
791 unsigned int action) 806 unsigned int action)
792{ 807{
808 /* if reset is complete, clear all reset actions & reset modifier */
809 if (action & ATA_EH_RESET_MASK) {
810 action |= ATA_EH_RESET_MASK;
811 ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
812 }
813
793 ata_eh_clear_action(dev, &ap->eh_context.i, action); 814 ata_eh_clear_action(dev, &ap->eh_context.i, action);
794} 815}
795 816
@@ -1276,8 +1297,6 @@ static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1276static void ata_eh_autopsy(struct ata_port *ap) 1297static void ata_eh_autopsy(struct ata_port *ap)
1277{ 1298{
1278 struct ata_eh_context *ehc = &ap->eh_context; 1299 struct ata_eh_context *ehc = &ap->eh_context;
1279 unsigned int action = ehc->i.action;
1280 struct ata_device *failed_dev = NULL;
1281 unsigned int all_err_mask = 0; 1300 unsigned int all_err_mask = 0;
1282 int tag, is_io = 0; 1301 int tag, is_io = 0;
1283 u32 serror; 1302 u32 serror;
@@ -1294,7 +1313,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1294 ehc->i.serror |= serror; 1313 ehc->i.serror |= serror;
1295 ata_eh_analyze_serror(ap); 1314 ata_eh_analyze_serror(ap);
1296 } else if (rc != -EOPNOTSUPP) 1315 } else if (rc != -EOPNOTSUPP)
1297 action |= ATA_EH_HARDRESET; 1316 ehc->i.action |= ATA_EH_HARDRESET;
1298 1317
1299 /* analyze NCQ failure */ 1318 /* analyze NCQ failure */
1300 ata_eh_analyze_ncq_error(ap); 1319 ata_eh_analyze_ncq_error(ap);
@@ -1315,7 +1334,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1315 qc->err_mask |= ehc->i.err_mask; 1334 qc->err_mask |= ehc->i.err_mask;
1316 1335
1317 /* analyze TF */ 1336 /* analyze TF */
1318 action |= ata_eh_analyze_tf(qc, &qc->result_tf); 1337 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1319 1338
1320 /* DEV errors are probably spurious in case of ATA_BUS error */ 1339 /* DEV errors are probably spurious in case of ATA_BUS error */
1321 if (qc->err_mask & AC_ERR_ATA_BUS) 1340 if (qc->err_mask & AC_ERR_ATA_BUS)
@@ -1329,11 +1348,11 @@ static void ata_eh_autopsy(struct ata_port *ap)
1329 /* SENSE_VALID trumps dev/unknown error and revalidation */ 1348 /* SENSE_VALID trumps dev/unknown error and revalidation */
1330 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1349 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1331 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1350 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1332 action &= ~ATA_EH_REVALIDATE; 1351 ehc->i.action &= ~ATA_EH_REVALIDATE;
1333 } 1352 }
1334 1353
1335 /* accumulate error info */ 1354 /* accumulate error info */
1336 failed_dev = qc->dev; 1355 ehc->i.dev = qc->dev;
1337 all_err_mask |= qc->err_mask; 1356 all_err_mask |= qc->err_mask;
1338 if (qc->flags & ATA_QCFLAG_IO) 1357 if (qc->flags & ATA_QCFLAG_IO)
1339 is_io = 1; 1358 is_io = 1;
@@ -1342,25 +1361,22 @@ static void ata_eh_autopsy(struct ata_port *ap)
1342 /* enforce default EH actions */ 1361 /* enforce default EH actions */
1343 if (ap->pflags & ATA_PFLAG_FROZEN || 1362 if (ap->pflags & ATA_PFLAG_FROZEN ||
1344 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1363 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1345 action |= ATA_EH_SOFTRESET; 1364 ehc->i.action |= ATA_EH_SOFTRESET;
1346 else if (all_err_mask) 1365 else if (all_err_mask)
1347 action |= ATA_EH_REVALIDATE; 1366 ehc->i.action |= ATA_EH_REVALIDATE;
1348 1367
1349 /* if we have offending qcs and the associated failed device */ 1368 /* if we have offending qcs and the associated failed device */
1350 if (failed_dev) { 1369 if (ehc->i.dev) {
1351 /* speed down */ 1370 /* speed down */
1352 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask); 1371 ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
1372 all_err_mask);
1353 1373
1354 /* perform per-dev EH action only on the offending device */ 1374 /* perform per-dev EH action only on the offending device */
1355 ehc->i.dev_action[failed_dev->devno] |= 1375 ehc->i.dev_action[ehc->i.dev->devno] |=
1356 action & ATA_EH_PERDEV_MASK; 1376 ehc->i.action & ATA_EH_PERDEV_MASK;
1357 action &= ~ATA_EH_PERDEV_MASK; 1377 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
1358 } 1378 }
1359 1379
1360 /* record autopsy result */
1361 ehc->i.dev = failed_dev;
1362 ehc->i.action |= action;
1363
1364 DPRINTK("EXIT\n"); 1380 DPRINTK("EXIT\n");
1365} 1381}
1366 1382
@@ -1483,6 +1499,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1483 ata_reset_fn_t reset; 1499 ata_reset_fn_t reset;
1484 int i, did_followup_srst, rc; 1500 int i, did_followup_srst, rc;
1485 1501
1502 /* about to reset */
1503 ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1504
1486 /* Determine which reset to use and record in ehc->i.action. 1505 /* Determine which reset to use and record in ehc->i.action.
1487 * prereset() may examine and modify it. 1506 * prereset() may examine and modify it.
1488 */ 1507 */
@@ -1531,8 +1550,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1531 ata_port_printk(ap, KERN_INFO, "%s resetting port\n", 1550 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1532 reset == softreset ? "soft" : "hard"); 1551 reset == softreset ? "soft" : "hard");
1533 1552
1534 /* reset */ 1553 /* mark that this EH session started with reset */
1535 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1536 ehc->i.flags |= ATA_EHI_DID_RESET; 1554 ehc->i.flags |= ATA_EHI_DID_RESET;
1537 1555
1538 rc = ata_do_reset(ap, reset, classes); 1556 rc = ata_do_reset(ap, reset, classes);
@@ -1595,7 +1613,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1595 postreset(ap, classes); 1613 postreset(ap, classes);
1596 1614
1597 /* reset successful, schedule revalidation */ 1615 /* reset successful, schedule revalidation */
1598 ata_eh_done(ap, NULL, ATA_EH_RESET_MASK); 1616 ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1599 ehc->i.action |= ATA_EH_REVALIDATE; 1617 ehc->i.action |= ATA_EH_REVALIDATE;
1600 } 1618 }
1601 1619
@@ -1848,15 +1866,16 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
1848 for (i = 0; i < ata_port_max_devices(ap); i++) { 1866 for (i = 0; i < ata_port_max_devices(ap); i++) {
1849 struct ata_device *dev = &ap->device[i]; 1867 struct ata_device *dev = &ap->device[i];
1850 1868
1851 if (ata_dev_absent(dev) || ata_dev_ready(dev)) 1869 if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1852 break; 1870 break;
1853 } 1871 }
1854 1872
1855 if (i == ata_port_max_devices(ap)) 1873 if (i == ata_port_max_devices(ap))
1856 return 1; 1874 return 1;
1857 1875
1858 /* always thaw frozen port and recover failed devices */ 1876 /* thaw frozen port, resume link and recover failed devices */
1859 if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap)) 1877 if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1878 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1860 return 0; 1879 return 0;
1861 1880
1862 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 1881 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
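
The libata-eh.c hunks above replace the local "action" accumulator in ata_eh_autopsy() with direct updates to ehc->i.action, and make ata_eh_about_to_do()/ata_eh_done() treat the whole reset mask as one unit (any reset bit implies all of ATA_EH_RESET_MASK, plus the EHI reset-modifier flags). A small self-contained sketch of that bitmask pattern, using stand-in flag values rather than the real ATA_EH_*/ATA_EHI_* constants:

#include <stdio.h>

#define EH_REVALIDATE	0x0001
#define EH_SOFTRESET	0x0002
#define EH_HARDRESET	0x0004
#define EH_RESET_MASK	(EH_SOFTRESET | EH_HARDRESET)

struct eh_pending {
	unsigned int action;	/* stands in for ehc->i.action */
};

/* autopsy-style recording: each verdict is ORed straight into the
 * pending mask (the patch drops the local accumulator in favour of
 * exactly this).
 */
static void record_action(struct eh_pending *p, unsigned int verdict)
{
	p->action |= verdict;
}

/* ata_eh_done()-style completion: finishing any reset clears the whole
 * reset mask, since soft and hard reset are alternatives of the same
 * recovery step.
 */
static void complete_action(struct eh_pending *p, unsigned int action)
{
	if (action & EH_RESET_MASK)
		action |= EH_RESET_MASK;
	p->action &= ~action;
}

int main(void)
{
	struct eh_pending p = { 0 };

	record_action(&p, EH_HARDRESET);
	record_action(&p, EH_REVALIDATE);
	complete_action(&p, EH_SOFTRESET);	/* clears both reset bits */
	printf("still pending: 0x%x\n", p.action);	/* prints 0x1 */
	return 0;
}
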
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f81691fcf177..d44f9aac6b8f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -21,10 +21,12 @@
21 21
22struct lpfc_sli2_slim; 22struct lpfc_sli2_slim;
23 23
24#define LPFC_MAX_TARGET 256 /* max targets supported */
25#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */
26#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */
27 24
25#define LPFC_MAX_TARGET 256 /* max number of targets supported */
26#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
27 requests */
28#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
29 the NameServer before giving up. */
28#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */ 30#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
29#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */ 31#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
30#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */ 32#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
@@ -41,7 +43,6 @@ struct lpfc_sli2_slim;
41 (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) 43 (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
42/* Provide maximum configuration definitions. */ 44/* Provide maximum configuration definitions. */
43#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ 45#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
44#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */
45#define FC_MAX_ADPTMSG 64 46#define FC_MAX_ADPTMSG 64
46 47
47#define MAX_HBAEVT 32 48#define MAX_HBAEVT 32
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b62a72dfab29..5c68cdd8736f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -219,9 +219,19 @@ lpfc_issue_lip(struct Scsi_Host *host)
219 return -ENOMEM; 219 return -ENOMEM;
220 220
221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
222 lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); 222 pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
223 pmboxq->mb.mbxOwner = OWN_HOST;
224
223 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
224 226
227 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
228 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
229 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
230 phba->cfg_link_speed);
231 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
232 phba->fc_ratov * 2);
233 }
234
225 if (mbxstatus == MBX_TIMEOUT) 235 if (mbxstatus == MBX_TIMEOUT)
226 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 236 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
227 else 237 else
@@ -233,51 +243,53 @@ lpfc_issue_lip(struct Scsi_Host *host)
233 return 0; 243 return 0;
234} 244}
235 245
236static ssize_t 246static int
237lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) 247lpfc_selective_reset(struct lpfc_hba *phba)
238{ 248{
239 struct Scsi_Host *host = class_to_shost(cdev); 249 struct completion online_compl;
240 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 250 int status = 0;
241 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 251
252 init_completion(&online_compl);
253 lpfc_workq_post_event(phba, &status, &online_compl,
254 LPFC_EVT_OFFLINE);
255 wait_for_completion(&online_compl);
256
257 if (status != 0)
258 return -EIO;
259
260 init_completion(&online_compl);
261 lpfc_workq_post_event(phba, &status, &online_compl,
262 LPFC_EVT_ONLINE);
263 wait_for_completion(&online_compl);
264
265 if (status != 0)
266 return -EIO;
267
268 return 0;
242} 269}
243 270
244static ssize_t 271static ssize_t
245lpfc_board_online_show(struct class_device *cdev, char *buf) 272lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
246{ 273{
247 struct Scsi_Host *host = class_to_shost(cdev); 274 struct Scsi_Host *host = class_to_shost(cdev);
248 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 275 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
276 int status = -EINVAL;
249 277
250 if (phba->fc_flag & FC_OFFLINE_MODE) 278 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
251 return snprintf(buf, PAGE_SIZE, "0\n"); 279 status = lpfc_selective_reset(phba);
280
281 if (status == 0)
282 return strlen(buf);
252 else 283 else
253 return snprintf(buf, PAGE_SIZE, "1\n"); 284 return status;
254} 285}
255 286
256static ssize_t 287static ssize_t
257lpfc_board_online_store(struct class_device *cdev, const char *buf, 288lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
258 size_t count)
259{ 289{
260 struct Scsi_Host *host = class_to_shost(cdev); 290 struct Scsi_Host *host = class_to_shost(cdev);
261 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 291 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
262 struct completion online_compl; 292 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
263 int val=0, status=0;
264
265 if (sscanf(buf, "%d", &val) != 1)
266 return -EINVAL;
267
268 init_completion(&online_compl);
269
270 if (val)
271 lpfc_workq_post_event(phba, &status, &online_compl,
272 LPFC_EVT_ONLINE);
273 else
274 lpfc_workq_post_event(phba, &status, &online_compl,
275 LPFC_EVT_OFFLINE);
276 wait_for_completion(&online_compl);
277 if (!status)
278 return strlen(buf);
279 else
280 return -EIO;
281} 293}
282 294
283static ssize_t 295static ssize_t
@@ -532,10 +544,9 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
532 NULL); 544 NULL);
533static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, 545static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
534 NULL); 546 NULL);
535static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
536 lpfc_board_online_show, lpfc_board_online_store);
537static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 547static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
538 lpfc_board_mode_show, lpfc_board_mode_store); 548 lpfc_board_mode_show, lpfc_board_mode_store);
549static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
539 550
540static int lpfc_poll = 0; 551static int lpfc_poll = 0;
541module_param(lpfc_poll, int, 0); 552module_param(lpfc_poll, int, 0);
@@ -695,12 +706,12 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
695 "during discovery"); 706 "during discovery");
696 707
697/* 708/*
698# lpfc_max_luns: maximum number of LUNs per target driver will support 709# lpfc_max_luns: maximum allowed LUN.
699# Value range is [1,32768]. Default value is 256. 710# Value range is [0,65535]. Default value is 255.
700# NOTE: The SCSI layer will scan each target for this many luns 711# NOTE: The SCSI layer might probe all allowed LUN on some old targets.
701*/ 712*/
702LPFC_ATTR_R(max_luns, 256, 1, 32768, 713LPFC_ATTR_R(max_luns, 255, 0, 65535,
703 "Maximum number of LUNs per target driver will support"); 714 "Maximum allowed LUN");
704 715
705/* 716/*
706# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. 717# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
@@ -739,8 +750,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
739 &class_device_attr_lpfc_max_luns, 750 &class_device_attr_lpfc_max_luns,
740 &class_device_attr_nport_evt_cnt, 751 &class_device_attr_nport_evt_cnt,
741 &class_device_attr_management_version, 752 &class_device_attr_management_version,
742 &class_device_attr_board_online,
743 &class_device_attr_board_mode, 753 &class_device_attr_board_mode,
754 &class_device_attr_issue_reset,
744 &class_device_attr_lpfc_poll, 755 &class_device_attr_lpfc_poll,
745 &class_device_attr_lpfc_poll_tmo, 756 &class_device_attr_lpfc_poll_tmo,
746 NULL, 757 NULL,
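
The lpfc_attr.c hunk replaces the board_online attribute with a write-only issue_reset attribute: its store handler recognises only the keyword "selective" and then bounces the HBA through LPFC_EVT_OFFLINE / LPFC_EVT_ONLINE on the worker thread, waiting on a completion for each step. A sketch of the parse-and-dispatch shape, with the sysfs plumbing omitted and a stub standing in for lpfc_selective_reset():

#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static int selective_reset_stub(void)
{
	/* real code: post the offline event, wait, post the online event,
	 * wait; return -EIO if either step reports a non-zero status
	 */
	return 0;
}

static ssize_t issue_reset_store(const char *buf)
{
	int status = -EINVAL;

	/* only the keyword "selective" is accepted */
	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
		status = selective_reset_stub();

	return status == 0 ? (ssize_t) strlen(buf) : status;
}

int main(void)
{
	printf("%zd\n", issue_reset_store("selective\n"));	/* 10 on success */
	return 0;
}

From user space the reset would then presumably be triggered with something like "echo selective > /sys/class/scsi_host/hostN/issue_reset"; the exact path depends on how the host's class device ends up registered.
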
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index ee22173fce43..517e9e4dd461 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -147,6 +147,7 @@ int lpfc_sli_hba_setup(struct lpfc_hba *);
147int lpfc_sli_hba_down(struct lpfc_hba *); 147int lpfc_sli_hba_down(struct lpfc_hba *);
148int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 148int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
149int lpfc_sli_handle_mb_event(struct lpfc_hba *); 149int lpfc_sli_handle_mb_event(struct lpfc_hba *);
150int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
150int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 151int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
151 struct lpfc_sli_ring *, uint32_t); 152 struct lpfc_sli_ring *, uint32_t);
152void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 153void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4126fd87956f..b89f6cb641e6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -648,33 +648,32 @@ lpfc_more_plogi(struct lpfc_hba * phba)
648} 648}
649 649
650static struct lpfc_nodelist * 650static struct lpfc_nodelist *
651lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 651lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
652 struct lpfc_nodelist *ndlp) 652 struct lpfc_nodelist *ndlp)
653{ 653{
654 struct lpfc_nodelist *new_ndlp; 654 struct lpfc_nodelist *new_ndlp;
655 struct lpfc_dmabuf *pcmd, *prsp;
656 uint32_t *lp; 655 uint32_t *lp;
657 struct serv_parm *sp; 656 struct serv_parm *sp;
658 uint8_t name[sizeof (struct lpfc_name)]; 657 uint8_t name[sizeof (struct lpfc_name)];
659 uint32_t rc; 658 uint32_t rc;
660 659
661 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
662 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
663 lp = (uint32_t *) prsp->virt; 660 lp = (uint32_t *) prsp->virt;
664 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 661 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
662 memset(name, 0, sizeof (struct lpfc_name));
665 663
666 /* Now we to find out if the NPort we are logging into, matches the WWPN 664 /* Now we to find out if the NPort we are logging into, matches the WWPN
667 * we have for that ndlp. If not, we have some work to do. 665 * we have for that ndlp. If not, we have some work to do.
668 */ 666 */
669 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); 667 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName);
670 668
671 memset(name, 0, sizeof (struct lpfc_name)); 669 if (new_ndlp == ndlp)
672 rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
673 if (!rc || (new_ndlp == ndlp)) {
674 return ndlp; 670 return ndlp;
675 }
676 671
677 if (!new_ndlp) { 672 if (!new_ndlp) {
673 rc =
674 memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
675 if (!rc)
676 return ndlp;
678 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 677 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
679 if (!new_ndlp) 678 if (!new_ndlp)
680 return ndlp; 679 return ndlp;
@@ -683,17 +682,21 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
683 } 682 }
684 683
685 lpfc_unreg_rpi(phba, new_ndlp); 684 lpfc_unreg_rpi(phba, new_ndlp);
686 new_ndlp->nlp_prev_state = ndlp->nlp_state;
687 new_ndlp->nlp_DID = ndlp->nlp_DID; 685 new_ndlp->nlp_DID = ndlp->nlp_DID;
688 new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 686 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
689 lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST); 687 new_ndlp->nlp_state = ndlp->nlp_state;
688 lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK);
690 689
691 /* Move this back to NPR list */ 690 /* Move this back to NPR list */
692 lpfc_unreg_rpi(phba, ndlp); 691 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
693 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 692 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
694 ndlp->nlp_state = NLP_STE_NPR_NODE; 693 }
695 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 694 else {
696 695 lpfc_unreg_rpi(phba, ndlp);
696 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
697 ndlp->nlp_state = NLP_STE_NPR_NODE;
698 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
699 }
697 return new_ndlp; 700 return new_ndlp;
698} 701}
699 702
@@ -703,6 +706,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
703{ 706{
704 IOCB_t *irsp; 707 IOCB_t *irsp;
705 struct lpfc_nodelist *ndlp; 708 struct lpfc_nodelist *ndlp;
709 struct lpfc_dmabuf *prsp;
706 int disc, rc, did, type; 710 int disc, rc, did, type;
707 711
708 712
@@ -769,7 +773,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
769 } 773 }
770 } else { 774 } else {
771 /* Good status, call state machine */ 775 /* Good status, call state machine */
772 ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp); 776 prsp = list_entry(((struct lpfc_dmabuf *)
777 cmdiocb->context2)->list.next,
778 struct lpfc_dmabuf, list);
779 ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
773 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, 780 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
774 NLP_EVT_CMPL_PLOGI); 781 NLP_EVT_CMPL_PLOGI);
775 } 782 }
@@ -3282,10 +3289,9 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3282 } else 3289 } else
3283 lpfc_sli_release_iocbq(phba, piocb); 3290 lpfc_sli_release_iocbq(phba, piocb);
3284 } 3291 }
3285 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) { 3292 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3286 phba->els_tmofunc.expires = jiffies + HZ * timeout; 3293 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
3287 add_timer(&phba->els_tmofunc); 3294
3288 }
3289 spin_unlock_irq(phba->host->host_lock); 3295 spin_unlock_irq(phba->host->host_lock);
3290} 3296}
3291 3297
@@ -3442,6 +3448,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3442 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { 3448 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3443 ndlp->nlp_type |= NLP_FABRIC; 3449 ndlp->nlp_type |= NLP_FABRIC;
3444 } 3450 }
3451 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
3452 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
3445 } 3453 }
3446 3454
3447 phba->fc_stat.elsRcvFrame++; 3455 phba->fc_stat.elsRcvFrame++;
@@ -3463,13 +3471,14 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3463 rjt_err = 1; 3471 rjt_err = 1;
3464 break; 3472 break;
3465 } 3473 }
3474 ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
3466 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); 3475 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
3467 break; 3476 break;
3468 case ELS_CMD_FLOGI: 3477 case ELS_CMD_FLOGI:
3469 phba->fc_stat.elsRcvFLOGI++; 3478 phba->fc_stat.elsRcvFLOGI++;
3470 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); 3479 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
3471 if (newnode) { 3480 if (newnode) {
3472 mempool_free( ndlp, phba->nlp_mem_pool); 3481 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3473 } 3482 }
3474 break; 3483 break;
3475 case ELS_CMD_LOGO: 3484 case ELS_CMD_LOGO:
@@ -3492,7 +3501,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3492 phba->fc_stat.elsRcvRSCN++; 3501 phba->fc_stat.elsRcvRSCN++;
3493 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); 3502 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
3494 if (newnode) { 3503 if (newnode) {
3495 mempool_free( ndlp, phba->nlp_mem_pool); 3504 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3496 } 3505 }
3497 break; 3506 break;
3498 case ELS_CMD_ADISC: 3507 case ELS_CMD_ADISC:
@@ -3535,28 +3544,28 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3535 phba->fc_stat.elsRcvLIRR++; 3544 phba->fc_stat.elsRcvLIRR++;
3536 lpfc_els_rcv_lirr(phba, elsiocb, ndlp); 3545 lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
3537 if (newnode) { 3546 if (newnode) {
3538 mempool_free( ndlp, phba->nlp_mem_pool); 3547 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3539 } 3548 }
3540 break; 3549 break;
3541 case ELS_CMD_RPS: 3550 case ELS_CMD_RPS:
3542 phba->fc_stat.elsRcvRPS++; 3551 phba->fc_stat.elsRcvRPS++;
3543 lpfc_els_rcv_rps(phba, elsiocb, ndlp); 3552 lpfc_els_rcv_rps(phba, elsiocb, ndlp);
3544 if (newnode) { 3553 if (newnode) {
3545 mempool_free( ndlp, phba->nlp_mem_pool); 3554 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3546 } 3555 }
3547 break; 3556 break;
3548 case ELS_CMD_RPL: 3557 case ELS_CMD_RPL:
3549 phba->fc_stat.elsRcvRPL++; 3558 phba->fc_stat.elsRcvRPL++;
3550 lpfc_els_rcv_rpl(phba, elsiocb, ndlp); 3559 lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
3551 if (newnode) { 3560 if (newnode) {
3552 mempool_free( ndlp, phba->nlp_mem_pool); 3561 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3553 } 3562 }
3554 break; 3563 break;
3555 case ELS_CMD_RNID: 3564 case ELS_CMD_RNID:
3556 phba->fc_stat.elsRcvRNID++; 3565 phba->fc_stat.elsRcvRNID++;
3557 lpfc_els_rcv_rnid(phba, elsiocb, ndlp); 3566 lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
3558 if (newnode) { 3567 if (newnode) {
3559 mempool_free( ndlp, phba->nlp_mem_pool); 3568 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3560 } 3569 }
3561 break; 3570 break;
3562 default: 3571 default:
@@ -3568,7 +3577,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3568 "%d:0115 Unknown ELS command x%x received from " 3577 "%d:0115 Unknown ELS command x%x received from "
3569 "NPORT x%x\n", phba->brd_no, cmd, did); 3578 "NPORT x%x\n", phba->brd_no, cmd, did);
3570 if (newnode) { 3579 if (newnode) {
3571 mempool_free( ndlp, phba->nlp_mem_pool); 3580 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3572 } 3581 }
3573 break; 3582 break;
3574 } 3583 }
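
lpfc_plogi_confirm_nport() now takes the PLOGI response payload directly and is also called for received PLOGIs; at its heart it compares WWPNs to decide whether the node it already has can keep being used or whether a different ndlp has to take over. A loose, self-contained sketch of just that comparison (struct wwpn_stub stands in for struct lpfc_name; node lookup, allocation and list moves are left out):

#include <string.h>
#include <stdbool.h>

struct wwpn_stub {
	unsigned char b[8];
};

/* Keep the current node when its port name was never filled in (still
 * all zeroes) or when it already matches the port name carried in the
 * service parameters of the PLOGI payload.
 */
bool keep_current_node(const struct wwpn_stub *recorded,
		       const struct wwpn_stub *from_payload)
{
	static const struct wwpn_stub zero;	/* zero-initialised */

	if (memcmp(recorded, &zero, sizeof(zero)) == 0)
		return true;

	return memcmp(recorded, from_payload, sizeof(*recorded)) == 0;
}
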
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index adb086009ae0..4d6cf990c4fc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1084,7 +1084,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1084 fc_remote_port_rolechg(rport, rport_ids.roles); 1084 fc_remote_port_rolechg(rport, rport_ids.roles);
1085 1085
1086 if ((rport->scsi_target_id != -1) && 1086 if ((rport->scsi_target_id != -1) &&
1087 (rport->scsi_target_id < MAX_FCP_TARGET)) { 1087 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1088 ndlp->nlp_sid = rport->scsi_target_id; 1088 ndlp->nlp_sid = rport->scsi_target_id;
1089 } 1089 }
1090 1090
@@ -1313,7 +1313,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1313 if ((rport_add == mapped) && 1313 if ((rport_add == mapped) &&
1314 ((!nlp->rport) || 1314 ((!nlp->rport) ||
1315 (nlp->rport->scsi_target_id == -1) || 1315 (nlp->rport->scsi_target_id == -1) ||
1316 (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) { 1316 (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) {
1317 nlp->nlp_state = NLP_STE_UNMAPPED_NODE; 1317 nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1318 spin_lock_irq(phba->host->host_lock); 1318 spin_lock_irq(phba->host->host_lock);
1319 nlp->nlp_flag |= NLP_TGT_NO_SCSIID; 1319 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 81755a3f7c68..ef47b824cbed 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -71,6 +71,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
71 uint16_t offset = 0; 71 uint16_t offset = 0;
72 static char licensed[56] = 72 static char licensed[56] =
73 "key unlock for use with gnu public licensed code only\0"; 73 "key unlock for use with gnu public licensed code only\0";
74 static int init_key = 1;
74 75
75 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 76 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
76 if (!pmb) { 77 if (!pmb) {
@@ -82,10 +83,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
82 phba->hba_state = LPFC_INIT_MBX_CMDS; 83 phba->hba_state = LPFC_INIT_MBX_CMDS;
83 84
84 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 85 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
85 uint32_t *ptext = (uint32_t *) licensed; 86 if (init_key) {
87 uint32_t *ptext = (uint32_t *) licensed;
86 88
87 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 89 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
88 *ptext = cpu_to_be32(*ptext); 90 *ptext = cpu_to_be32(*ptext);
91 init_key = 0;
92 }
89 93
90 lpfc_read_nv(phba, pmb); 94 lpfc_read_nv(phba, pmb);
91 memset((char*)mb->un.varRDnvp.rsvd3, 0, 95 memset((char*)mb->un.varRDnvp.rsvd3, 0,
@@ -405,19 +409,26 @@ lpfc_config_port_post(struct lpfc_hba * phba)
405 } 409 }
406 /* MBOX buffer will be freed in mbox compl */ 410 /* MBOX buffer will be freed in mbox compl */
407 411
408 i = 0; 412 return (0);
413}
414
415static int
416lpfc_discovery_wait(struct lpfc_hba *phba)
417{
418 int i = 0;
419
409 while ((phba->hba_state != LPFC_HBA_READY) || 420 while ((phba->hba_state != LPFC_HBA_READY) ||
410 (phba->num_disc_nodes) || (phba->fc_prli_sent) || 421 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
411 ((phba->fc_map_cnt == 0) && (i<2)) || 422 ((phba->fc_map_cnt == 0) && (i<2)) ||
412 (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { 423 (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
413 /* Check every second for 30 retries. */ 424 /* Check every second for 30 retries. */
414 i++; 425 i++;
415 if (i > 30) { 426 if (i > 30) {
416 break; 427 return -ETIMEDOUT;
417 } 428 }
418 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { 429 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
419 /* The link is down. Set linkdown timeout */ 430 /* The link is down. Set linkdown timeout */
420 break; 431 return -ETIMEDOUT;
421 } 432 }
422 433
423 /* Delay for 1 second to give discovery time to complete. */ 434 /* Delay for 1 second to give discovery time to complete. */
@@ -425,12 +436,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
425 436
426 } 437 }
427 438
428 /* Since num_disc_nodes keys off of PLOGI, delay a bit to let 439 return 0;
429 * any potential PRLIs to flush thru the SLI sub-system.
430 */
431 msleep(50);
432
433 return (0);
434} 440}
435 441
436/************************************************************************/ 442/************************************************************************/
@@ -1339,7 +1345,8 @@ lpfc_offline(struct lpfc_hba * phba)
1339 struct lpfc_sli_ring *pring; 1345 struct lpfc_sli_ring *pring;
1340 struct lpfc_sli *psli; 1346 struct lpfc_sli *psli;
1341 unsigned long iflag; 1347 unsigned long iflag;
1342 int i = 0; 1348 int i;
1349 int cnt = 0;
1343 1350
1344 if (!phba) 1351 if (!phba)
1345 return 0; 1352 return 0;
@@ -1348,17 +1355,27 @@ lpfc_offline(struct lpfc_hba * phba)
1348 return 0; 1355 return 0;
1349 1356
1350 psli = &phba->sli; 1357 psli = &phba->sli;
1351 pring = &psli->ring[psli->fcp_ring];
1352 1358
1353 lpfc_linkdown(phba); 1359 lpfc_linkdown(phba);
1360 lpfc_sli_flush_mbox_queue(phba);
1354 1361
1355 /* The linkdown event takes 30 seconds to timeout. */ 1362 for (i = 0; i < psli->num_rings; i++) {
1356 while (pring->txcmplq_cnt) { 1363 pring = &psli->ring[i];
1357 mdelay(10); 1364 /* The linkdown event takes 30 seconds to timeout. */
1358 if (i++ > 3000) 1365 while (pring->txcmplq_cnt) {
1359 break; 1366 mdelay(10);
1367 if (cnt++ > 3000) {
1368 lpfc_printf_log(phba,
1369 KERN_WARNING, LOG_INIT,
1370 "%d:0466 Outstanding IO when "
1371 "bringing Adapter offline\n",
1372 phba->brd_no);
1373 break;
1374 }
1375 }
1360 } 1376 }
1361 1377
1378
1362 /* stop all timers associated with this hba */ 1379 /* stop all timers associated with this hba */
1363 lpfc_stop_timer(phba); 1380 lpfc_stop_timer(phba);
1364 phba->work_hba_events = 0; 1381 phba->work_hba_events = 0;
@@ -1639,6 +1656,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1639 goto out_free_irq; 1656 goto out_free_irq;
1640 } 1657 }
1641 1658
1659 lpfc_discovery_wait(phba);
1660
1642 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1661 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1643 spin_lock_irq(phba->host->host_lock); 1662 spin_lock_irq(phba->host->host_lock);
1644 lpfc_poll_start_timer(phba); 1663 lpfc_poll_start_timer(phba);
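
Reviewer note: the hunks above fold the old inline wait in lpfc_config_port_post() into a new lpfc_discovery_wait() helper that polls once per second, gives up after 30 tries (or earlier if the link stays down), and reports -ETIMEDOUT instead of silently falling through. A minimal standalone sketch of the same bounded-poll shape follows; the hba_ready()/link_is_down() stubs and the 2-second "discovery" are illustrative stand-ins, not lpfc code.

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Illustrative stand-ins for the adapter state the real loop inspects. */
    static int hba_ready(int second)    { return second >= 2; }  /* pretend discovery finishes at t=2s */
    static int link_is_down(int second) { (void)second; return 0; }

    /* Bounded poll: check once per second, give up after 30 tries or on link-down. */
    static int discovery_wait(void)
    {
        int i = 0;

        while (!hba_ready(i)) {
            i++;
            if (i > 30)
                return -ETIMEDOUT;          /* overall timeout */
            if (i >= 15 && link_is_down(i))
                return -ETIMEDOUT;          /* link never came up, stop early */
            sleep(1);                       /* give discovery a second to make progress */
        }
        return 0;
    }

    int main(void)
    {
        printf("discovery_wait() = %d\n", discovery_wait());
        return 0;
    }
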
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 07017658ac56..066292d3995a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -133,6 +133,11 @@ lpfc_mem_free(struct lpfc_hba * phba)
133 133
134 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 134 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
135 pci_pool_destroy(phba->lpfc_mbuf_pool); 135 pci_pool_destroy(phba->lpfc_mbuf_pool);
136
137 /* Free the iocb lookup array */
138 kfree(psli->iocbq_lookup);
139 psli->iocbq_lookup = NULL;
140
136} 141}
137 142
138void * 143void *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27d60ad897cd..bd0b0e293d63 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1110,6 +1110,17 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1110 phba->brd_no, 1110 phba->brd_no,
1111 did, mb->mbxStatus, phba->hba_state); 1111 did, mb->mbxStatus, phba->hba_state);
1112 1112
1113 /*
1114 * If RegLogin failed due to lack of HBA resources do not
1115 * retry discovery.
1116 */
1117 if (mb->mbxStatus == MBXERR_RPI_FULL) {
1118 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
1119 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
1120 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
1121 return ndlp->nlp_state;
1122 }
1123
1113 /* Put ndlp in npr list set plogi timer for 1 sec */ 1124 /* Put ndlp in npr list set plogi timer for 1 sec */
1114 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1125 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1115 spin_lock_irq(phba->host->host_lock); 1126 spin_lock_irq(phba->host->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index aea1ee472f3d..a760a44173df 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -153,22 +153,6 @@ static void
153lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 153lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
154{ 154{
155 unsigned long iflag = 0; 155 unsigned long iflag = 0;
156 /*
157 * There are only two special cases to consider. (1) the scsi command
158 * requested scatter-gather usage or (2) the scsi command allocated
159 * a request buffer, but did not request use_sg. There is a third
160 * case, but it does not require resource deallocation.
161 */
162 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
163 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
164 psb->seg_cnt, psb->pCmd->sc_data_direction);
165 } else {
166 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
167 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
168 psb->pCmd->request_bufflen,
169 psb->pCmd->sc_data_direction);
170 }
171 }
172 156
173 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 157 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
174 psb->pCmd = NULL; 158 psb->pCmd = NULL;
@@ -282,6 +266,27 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
282} 266}
283 267
284static void 268static void
269lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
270{
271 /*
272 * There are only two special cases to consider. (1) the scsi command
273 * requested scatter-gather usage or (2) the scsi command allocated
274 * a request buffer, but did not request use_sg. There is a third
275 * case, but it does not require resource deallocation.
276 */
277 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
278 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
279 psb->seg_cnt, psb->pCmd->sc_data_direction);
280 } else {
281 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
282 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
283 psb->pCmd->request_bufflen,
284 psb->pCmd->sc_data_direction);
285 }
286 }
287}
288
289static void
285lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) 290lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
286{ 291{
287 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 292 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
@@ -454,6 +459,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
454 cmd->scsi_done(cmd); 459 cmd->scsi_done(cmd);
455 460
456 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 461 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
462 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
457 lpfc_release_scsi_buf(phba, lpfc_cmd); 463 lpfc_release_scsi_buf(phba, lpfc_cmd);
458 return; 464 return;
459 } 465 }
@@ -511,6 +517,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
511 } 517 }
512 } 518 }
513 519
520 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
514 lpfc_release_scsi_buf(phba, lpfc_cmd); 521 lpfc_release_scsi_buf(phba, lpfc_cmd);
515} 522}
516 523
@@ -609,6 +616,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
609static int 616static int
610lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, 617lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
611 struct lpfc_scsi_buf *lpfc_cmd, 618 struct lpfc_scsi_buf *lpfc_cmd,
619 unsigned int lun,
612 uint8_t task_mgmt_cmd) 620 uint8_t task_mgmt_cmd)
613{ 621{
614 struct lpfc_sli *psli; 622 struct lpfc_sli *psli;
@@ -627,8 +635,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
627 piocb = &piocbq->iocb; 635 piocb = &piocbq->iocb;
628 636
629 fcp_cmnd = lpfc_cmd->fcp_cmnd; 637 fcp_cmnd = lpfc_cmd->fcp_cmnd;
630 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 638 int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
631 &lpfc_cmd->fcp_cmnd->fcp_lun);
632 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 639 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
633 640
634 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 641 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
@@ -655,14 +662,16 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
655 662
656static int 663static int
657lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, 664lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
658 unsigned tgt_id, struct lpfc_rport_data *rdata) 665 unsigned tgt_id, unsigned int lun,
666 struct lpfc_rport_data *rdata)
659{ 667{
660 struct lpfc_iocbq *iocbq; 668 struct lpfc_iocbq *iocbq;
661 struct lpfc_iocbq *iocbqrsp; 669 struct lpfc_iocbq *iocbqrsp;
662 int ret; 670 int ret;
663 671
664 lpfc_cmd->rdata = rdata; 672 lpfc_cmd->rdata = rdata;
665 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); 673 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
674 FCP_TARGET_RESET);
666 if (!ret) 675 if (!ret)
667 return FAILED; 676 return FAILED;
668 677
@@ -822,6 +831,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
822 return 0; 831 return 0;
823 832
824 out_host_busy_free_buf: 833 out_host_busy_free_buf:
834 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
825 lpfc_release_scsi_buf(phba, lpfc_cmd); 835 lpfc_release_scsi_buf(phba, lpfc_cmd);
826 out_host_busy: 836 out_host_busy:
827 return SCSI_MLQUEUE_HOST_BUSY; 837 return SCSI_MLQUEUE_HOST_BUSY;
@@ -969,12 +979,12 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
969 if (lpfc_cmd == NULL) 979 if (lpfc_cmd == NULL)
970 goto out; 980 goto out;
971 981
972 lpfc_cmd->pCmd = cmnd;
973 lpfc_cmd->timeout = 60; 982 lpfc_cmd->timeout = 60;
974 lpfc_cmd->scsi_hba = phba; 983 lpfc_cmd->scsi_hba = phba;
975 lpfc_cmd->rdata = rdata; 984 lpfc_cmd->rdata = rdata;
976 985
977 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); 986 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
987 FCP_LUN_RESET);
978 if (!ret) 988 if (!ret)
979 goto out_free_scsi_buf; 989 goto out_free_scsi_buf;
980 990
@@ -1001,7 +1011,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1001 cmd_status = iocbqrsp->iocb.ulpStatus; 1011 cmd_status = iocbqrsp->iocb.ulpStatus;
1002 1012
1003 lpfc_sli_release_iocbq(phba, iocbqrsp); 1013 lpfc_sli_release_iocbq(phba, iocbqrsp);
1004 lpfc_release_scsi_buf(phba, lpfc_cmd);
1005 1014
1006 /* 1015 /*
1007 * All outstanding txcmplq I/Os should have been aborted by the device. 1016 * All outstanding txcmplq I/Os should have been aborted by the device.
@@ -1040,6 +1049,8 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1040 } 1049 }
1041 1050
1042out_free_scsi_buf: 1051out_free_scsi_buf:
1052 lpfc_release_scsi_buf(phba, lpfc_cmd);
1053
1043 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1054 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1044 "%d:0713 SCSI layer issued LUN reset (%d, %d) " 1055 "%d:0713 SCSI layer issued LUN reset (%d, %d) "
1045 "Data: x%x x%x x%x\n", 1056 "Data: x%x x%x x%x\n",
@@ -1070,7 +1081,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1070 1081
1071 /* The lpfc_cmd storage is reused. Set all loop invariants. */ 1082 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1072 lpfc_cmd->timeout = 60; 1083 lpfc_cmd->timeout = 60;
1073 lpfc_cmd->pCmd = cmnd;
1074 lpfc_cmd->scsi_hba = phba; 1084 lpfc_cmd->scsi_hba = phba;
1075 1085
1076 /* 1086 /*
@@ -1078,7 +1088,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1078 * targets known to the driver. Should any target reset 1088 * targets known to the driver. Should any target reset
1079 * fail, this routine returns failure to the midlayer. 1089 * fail, this routine returns failure to the midlayer.
1080 */ 1090 */
1081 for (i = 0; i < MAX_FCP_TARGET; i++) { 1091 for (i = 0; i < LPFC_MAX_TARGET; i++) {
1082 /* Search the mapped list for this target ID */ 1092 /* Search the mapped list for this target ID */
1083 match = 0; 1093 match = 0;
1084 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1094 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
@@ -1090,8 +1100,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1090 if (!match) 1100 if (!match)
1091 continue; 1101 continue;
1092 1102
1093 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, 1103 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
1094 i, ndlp->rport->dd_data); 1104 ndlp->rport->dd_data);
1095 if (ret != SUCCESS) { 1105 if (ret != SUCCESS) {
1096 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1106 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1097 "%d:0713 Bus Reset on target %d failed\n", 1107 "%d:0713 Bus Reset on target %d failed\n",
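
Reviewer note: lpfc_scsi.c now splits DMA teardown out of lpfc_release_scsi_buf() into lpfc_scsi_unprep_dma_buf(), so every completion and error path unmaps explicitly before handing the buffer back to the free list. The toy below only models that pairing discipline (map, then always unmap before release); the struct and helpers are invented for the sketch and are not the driver's.

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-in for a per-command buffer with a DMA mapping flag. */
    struct scsi_buf { int mapped; };

    static int  prep_dma(struct scsi_buf *b)    { b->mapped = 1; printf("map\n"); return 0; }
    static void unprep_dma(struct scsi_buf *b)  { if (b->mapped) { b->mapped = 0; printf("unmap\n"); } }
    static void release_buf(struct scsi_buf *b) { printf("back to free list\n"); free(b); }

    static int issue(struct scsi_buf *b, int fail)
    {
        if (prep_dma(b))
            return -1;
        if (fail) {                 /* error path: undo the mapping, then release */
            unprep_dma(b);
            release_buf(b);
            return -1;
        }
        unprep_dma(b);              /* completion path: same unmap-before-release order */
        release_buf(b);
        return 0;
    }

    int main(void)
    {
        struct scsi_buf *b = calloc(1, sizeof(*b));
        return b ? issue(b, 0) : 1;
    }
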
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index bb69a7a1ec59..350a625fa224 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -191,35 +191,12 @@ static int
191lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, 191lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) 192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
193{ 193{
194 uint16_t iotag;
195
196 list_add_tail(&piocb->list, &pring->txcmplq); 194 list_add_tail(&piocb->list, &pring->txcmplq);
197 pring->txcmplq_cnt++; 195 pring->txcmplq_cnt++;
198 if (unlikely(pring->ringno == LPFC_ELS_RING)) 196 if (unlikely(pring->ringno == LPFC_ELS_RING))
199 mod_timer(&phba->els_tmofunc, 197 mod_timer(&phba->els_tmofunc,
200 jiffies + HZ * (phba->fc_ratov << 1)); 198 jiffies + HZ * (phba->fc_ratov << 1));
201 199
202 if (pring->fast_lookup) {
203 /* Setup fast lookup based on iotag for completion */
204 iotag = piocb->iocb.ulpIoTag;
205 if (iotag && (iotag < pring->fast_iotag))
206 *(pring->fast_lookup + iotag) = piocb;
207 else {
208
209 /* Cmd ring <ringno> put: iotag <iotag> greater then
210 configured max <fast_iotag> wd0 <icmd> */
211 lpfc_printf_log(phba,
212 KERN_ERR,
213 LOG_SLI,
214 "%d:0316 Cmd ring %d put: iotag x%x "
215 "greater then configured max x%x "
216 "wd0 x%x\n",
217 phba->brd_no,
218 pring->ringno, iotag,
219 pring->fast_iotag,
220 *(((uint32_t *)(&piocb->iocb)) + 7));
221 }
222 }
223 return (0); 200 return (0);
224} 201}
225 202
@@ -601,7 +578,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
601 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus 578 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
602 <status> */ 579 <status> */
603 lpfc_printf_log(phba, 580 lpfc_printf_log(phba,
604 KERN_ERR, 581 KERN_WARNING,
605 LOG_MBOX | LOG_SLI, 582 LOG_MBOX | LOG_SLI,
606 "%d:0304 Stray Mailbox Interrupt " 583 "%d:0304 Stray Mailbox Interrupt "
607 "mbxCommand x%x mbxStatus x%x\n", 584 "mbxCommand x%x mbxStatus x%x\n",
@@ -1570,8 +1547,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1570 1547
1571void lpfc_reset_barrier(struct lpfc_hba * phba) 1548void lpfc_reset_barrier(struct lpfc_hba * phba)
1572{ 1549{
1573 uint32_t * resp_buf; 1550 uint32_t __iomem *resp_buf;
1574 uint32_t * mbox_buf; 1551 uint32_t __iomem *mbox_buf;
1575 volatile uint32_t mbox; 1552 volatile uint32_t mbox;
1576 uint32_t hc_copy; 1553 uint32_t hc_copy;
1577 int i; 1554 int i;
@@ -1587,7 +1564,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1587 * Tell the other part of the chip to suspend temporarily all 1564 * Tell the other part of the chip to suspend temporarily all
1588 * its DMA activity. 1565 * its DMA activity.
1589 */ 1566 */
1590 resp_buf = (uint32_t *)phba->MBslimaddr; 1567 resp_buf = phba->MBslimaddr;
1591 1568
1592 /* Disable the error attention */ 1569 /* Disable the error attention */
1593 hc_copy = readl(phba->HCregaddr); 1570 hc_copy = readl(phba->HCregaddr);
@@ -1605,7 +1582,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1605 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 1582 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1606 1583
1607 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 1584 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1608 mbox_buf = (uint32_t *)phba->MBslimaddr; 1585 mbox_buf = phba->MBslimaddr;
1609 writel(mbox, mbox_buf); 1586 writel(mbox, mbox_buf);
1610 1587
1611 for (i = 0; 1588 for (i = 0;
@@ -1805,7 +1782,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1805 skip_post = 0; 1782 skip_post = 0;
1806 word0 = 0; /* This is really setting up word1 */ 1783 word0 = 0; /* This is really setting up word1 */
1807 } 1784 }
1808 to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t); 1785 to_slim = phba->MBslimaddr + sizeof (uint32_t);
1809 writel(*(uint32_t *) mb, to_slim); 1786 writel(*(uint32_t *) mb, to_slim);
1810 readl(to_slim); /* flush */ 1787 readl(to_slim); /* flush */
1811 1788
@@ -2659,8 +2636,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2659 2636
2660 INIT_LIST_HEAD(&(pring->txq)); 2637 INIT_LIST_HEAD(&(pring->txq));
2661 2638
2662 kfree(pring->fast_lookup);
2663 pring->fast_lookup = NULL;
2664 } 2639 }
2665 2640
2666 spin_unlock_irqrestore(phba->host->host_lock, flags); 2641 spin_unlock_irqrestore(phba->host->host_lock, flags);
@@ -3110,6 +3085,24 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3110 return retval; 3085 return retval;
3111} 3086}
3112 3087
3088int
3089lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3090{
3091 int i = 0;
3092
3093 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
3094 if (i++ > LPFC_MBOX_TMO * 1000)
3095 return 1;
3096
3097 if (lpfc_sli_handle_mb_event(phba) == 0)
3098 i = 0;
3099
3100 msleep(1);
3101 }
3102
3103 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3104}
3105
3113irqreturn_t 3106irqreturn_t
3114lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) 3107lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
3115{ 3108{
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index a52d6c6cf083..d8ef0d2894d4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -135,8 +135,6 @@ struct lpfc_sli_ring {
135 uint32_t fast_iotag; /* max fastlookup based iotag */ 135 uint32_t fast_iotag; /* max fastlookup based iotag */
136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */ 136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */
137 uint32_t iotag_max; /* max iotag value to use */ 137 uint32_t iotag_max; /* max iotag value to use */
138 struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by
139 iotag */
140 struct list_head txq; 138 struct list_head txq;
141 uint16_t txq_cnt; /* current length of queue */ 139 uint16_t txq_cnt; /* current length of queue */
142 uint16_t txq_max; /* max length */ 140 uint16_t txq_max; /* max length */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6b737568b831..10e89c6ae823 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.6" 21#define LPFC_DRIVER_VERSION "8.1.7"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 93edaa8696cf..89ef34df5a1d 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -378,7 +378,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
378 int nseg; 378 int nseg;
379 379
380 total = 0; 380 total = 0;
381 scl = (struct scatterlist *) cmd->buffer; 381 scl = (struct scatterlist *) cmd->request_buffer;
382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, 382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,
383 cmd->sc_data_direction); 383 cmd->sc_data_direction);
384 for (i = 0; i < nseg; ++i) { 384 for (i = 0; i < nseg; ++i) {
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index c88717727be8..5572981a9f92 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1268,7 +1268,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1268 if (cmd->use_sg > 0) { 1268 if (cmd->use_sg > 0) {
1269 int nseg; 1269 int nseg;
1270 total = 0; 1270 total = 0;
1271 scl = (struct scatterlist *) cmd->buffer; 1271 scl = (struct scatterlist *) cmd->request_buffer;
1272 off = ms->data_ptr; 1272 off = ms->data_ptr;
1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, 1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
1274 cmd->sc_data_direction); 1274 cmd->sc_data_direction);
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index 7abf64d1bfc9..0bd9c60e6455 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -169,8 +169,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
169 SCpnt->request->rq_status = RQ_SCSI_BUSY; 169 SCpnt->request->rq_status = RQ_SCSI_BUSY;
170 170
171 SCpnt->done = pluto_detect_done; 171 SCpnt->done = pluto_detect_done;
172 SCpnt->bufflen = 256;
173 SCpnt->buffer = fcs[i].inquiry;
174 SCpnt->request_bufflen = 256; 172 SCpnt->request_bufflen = 256;
175 SCpnt->request_buffer = fcs[i].inquiry; 173 SCpnt->request_buffer = fcs[i].inquiry;
176 PLD(("set up %d %08lx\n", i, (long)SCpnt)) 174 PLD(("set up %d %08lx\n", i, (long)SCpnt))
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 69e0551a81d2..5b2f0741a55b 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -874,7 +874,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
874 if (Cmnd->use_sg) { 874 if (Cmnd->use_sg) {
875 int sg_count; 875 int sg_count;
876 876
877 sg = (struct scatterlist *) Cmnd->buffer; 877 sg = (struct scatterlist *) Cmnd->request_buffer;
878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); 878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
879 879
880 ds = cmd->dataseg; 880 ds = cmd->dataseg;
@@ -1278,7 +1278,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1278 1278
1279 if (Cmnd->use_sg) { 1279 if (Cmnd->use_sg) {
1280 sbus_unmap_sg(qpti->sdev, 1280 sbus_unmap_sg(qpti->sdev,
1281 (struct scatterlist *)Cmnd->buffer, 1281 (struct scatterlist *)Cmnd->request_buffer,
1282 Cmnd->use_sg, 1282 Cmnd->use_sg,
1283 Cmnd->sc_data_direction); 1283 Cmnd->sc_data_direction);
1284 } else { 1284 } else {
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 64631bd38952..4776f4e55839 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -269,8 +269,15 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 board_20619 }, 270 board_20619 },
271 271
272/* TODO: remove all associated board_20771 code, as it completely
273 * duplicates board_2037x code, unless reason for separation can be
274 * divined.
275 */
276#if 0
272 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 277 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
273 board_20771 }, 278 board_20771 },
279#endif
280
274 { } /* terminate list */ 281 { } /* terminate list */
275}; 282};
276 283
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ab7df0dcfe8..b332caddd5b3 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -346,7 +346,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
346 if (level > 3) { 346 if (level > 3) {
347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
348 " done = 0x%p, queuecommand 0x%p\n", 348 " done = 0x%p, queuecommand 0x%p\n",
349 cmd->buffer, cmd->bufflen, 349 cmd->request_buffer, cmd->request_bufflen,
350 cmd->done, 350 cmd->done,
351 sdev->host->hostt->queuecommand); 351 sdev->host->hostt->queuecommand);
352 352
@@ -661,11 +661,6 @@ void __scsi_done(struct scsi_cmnd *cmd)
661 */ 661 */
662int scsi_retry_command(struct scsi_cmnd *cmd) 662int scsi_retry_command(struct scsi_cmnd *cmd)
663{ 663{
664 /*
665 * Restore the SCSI command state.
666 */
667 scsi_setup_cmd_retry(cmd);
668
669 /* 664 /*
670 * Zero the sense information from the last time we tried 665 * Zero the sense information from the last time we tried
671 * this command. 666 * this command.
@@ -711,10 +706,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
711 "Notifying upper driver of completion " 706 "Notifying upper driver of completion "
712 "(result %x)\n", cmd->result)); 707 "(result %x)\n", cmd->result));
713 708
714 /*
715 * We can get here with use_sg=0, causing a panic in the upper level
716 */
717 cmd->use_sg = cmd->old_use_sg;
718 cmd->done(cmd); 709 cmd->done(cmd);
719} 710}
720EXPORT_SYMBOL(scsi_finish_command); 711EXPORT_SYMBOL(scsi_finish_command);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9c63b00773c4..a80303c6b3fd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -286,7 +286,7 @@ static int inquiry_evpd_83(unsigned char * arr, int target_dev_id,
286 int dev_id_num, const char * dev_id_str, 286 int dev_id_num, const char * dev_id_str,
287 int dev_id_str_len); 287 int dev_id_str_len);
288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id); 288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);
289static void do_create_driverfs_files(void); 289static int do_create_driverfs_files(void);
290static void do_remove_driverfs_files(void); 290static void do_remove_driverfs_files(void);
291 291
292static int sdebug_add_adapter(void); 292static int sdebug_add_adapter(void);
@@ -2487,19 +2487,22 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,
2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
2488 sdebug_add_host_store); 2488 sdebug_add_host_store);
2489 2489
2490static void do_create_driverfs_files(void) 2490static int do_create_driverfs_files(void)
2491{ 2491{
2492 driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); 2492 int ret;
2493 driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); 2493
2494 driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2494 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
2495 driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2495 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
2496 driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2496 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
2497 driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2497 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
2498 driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2498 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
2499 driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 2499 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2500 driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2500 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2501 driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 2501 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2502 driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2502 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
2503 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
2504 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2505 return ret;
2503} 2506}
2504 2507
2505static void do_remove_driverfs_files(void) 2508static void do_remove_driverfs_files(void)
@@ -2522,6 +2525,7 @@ static int __init scsi_debug_init(void)
2522 unsigned int sz; 2525 unsigned int sz;
2523 int host_to_add; 2526 int host_to_add;
2524 int k; 2527 int k;
2528 int ret;
2525 2529
2526 if (scsi_debug_dev_size_mb < 1) 2530 if (scsi_debug_dev_size_mb < 1)
2527 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2531 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
@@ -2560,12 +2564,32 @@ static int __init scsi_debug_init(void)
2560 if (scsi_debug_num_parts > 0) 2564 if (scsi_debug_num_parts > 0)
2561 sdebug_build_parts(fake_storep); 2565 sdebug_build_parts(fake_storep);
2562 2566
2563 init_all_queued(); 2567 ret = device_register(&pseudo_primary);
2568 if (ret < 0) {
2569 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
2570 ret);
2571 goto free_vm;
2572 }
2573 ret = bus_register(&pseudo_lld_bus);
2574 if (ret < 0) {
2575 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
2576 ret);
2577 goto dev_unreg;
2578 }
2579 ret = driver_register(&sdebug_driverfs_driver);
2580 if (ret < 0) {
2581 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
2582 ret);
2583 goto bus_unreg;
2584 }
2585 ret = do_create_driverfs_files();
2586 if (ret < 0) {
2587 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
2588 ret);
2589 goto del_files;
2590 }
2564 2591
2565 device_register(&pseudo_primary); 2592 init_all_queued();
2566 bus_register(&pseudo_lld_bus);
2567 driver_register(&sdebug_driverfs_driver);
2568 do_create_driverfs_files();
2569 2593
2570 sdebug_driver_template.proc_name = (char *)sdebug_proc_name; 2594 sdebug_driver_template.proc_name = (char *)sdebug_proc_name;
2571 2595
@@ -2585,6 +2609,18 @@ static int __init scsi_debug_init(void)
2585 scsi_debug_add_host); 2609 scsi_debug_add_host);
2586 } 2610 }
2587 return 0; 2611 return 0;
2612
2613del_files:
2614 do_remove_driverfs_files();
2615 driver_unregister(&sdebug_driverfs_driver);
2616bus_unreg:
2617 bus_unregister(&pseudo_lld_bus);
2618dev_unreg:
2619 device_unregister(&pseudo_primary);
2620free_vm:
2621 vfree(fake_storep);
2622
2623 return ret;
2588} 2624}
2589 2625
2590static void __exit scsi_debug_exit(void) 2626static void __exit scsi_debug_exit(void)
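
Reviewer note: scsi_debug_init() now checks device_register(), bus_register(), driver_register() and the attribute-file creation, unwinding whatever already succeeded through the del_files/bus_unreg/dev_unreg/free_vm labels. The standalone sketch below shows the same goto-ladder error unwinding with made-up register_*/unregister_* steps; it is a pattern illustration, not the scsi_debug code.

    #include <stdio.h>

    static int  register_a(void)   { puts("register a"); return 0; }
    static int  register_b(void)   { puts("register b"); return 0; }
    static int  register_c(void)   { puts("register c"); return -1; }  /* force a failure */
    static void unregister_b(void) { puts("unregister b"); }
    static void unregister_a(void) { puts("unregister a"); }

    /* Classic kernel-style init: each failure jumps to a label that unwinds
     * exactly the steps that already succeeded, in reverse order. */
    static int demo_init(void)
    {
        int ret;

        ret = register_a();
        if (ret < 0)
            goto out;
        ret = register_b();
        if (ret < 0)
            goto undo_a;
        ret = register_c();
        if (ret < 0)
            goto undo_b;
        return 0;

    undo_b:
        unregister_b();
    undo_a:
        unregister_a();
    out:
        return ret;
    }

    int main(void)
    {
        printf("demo_init() = %d\n", demo_init());
        return 0;
    }
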
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6683d596234a..6a5b731bd5ba 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,19 +460,67 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
460 * Return value: 460 * Return value:
461 * SUCCESS or FAILED or NEEDS_RETRY 461 * SUCCESS or FAILED or NEEDS_RETRY
462 **/ 462 **/
463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) 463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense)
464{ 464{
465 struct scsi_device *sdev = scmd->device; 465 struct scsi_device *sdev = scmd->device;
466 struct Scsi_Host *shost = sdev->host; 466 struct Scsi_Host *shost = sdev->host;
467 int old_result = scmd->result;
467 DECLARE_COMPLETION(done); 468 DECLARE_COMPLETION(done);
468 unsigned long timeleft; 469 unsigned long timeleft;
469 unsigned long flags; 470 unsigned long flags;
471 unsigned char old_cmnd[MAX_COMMAND_SIZE];
472 enum dma_data_direction old_data_direction;
473 unsigned short old_use_sg;
474 unsigned char old_cmd_len;
475 unsigned old_bufflen;
476 void *old_buffer;
470 int rtn; 477 int rtn;
471 478
479 /*
480 * We need saved copies of a number of fields - this is because
481 * error handling may need to overwrite these with different values
482 * to run different commands, and once error handling is complete,
483 * we will need to restore these values prior to running the actual
484 * command.
485 */
486 old_buffer = scmd->request_buffer;
487 old_bufflen = scmd->request_bufflen;
488 memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd));
489 old_data_direction = scmd->sc_data_direction;
490 old_cmd_len = scmd->cmd_len;
491 old_use_sg = scmd->use_sg;
492
493 if (copy_sense) {
494 int gfp_mask = GFP_ATOMIC;
495
496 if (shost->hostt->unchecked_isa_dma)
497 gfp_mask |= __GFP_DMA;
498
499 scmd->sc_data_direction = DMA_FROM_DEVICE;
500 scmd->request_bufflen = 252;
501 scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
502 if (!scmd->request_buffer)
503 return FAILED;
504 } else {
505 scmd->request_buffer = NULL;
506 scmd->request_bufflen = 0;
507 scmd->sc_data_direction = DMA_NONE;
508 }
509
510 scmd->underflow = 0;
511 scmd->use_sg = 0;
512 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
513
472 if (sdev->scsi_level <= SCSI_2) 514 if (sdev->scsi_level <= SCSI_2)
473 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | 515 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
474 (sdev->lun << 5 & 0xe0); 516 (sdev->lun << 5 & 0xe0);
475 517
518 /*
519 * Zero the sense buffer. The scsi spec mandates that any
520 * untransferred sense data should be interpreted as being zero.
521 */
522 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
523
476 shost->eh_action = &done; 524 shost->eh_action = &done;
477 525
478 spin_lock_irqsave(shost->host_lock, flags); 526 spin_lock_irqsave(shost->host_lock, flags);
@@ -522,6 +570,29 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
522 rtn = FAILED; 570 rtn = FAILED;
523 } 571 }
524 572
573
574 /*
575 * Last chance to have valid sense data.
576 */
577 if (copy_sense) {
578 if (!SCSI_SENSE_VALID(scmd)) {
579 memcpy(scmd->sense_buffer, scmd->request_buffer,
580 sizeof(scmd->sense_buffer));
581 }
582 kfree(scmd->request_buffer);
583 }
584
585
586 /*
587 * Restore original data
588 */
589 scmd->request_buffer = old_buffer;
590 scmd->request_bufflen = old_bufflen;
591 memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd));
592 scmd->sc_data_direction = old_data_direction;
593 scmd->cmd_len = old_cmd_len;
594 scmd->use_sg = old_use_sg;
595 scmd->result = old_result;
525 return rtn; 596 return rtn;
526} 597}
527 598
@@ -537,56 +608,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
537static int scsi_request_sense(struct scsi_cmnd *scmd) 608static int scsi_request_sense(struct scsi_cmnd *scmd)
538{ 609{
539 static unsigned char generic_sense[6] = 610 static unsigned char generic_sense[6] =
540 {REQUEST_SENSE, 0, 0, 0, 252, 0}; 611 {REQUEST_SENSE, 0, 0, 0, 252, 0};
541 unsigned char *scsi_result;
542 int saved_result;
543 int rtn;
544 612
545 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); 613 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
546 614 return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1);
547 scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0));
548
549
550 if (unlikely(!scsi_result)) {
551 printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
552 __FUNCTION__);
553 return FAILED;
554 }
555
556 /*
557 * zero the sense buffer. some host adapters automatically always
558 * request sense, so it is not a good idea that
559 * scmd->request_buffer and scmd->sense_buffer point to the same
560 * address (db). 0 is not a valid sense code.
561 */
562 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
563 memset(scsi_result, 0, 252);
564
565 saved_result = scmd->result;
566 scmd->request_buffer = scsi_result;
567 scmd->request_bufflen = 252;
568 scmd->use_sg = 0;
569 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
570 scmd->sc_data_direction = DMA_FROM_DEVICE;
571 scmd->underflow = 0;
572
573 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
574
575 /* last chance to have valid sense data */
576 if(!SCSI_SENSE_VALID(scmd)) {
577 memcpy(scmd->sense_buffer, scmd->request_buffer,
578 sizeof(scmd->sense_buffer));
579 }
580
581 kfree(scsi_result);
582
583 /*
584 * when we eventually call scsi_finish, we really wish to complete
585 * the original request, so let's restore the original data. (db)
586 */
587 scsi_setup_cmd_retry(scmd);
588 scmd->result = saved_result;
589 return rtn;
590} 615}
591 616
592/** 617/**
@@ -605,12 +630,6 @@ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
605{ 630{
606 scmd->device->host->host_failed--; 631 scmd->device->host->host_failed--;
607 scmd->eh_eflags = 0; 632 scmd->eh_eflags = 0;
608
609 /*
610 * set this back so that the upper level can correctly free up
611 * things.
612 */
613 scsi_setup_cmd_retry(scmd);
614 list_move_tail(&scmd->eh_entry, done_q); 633 list_move_tail(&scmd->eh_entry, done_q);
615} 634}
616EXPORT_SYMBOL(scsi_eh_finish_cmd); 635EXPORT_SYMBOL(scsi_eh_finish_cmd);
@@ -715,47 +734,26 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
715{ 734{
716 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; 735 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
717 int retry_cnt = 1, rtn; 736 int retry_cnt = 1, rtn;
718 int saved_result;
719 737
720retry_tur: 738retry_tur:
721 memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); 739 memcpy(scmd->cmnd, tur_command, sizeof(tur_command));
722 740
723 /*
724 * zero the sense buffer. the scsi spec mandates that any
725 * untransferred sense data should be interpreted as being zero.
726 */
727 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
728
729 saved_result = scmd->result;
730 scmd->request_buffer = NULL;
731 scmd->request_bufflen = 0;
732 scmd->use_sg = 0;
733 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
734 scmd->underflow = 0;
735 scmd->sc_data_direction = DMA_NONE;
736 741
737 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); 742 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0);
738 743
739 /*
740 * when we eventually call scsi_finish, we really wish to complete
741 * the original request, so let's restore the original data. (db)
742 */
743 scsi_setup_cmd_retry(scmd);
744 scmd->result = saved_result;
745
746 /*
747 * hey, we are done. let's look to see what happened.
748 */
749 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 744 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
750 __FUNCTION__, scmd, rtn)); 745 __FUNCTION__, scmd, rtn));
751 if (rtn == SUCCESS) 746
752 return 0; 747 switch (rtn) {
753 else if (rtn == NEEDS_RETRY) { 748 case NEEDS_RETRY:
754 if (retry_cnt--) 749 if (retry_cnt--)
755 goto retry_tur; 750 goto retry_tur;
751 /*FALLTHRU*/
752 case SUCCESS:
756 return 0; 753 return 0;
754 default:
755 return 1;
757 } 756 }
758 return 1;
759} 757}
760 758
761/** 759/**
@@ -837,44 +835,16 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
837static int scsi_eh_try_stu(struct scsi_cmnd *scmd) 835static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
838{ 836{
839 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 837 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
840 int rtn;
841 int saved_result;
842 838
843 if (!scmd->device->allow_restart) 839 if (scmd->device->allow_restart) {
844 return 1; 840 int rtn;
845
846 memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
847
848 /*
849 * zero the sense buffer. the scsi spec mandates that any
850 * untransferred sense data should be interpreted as being zero.
851 */
852 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
853
854 saved_result = scmd->result;
855 scmd->request_buffer = NULL;
856 scmd->request_bufflen = 0;
857 scmd->use_sg = 0;
858 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
859 scmd->underflow = 0;
860 scmd->sc_data_direction = DMA_NONE;
861 841
862 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT); 842 memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
863 843 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0);
864 /* 844 if (rtn == SUCCESS)
865 * when we eventually call scsi_finish, we really wish to complete 845 return 0;
866 * the original request, so let's restore the original data. (db) 846 }
867 */
868 scsi_setup_cmd_retry(scmd);
869 scmd->result = saved_result;
870 847
871 /*
872 * hey, we are done. let's look to see what happened.
873 */
874 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
875 __FUNCTION__, scmd, rtn));
876 if (rtn == SUCCESS)
877 return 0;
878 return 1; 848 return 1;
879} 849}
880 850
@@ -1684,8 +1654,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1684 1654
1685 scmd->scsi_done = scsi_reset_provider_done_command; 1655 scmd->scsi_done = scsi_reset_provider_done_command;
1686 scmd->done = NULL; 1656 scmd->done = NULL;
1687 scmd->buffer = NULL;
1688 scmd->bufflen = 0;
1689 scmd->request_buffer = NULL; 1657 scmd->request_buffer = NULL;
1690 scmd->request_bufflen = 0; 1658 scmd->request_bufflen = 0;
1691 1659
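
Reviewer note: scsi_send_eh_cmnd() now takes a copy_sense flag and does its own save/restore of the command fields it rewrites (CDB, buffer, length, direction, use_sg, result), which is what lets scsi_request_sense(), scsi_eh_tur() and scsi_eh_try_stu() shrink to a memcpy of the CDB plus one call. The snippet below is a toy userspace model of that snapshot-run-restore shape; struct cmd and its fields are stand-ins, not struct scsi_cmnd.

    #include <stdio.h>
    #include <string.h>

    /* Toy stand-in for the handful of scsi_cmnd fields the EH path rewrites. */
    struct cmd {
        unsigned char cdb[16];
        void *buffer;
        unsigned bufflen;
        int result;
    };

    static void run_eh_command(struct cmd *c, const unsigned char *eh_cdb, size_t len)
    {
        struct cmd saved = *c;                  /* snapshot everything we will clobber */

        memcpy(c->cdb, eh_cdb, len);            /* e.g. REQUEST SENSE or TEST UNIT READY */
        c->buffer = NULL;
        c->bufflen = 0;
        c->result = 0;
        printf("issuing EH cdb 0x%02x\n", c->cdb[0]);
        /* ... wait for completion, inspect sense data ... */

        *c = saved;                             /* restore before retrying the original command */
    }

    int main(void)
    {
        static const unsigned char tur[6] = { 0x00, 0, 0, 0, 0, 0 };
        struct cmd c = { .cdb = { 0x28 }, .bufflen = 4096, .result = 0 };

        run_eh_command(&c, tur, sizeof(tur));
        printf("restored cdb 0x%02x bufflen %u\n", c.cdb[0], c.bufflen);
        return 0;
    }
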
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a89c4115cfba..32293f451669 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
110 sshdr.asc, sshdr.ascq); 110 sshdr.asc, sshdr.ascq);
111 break; 111 break;
112 case NOT_READY: /* This happens if there is no disc in drive */ 112 case NOT_READY: /* This happens if there is no disc in drive */
113 if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { 113 if (sdev->removable)
114 printk(KERN_INFO "Device not ready. Make sure"
115 " there is a disc in the drive.\n");
116 break; 114 break;
117 }
118 case UNIT_ATTENTION: 115 case UNIT_ATTENTION:
119 if (sdev->removable) { 116 if (sdev->removable) {
120 sdev->changed = 1; 117 sdev->changed = 1;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 08af9aae7df3..077c1c691210 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -436,60 +436,16 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
436 * 436 *
437 * Arguments: cmd - command that is ready to be queued. 437 * Arguments: cmd - command that is ready to be queued.
438 * 438 *
439 * Returns: Nothing
440 *
441 * Notes: This function has the job of initializing a number of 439 * Notes: This function has the job of initializing a number of
442 * fields related to error handling. Typically this will 440 * fields related to error handling. Typically this will
443 * be called once for each command, as required. 441 * be called once for each command, as required.
444 */ 442 */
445static int scsi_init_cmd_errh(struct scsi_cmnd *cmd) 443static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
446{ 444{
447 cmd->serial_number = 0; 445 cmd->serial_number = 0;
448
449 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); 446 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
450
451 if (cmd->cmd_len == 0) 447 if (cmd->cmd_len == 0)
452 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 448 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
453
454 /*
455 * We need saved copies of a number of fields - this is because
456 * error handling may need to overwrite these with different values
457 * to run different commands, and once error handling is complete,
458 * we will need to restore these values prior to running the actual
459 * command.
460 */
461 cmd->old_use_sg = cmd->use_sg;
462 cmd->old_cmd_len = cmd->cmd_len;
463 cmd->sc_old_data_direction = cmd->sc_data_direction;
464 cmd->old_underflow = cmd->underflow;
465 memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
466 cmd->buffer = cmd->request_buffer;
467 cmd->bufflen = cmd->request_bufflen;
468
469 return 1;
470}
471
472/*
473 * Function: scsi_setup_cmd_retry()
474 *
475 * Purpose: Restore the command state for a retry
476 *
477 * Arguments: cmd - command to be restored
478 *
479 * Returns: Nothing
480 *
481 * Notes: Immediately prior to retrying a command, we need
482 * to restore certain fields that we saved above.
483 */
484void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
485{
486 memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
487 cmd->request_buffer = cmd->buffer;
488 cmd->request_bufflen = cmd->bufflen;
489 cmd->use_sg = cmd->old_use_sg;
490 cmd->cmd_len = cmd->old_cmd_len;
491 cmd->sc_data_direction = cmd->sc_old_data_direction;
492 cmd->underflow = cmd->old_underflow;
493} 449}
494 450
495void scsi_device_unbusy(struct scsi_device *sdev) 451void scsi_device_unbusy(struct scsi_device *sdev)
@@ -807,22 +763,13 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
807 */ 763 */
808static void scsi_release_buffers(struct scsi_cmnd *cmd) 764static void scsi_release_buffers(struct scsi_cmnd *cmd)
809{ 765{
810 struct request *req = cmd->request;
811
812 /*
813 * Free up any indirection buffers we allocated for DMA purposes.
814 */
815 if (cmd->use_sg) 766 if (cmd->use_sg)
816 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 767 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
817 else if (cmd->request_buffer != req->buffer)
818 kfree(cmd->request_buffer);
819 768
820 /* 769 /*
821 * Zero these out. They now point to freed memory, and it is 770 * Zero these out. They now point to freed memory, and it is
822 * dangerous to hang onto the pointers. 771 * dangerous to hang onto the pointers.
823 */ 772 */
824 cmd->buffer = NULL;
825 cmd->bufflen = 0;
826 cmd->request_buffer = NULL; 773 cmd->request_buffer = NULL;
827 cmd->request_bufflen = 0; 774 cmd->request_bufflen = 0;
828} 775}
@@ -858,7 +805,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
858void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 805void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859{ 806{
860 int result = cmd->result; 807 int result = cmd->result;
861 int this_count = cmd->bufflen; 808 int this_count = cmd->request_bufflen;
862 request_queue_t *q = cmd->device->request_queue; 809 request_queue_t *q = cmd->device->request_queue;
863 struct request *req = cmd->request; 810 struct request *req = cmd->request;
864 int clear_errors = 1; 811 int clear_errors = 1;
@@ -866,28 +813,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
866 int sense_valid = 0; 813 int sense_valid = 0;
867 int sense_deferred = 0; 814 int sense_deferred = 0;
868 815
869 /* 816 scsi_release_buffers(cmd);
870 * Free up any indirection buffers we allocated for DMA purposes.
871 * For the case of a READ, we need to copy the data out of the
872 * bounce buffer and into the real buffer.
873 */
874 if (cmd->use_sg)
875 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
876 else if (cmd->buffer != req->buffer) {
877 if (rq_data_dir(req) == READ) {
878 unsigned long flags;
879 char *to = bio_kmap_irq(req->bio, &flags);
880 memcpy(to, cmd->buffer, cmd->bufflen);
881 bio_kunmap_irq(to, &flags);
882 }
883 kfree(cmd->buffer);
884 }
885 817
886 if (result) { 818 if (result) {
887 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 819 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
888 if (sense_valid) 820 if (sense_valid)
889 sense_deferred = scsi_sense_is_deferred(&sshdr); 821 sense_deferred = scsi_sense_is_deferred(&sshdr);
890 } 822 }
823
891 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 824 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
892 req->errors = result; 825 req->errors = result;
893 if (result) { 826 if (result) {
@@ -908,15 +841,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
908 } 841 }
909 842
910 /* 843 /*
911 * Zero these out. They now point to freed memory, and it is
912 * dangerous to hang onto the pointers.
913 */
914 cmd->buffer = NULL;
915 cmd->bufflen = 0;
916 cmd->request_buffer = NULL;
917 cmd->request_bufflen = 0;
918
919 /*
920 * Next deal with any sectors which we were able to correctly 844 * Next deal with any sectors which we were able to correctly
921 * handle. 845 * handle.
922 */ 846 */
@@ -1012,7 +936,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1012 if (!(req->flags & REQ_QUIET)) { 936 if (!(req->flags & REQ_QUIET)) {
1013 scmd_printk(KERN_INFO, cmd, 937 scmd_printk(KERN_INFO, cmd,
1014 "Volume overflow, CDB: "); 938 "Volume overflow, CDB: ");
1015 __scsi_print_command(cmd->data_cmnd); 939 __scsi_print_command(cmd->cmnd);
1016 scsi_print_sense("", cmd); 940 scsi_print_sense("", cmd);
1017 } 941 }
1018 /* See SSC3rXX or current. */ 942 /* See SSC3rXX or current. */
@@ -1143,7 +1067,7 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1143 * successfully. Since this is a REQ_BLOCK_PC command the 1067 * successfully. Since this is a REQ_BLOCK_PC command the
1144 * caller should check the request's errors value 1068 * caller should check the request's errors value
1145 */ 1069 */
1146 scsi_io_completion(cmd, cmd->bufflen); 1070 scsi_io_completion(cmd, cmd->request_bufflen);
1147} 1071}
1148 1072
1149static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) 1073static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e2fbe9a9d5a9..ae24c85aaeea 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -57,7 +57,6 @@ extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
57 57
58/* scsi_lib.c */ 58/* scsi_lib.c */
59extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 59extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
60extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd);
61extern void scsi_device_unbusy(struct scsi_device *sdev); 60extern void scsi_device_unbusy(struct scsi_device *sdev);
62extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 61extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
63extern void scsi_next_command(struct scsi_cmnd *cmd); 62extern void scsi_next_command(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index dd075627e605..5a625c3fddae 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -41,6 +41,7 @@ struct sas_host_attrs {
41 struct mutex lock; 41 struct mutex lock;
42 u32 next_target_id; 42 u32 next_target_id;
43 u32 next_expander_id; 43 u32 next_expander_id;
44 int next_port_id;
44}; 45};
45#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) 46#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)
46 47
@@ -146,6 +147,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
146 mutex_init(&sas_host->lock); 147 mutex_init(&sas_host->lock);
147 sas_host->next_target_id = 0; 148 sas_host->next_target_id = 0;
148 sas_host->next_expander_id = 0; 149 sas_host->next_expander_id = 0;
150 sas_host->next_port_id = 0;
149 return 0; 151 return 0;
150} 152}
151 153
@@ -327,7 +329,7 @@ sas_phy_protocol_attr(identify.target_port_protocols,
327sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", 329sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
328 unsigned long long); 330 unsigned long long);
329sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 331sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
330//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8); 332//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
331sas_phy_linkspeed_attr(negotiated_linkrate); 333sas_phy_linkspeed_attr(negotiated_linkrate);
332sas_phy_linkspeed_attr(minimum_linkrate_hw); 334sas_phy_linkspeed_attr(minimum_linkrate_hw);
333sas_phy_linkspeed_attr(minimum_linkrate); 335sas_phy_linkspeed_attr(minimum_linkrate);
@@ -590,6 +592,38 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
590} 592}
591EXPORT_SYMBOL(sas_port_alloc); 593EXPORT_SYMBOL(sas_port_alloc);
592 594
595/** sas_port_alloc_num - allocate and initialize a SAS port structure
596 *
597 * @parent: parent device
598 *
599 * Allocates a SAS port structure and a number to go with it. This
600 * interface is really for adapters where the port number has no
601 * meaning, so the sas class should manage them. It will be added to
602 * the device tree below the device specified by @parent which must be
603 * either a Scsi_Host or a sas_expander_device.
604 *
605 * Returns %NULL on error
606 */
607struct sas_port *sas_port_alloc_num(struct device *parent)
608{
609 int index;
610 struct Scsi_Host *shost = dev_to_shost(parent);
611 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
612
613 /* FIXME: use idr for this eventually */
614 mutex_lock(&sas_host->lock);
615 if (scsi_is_sas_expander_device(parent)) {
616 struct sas_rphy *rphy = dev_to_rphy(parent);
617 struct sas_expander_device *exp = rphy_to_expander_device(rphy);
618
619 index = exp->next_port_id++;
620 } else
621 index = sas_host->next_port_id++;
622 mutex_unlock(&sas_host->lock);
623 return sas_port_alloc(parent, index);
624}
625EXPORT_SYMBOL(sas_port_alloc_num);
626
593/** 627/**
594 * sas_port_add - add a SAS port to the device hierarchy 628 * sas_port_add - add a SAS port to the device hierarchy
595 * 629 *
@@ -658,6 +692,13 @@ void sas_port_delete(struct sas_port *port)
658 } 692 }
659 mutex_unlock(&port->phy_list_mutex); 693 mutex_unlock(&port->phy_list_mutex);
660 694
695 if (port->is_backlink) {
696 struct device *parent = port->dev.parent;
697
698 sysfs_remove_link(&port->dev.kobj, parent->bus_id);
699 port->is_backlink = 0;
700 }
701
661 transport_remove_device(dev); 702 transport_remove_device(dev);
662 device_del(dev); 703 device_del(dev);
663 transport_destroy_device(dev); 704 transport_destroy_device(dev);
@@ -733,6 +774,19 @@ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy)
733} 774}
734EXPORT_SYMBOL(sas_port_delete_phy); 775EXPORT_SYMBOL(sas_port_delete_phy);
735 776
777void sas_port_mark_backlink(struct sas_port *port)
778{
779 struct device *parent = port->dev.parent->parent->parent;
780
781 if (port->is_backlink)
782 return;
783 port->is_backlink = 1;
784 sysfs_create_link(&port->dev.kobj, &parent->kobj,
785 parent->bus_id);
786
787}
788EXPORT_SYMBOL(sas_port_mark_backlink);
789
736/* 790/*
737 * SAS remote PHY attributes. 791 * SAS remote PHY attributes.
738 */ 792 */
@@ -1140,7 +1194,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
1140 1194
1141 if (identify->device_type == SAS_END_DEVICE && 1195 if (identify->device_type == SAS_END_DEVICE &&
1142 rphy->scsi_target_id != -1) { 1196 rphy->scsi_target_id != -1) {
1143 scsi_scan_target(&rphy->dev, parent->port_identifier, 1197 scsi_scan_target(&rphy->dev, 0,
1144 rphy->scsi_target_id, ~0, 0); 1198 rphy->scsi_target_id, ~0, 0);
1145 } 1199 }
1146 1200
@@ -1242,15 +1296,13 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1242 1296
1243 mutex_lock(&sas_host->lock); 1297 mutex_lock(&sas_host->lock);
1244 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 1298 list_for_each_entry(rphy, &sas_host->rphy_list, list) {
1245 struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
1246
1247 if (rphy->identify.device_type != SAS_END_DEVICE || 1299 if (rphy->identify.device_type != SAS_END_DEVICE ||
1248 rphy->scsi_target_id == -1) 1300 rphy->scsi_target_id == -1)
1249 continue; 1301 continue;
1250 1302
1251 if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && 1303 if ((channel == SCAN_WILD_CARD || channel == 0) &&
1252 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { 1304 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
1253 scsi_scan_target(&rphy->dev, parent->port_identifier, 1305 scsi_scan_target(&rphy->dev, 0,
1254 rphy->scsi_target_id, lun, 1); 1306 rphy->scsi_target_id, lun, 1);
1255 } 1307 }
1256 } 1308 }
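
Reviewer note: sas_port_alloc_num() picks the next port number from a mutex-protected per-host (or per-expander) counter, so drivers with no meaningful hardware port id can let the transport class number ports for them (the FIXME notes an idr could replace this later). Below is a tiny userspace model of that counter-under-a-lock allocation; struct counter and alloc_port_num() are illustrative names only, not the SAS transport API.

    #include <pthread.h>
    #include <stdio.h>

    /* Toy model: hand out the next free port number from whichever parent
     * (host or expander) the port hangs off. */
    struct counter {
        pthread_mutex_t lock;
        int next_port_id;
    };

    static int alloc_port_num(struct counter *parent)
    {
        int id;

        pthread_mutex_lock(&parent->lock);
        id = parent->next_port_id++;
        pthread_mutex_unlock(&parent->lock);
        return id;
    }

    int main(void)
    {
        struct counter host = { PTHREAD_MUTEX_INITIALIZER, 0 };
        int a = alloc_port_num(&host);
        int b = alloc_port_num(&host);

        printf("port %d, port %d\n", a, b);
        return 0;
    }
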
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3225d31449e1..98bd3aab9739 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -502,8 +502,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
502 SCpnt->cmnd[4] = (unsigned char) this_count; 502 SCpnt->cmnd[4] = (unsigned char) this_count;
503 SCpnt->cmnd[5] = 0; 503 SCpnt->cmnd[5] = 0;
504 } 504 }
505 SCpnt->request_bufflen = SCpnt->bufflen = 505 SCpnt->request_bufflen = this_count * sdp->sector_size;
506 this_count * sdp->sector_size;
507 506
508 /* 507 /*
509 * We shouldn't disconnect in the middle of a sector, so with a dumb 508 * We shouldn't disconnect in the middle of a sector, so with a dumb
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index 3f312a84c6a7..2679ea8bff1a 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -1002,7 +1002,7 @@ connect_loop:
1002 } 1002 }
1003#endif 1003#endif
1004 1004
1005 buffer = (struct scatterlist *) SCint->buffer; 1005 buffer = (struct scatterlist *) SCint->request_buffer;
1006 len = buffer->length; 1006 len = buffer->length;
1007 data = page_address(buffer->page) + buffer->offset; 1007 data = page_address(buffer->page) + buffer->offset;
1008 } else { 1008 } else {
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index fd94408577e5..fae6e95a6298 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -360,7 +360,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
360 "mismatch count %d, bytes %d\n", 360 "mismatch count %d, bytes %d\n",
361 size, SCpnt->request_bufflen); 361 size, SCpnt->request_bufflen);
362 if (SCpnt->request_bufflen > size) 362 if (SCpnt->request_bufflen > size)
363 SCpnt->request_bufflen = SCpnt->bufflen = size; 363 SCpnt->request_bufflen = size;
364 } 364 }
365 } 365 }
366 366
@@ -387,8 +387,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
387 387
388 if (this_count > 0xffff) { 388 if (this_count > 0xffff) {
389 this_count = 0xffff; 389 this_count = 0xffff;
390 SCpnt->request_bufflen = SCpnt->bufflen = 390 SCpnt->request_bufflen = this_count * s_size;
391 this_count * s_size;
392 } 391 }
393 392
394 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 393 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 756ceb93ddc8..7f669b600677 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -368,7 +368,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], 368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); 369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
370 if (cmdstatp->have_sense) 370 if (cmdstatp->have_sense)
371 __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 371 __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
372 } ) /* end DEB */ 372 } ) /* end DEB */
373 if (!debugging) { /* Abnormal conditions for tape */ 373 if (!debugging) { /* Abnormal conditions for tape */
374 if (!cmdstatp->have_sense) 374 if (!cmdstatp->have_sense)
@@ -384,9 +384,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
384 scode != VOLUME_OVERFLOW && 384 scode != VOLUME_OVERFLOW &&
385 SRpnt->cmd[0] != MODE_SENSE && 385 SRpnt->cmd[0] != MODE_SENSE &&
386 SRpnt->cmd[0] != TEST_UNIT_READY) { 386 SRpnt->cmd[0] != TEST_UNIT_READY) {
387 printk(KERN_WARNING "%s: Error with sense data: ", name); 387
388 __scsi_print_sense("st", SRpnt->sense, 388 __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
389 SCSI_SENSE_BUFFERSIZE);
390 } 389 }
391 } 390 }
392 391
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 2ebe0d663899..2f8073b73bf3 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -517,7 +517,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
517 */ 517 */
518 518
519 if (cmd->use_sg) { 519 if (cmd->use_sg) {
520 cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 520 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
521 cmd->SCp.buffers_residual = cmd->use_sg - 1; 521 cmd->SCp.buffers_residual = cmd->use_sg - 1;
522 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); 522 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
523 cmd->SCp.this_residual = cmd->SCp.buffer->length; 523 cmd->SCp.this_residual = cmd->SCp.buffer->length;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 1f328cae5c05..6b60536ac92b 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -347,7 +347,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
347static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 347static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
348{ 348{
349 int sz = sp->use_sg - 1; 349 int sz = sp->use_sg - 1;
350 struct scatterlist *sg = (struct scatterlist *)sp->buffer; 350 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
351 351
352 while(sz >= 0) { 352 while(sz >= 0) {
353 dvma_unmap((char *)sg[sz].dma_address); 353 dvma_unmap((char *)sg[sz].dma_address);
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 680f38ab60d8..2083454db511 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -373,7 +373,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
373 */ 373 */
374 374
375 if (cmd->use_sg) { 375 if (cmd->use_sg) {
376 cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 376 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
377 cmd->SCp.buffers_residual = cmd->use_sg - 1; 377 cmd->SCp.buffers_residual = cmd->use_sg - 1;
378 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + 378 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
379 cmd->SCp.buffer->offset; 379 cmd->SCp.buffer->offset;
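
The seagate, sun3_NCR5380, sun3x_esp and wd33c93 hunks all make the same mechanical change: with the duplicated cmd->buffer/cmd->bufflen fields gone, a driver that sets use_sg must take its scatterlist from cmd->request_buffer. A rough userspace sketch of that access pattern follows; struct fake_sg and total_bytes() are stand-ins invented for illustration, not the kernel API.

/* Illustrative only: a userspace model of walking a scatter/gather
 * list the way these 2.6.18-era drivers do after switching from
 * cmd->buffer to cmd->request_buffer.  Not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct fake_sg {            /* stand-in for struct scatterlist */
    void  *vaddr;           /* kernel uses page + offset; flattened here */
    size_t length;
};

static size_t total_bytes(const struct fake_sg *sg, int nents)
{
    size_t sum = 0;
    int i;

    for (i = 0; i < nents; i++)
        sum += sg[i].length;        /* one segment per entry */
    return sum;
}

int main(void)
{
    char a[512], b[1024];
    struct fake_sg sg[] = { { a, sizeof(a) }, { b, sizeof(b) } };

    printf("%zu bytes across %zu segments\n",
           total_bytes(sg, 2), sizeof(sg) / sizeof(sg[0]));
    return 0;
}
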
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 979497f108c8..dc673e1b6fd9 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -1047,12 +1047,13 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
1047 up = &sunsab_ports[inst * 2]; 1047 up = &sunsab_ports[inst * 2];
1048 1048
1049 err = sunsab_init_one(&up[0], op, 1049 err = sunsab_init_one(&up[0], op,
1050 sizeof(union sab82532_async_regs), 1050 0,
1051 (inst * 2) + 0); 1051 (inst * 2) + 0);
1052 if (err) 1052 if (err)
1053 return err; 1053 return err;
1054 1054
1055 err = sunsab_init_one(&up[1], op, 0, 1055 err = sunsab_init_one(&up[1], op,
1056 sizeof(union sab82532_async_regs),
1056 (inst * 2) + 1); 1057 (inst * 2) + 1);
1057 if (err) { 1058 if (err) {
1058 of_iounmap(up[0].port.membase, 1059 of_iounmap(up[0].port.membase,
@@ -1117,7 +1118,7 @@ static int __init sunsab_init(void)
1117 int err; 1118 int err;
1118 1119
1119 num_channels = 0; 1120 num_channels = 0;
1120 for_each_node_by_name(dp, "su") 1121 for_each_node_by_name(dp, "se")
1121 num_channels += 2; 1122 num_channels += 2;
1122 for_each_node_by_name(dp, "serial") { 1123 for_each_node_by_name(dp, "serial") {
1123 if (of_device_is_compatible(dp, "sab82532")) 1124 if (of_device_is_compatible(dp, "sab82532"))
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index a1456d9352cb..47bc3d57e019 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -68,9 +68,6 @@ static int num_sunzilog;
68#define NUM_SUNZILOG num_sunzilog 68#define NUM_SUNZILOG num_sunzilog
69#define NUM_CHANNELS (NUM_SUNZILOG * 2) 69#define NUM_CHANNELS (NUM_SUNZILOG * 2)
70 70
71#define KEYBOARD_LINE 0x2
72#define MOUSE_LINE 0x3
73
74#define ZS_CLOCK 4915200 /* Zilog input clock rate. */ 71#define ZS_CLOCK 4915200 /* Zilog input clock rate. */
75#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */ 72#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */
76 73
@@ -1225,12 +1222,10 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
1225{ 1222{
1226 int baud, brg; 1223 int baud, brg;
1227 1224
1228 if (channel == KEYBOARD_LINE) { 1225 if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
1229 up->flags |= SUNZILOG_FLAG_CONS_KEYB;
1230 up->cflag = B1200 | CS8 | CLOCAL | CREAD; 1226 up->cflag = B1200 | CS8 | CLOCAL | CREAD;
1231 baud = 1200; 1227 baud = 1200;
1232 } else { 1228 } else {
1233 up->flags |= SUNZILOG_FLAG_CONS_MOUSE;
1234 up->cflag = B4800 | CS8 | CLOCAL | CREAD; 1229 up->cflag = B4800 | CS8 | CLOCAL | CREAD;
1235 baud = 4800; 1230 baud = 4800;
1236 } 1231 }
@@ -1243,14 +1238,14 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
1243} 1238}
1244 1239
1245#ifdef CONFIG_SERIO 1240#ifdef CONFIG_SERIO
1246static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int channel) 1241static void __init sunzilog_register_serio(struct uart_sunzilog_port *up)
1247{ 1242{
1248 struct serio *serio = &up->serio; 1243 struct serio *serio = &up->serio;
1249 1244
1250 serio->port_data = up; 1245 serio->port_data = up;
1251 1246
1252 serio->id.type = SERIO_RS232; 1247 serio->id.type = SERIO_RS232;
1253 if (channel == KEYBOARD_LINE) { 1248 if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
1254 serio->id.proto = SERIO_SUNKBD; 1249 serio->id.proto = SERIO_SUNKBD;
1255 strlcpy(serio->name, "zskbd", sizeof(serio->name)); 1250 strlcpy(serio->name, "zskbd", sizeof(serio->name));
1256 } else { 1251 } else {
@@ -1259,7 +1254,8 @@ static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int ch
1259 strlcpy(serio->name, "zsms", sizeof(serio->name)); 1254 strlcpy(serio->name, "zsms", sizeof(serio->name));
1260 } 1255 }
1261 strlcpy(serio->phys, 1256 strlcpy(serio->phys,
1262 (channel == KEYBOARD_LINE ? "zs/serio0" : "zs/serio1"), 1257 ((up->flags & SUNZILOG_FLAG_CONS_KEYB) ?
1258 "zs/serio0" : "zs/serio1"),
1263 sizeof(serio->phys)); 1259 sizeof(serio->phys));
1264 1260
1265 serio->write = sunzilog_serio_write; 1261 serio->write = sunzilog_serio_write;
@@ -1286,8 +1282,8 @@ static void __init sunzilog_init_hw(struct uart_sunzilog_port *up)
1286 (void) read_zsreg(channel, R0); 1282 (void) read_zsreg(channel, R0);
1287 } 1283 }
1288 1284
1289 if (up->port.line == KEYBOARD_LINE || 1285 if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
1290 up->port.line == MOUSE_LINE) { 1286 SUNZILOG_FLAG_CONS_MOUSE)) {
1291 sunzilog_init_kbdms(up, up->port.line); 1287 sunzilog_init_kbdms(up, up->port.line);
1292 up->curregs[R9] |= (NV | MIE); 1288 up->curregs[R9] |= (NV | MIE);
1293 write_zsreg(channel, R9, up->curregs[R9]); 1289 write_zsreg(channel, R9, up->curregs[R9]);
@@ -1313,37 +1309,26 @@ static void __init sunzilog_init_hw(struct uart_sunzilog_port *up)
1313 spin_unlock_irqrestore(&up->port.lock, flags); 1309 spin_unlock_irqrestore(&up->port.lock, flags);
1314 1310
1315#ifdef CONFIG_SERIO 1311#ifdef CONFIG_SERIO
1316 if (up->port.line == KEYBOARD_LINE || up->port.line == MOUSE_LINE) 1312 if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
1317 sunzilog_register_serio(up, up->port.line); 1313 SUNZILOG_FLAG_CONS_MOUSE))
1314 sunzilog_register_serio(up);
1318#endif 1315#endif
1319} 1316}
1320 1317
1321static int __devinit zs_get_instance(struct device_node *dp)
1322{
1323 int ret;
1324
1325 ret = of_getintprop_default(dp, "slave", -1);
1326 if (ret != -1)
1327 return ret;
1328
1329 if (of_find_property(dp, "keyboard", NULL))
1330 ret = 1;
1331 else
1332 ret = 0;
1333
1334 return ret;
1335}
1336
1337static int zilog_irq = -1; 1318static int zilog_irq = -1;
1338 1319
1339static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *match) 1320static int __devinit zs_probe(struct of_device *op, const struct of_device_id *match)
1340{ 1321{
1341 struct of_device *op = to_of_device(&dev->dev); 1322 static int inst;
1342 struct uart_sunzilog_port *up; 1323 struct uart_sunzilog_port *up;
1343 struct zilog_layout __iomem *rp; 1324 struct zilog_layout __iomem *rp;
1344 int inst = zs_get_instance(dev->node); 1325 int keyboard_mouse;
1345 int err; 1326 int err;
1346 1327
1328 keyboard_mouse = 0;
1329 if (of_find_property(op->node, "keyboard", NULL))
1330 keyboard_mouse = 1;
1331
1347 sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0, 1332 sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0,
1348 sizeof(struct zilog_layout), 1333 sizeof(struct zilog_layout),
1349 "zs"); 1334 "zs");
@@ -1352,16 +1337,8 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
1352 1337
1353 rp = sunzilog_chip_regs[inst]; 1338 rp = sunzilog_chip_regs[inst];
1354 1339
1355 if (zilog_irq == -1) { 1340 if (zilog_irq == -1)
1356 zilog_irq = op->irqs[0]; 1341 zilog_irq = op->irqs[0];
1357 err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
1358 "zs", sunzilog_irq_chain);
1359 if (err) {
1360 of_iounmap(rp, sizeof(struct zilog_layout));
1361
1362 return err;
1363 }
1364 }
1365 1342
1366 up = &sunzilog_port_table[inst * 2]; 1343 up = &sunzilog_port_table[inst * 2];
1367 1344
@@ -1378,7 +1355,7 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
1378 up[0].port.line = (inst * 2) + 0; 1355 up[0].port.line = (inst * 2) + 0;
1379 up[0].port.dev = &op->dev; 1356 up[0].port.dev = &op->dev;
1380 up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A; 1357 up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
1381 if (inst == 1) 1358 if (keyboard_mouse)
1382 up[0].flags |= SUNZILOG_FLAG_CONS_KEYB; 1359 up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
1383 sunzilog_init_hw(&up[0]); 1360 sunzilog_init_hw(&up[0]);
1384 1361
@@ -1395,11 +1372,11 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
1395 up[1].port.line = (inst * 2) + 1; 1372 up[1].port.line = (inst * 2) + 1;
1396 up[1].port.dev = &op->dev; 1373 up[1].port.dev = &op->dev;
1397 up[1].flags |= 0; 1374 up[1].flags |= 0;
1398 if (inst == 1) 1375 if (keyboard_mouse)
1399 up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE; 1376 up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
1400 sunzilog_init_hw(&up[1]); 1377 sunzilog_init_hw(&up[1]);
1401 1378
1402 if (inst != 1) { 1379 if (!keyboard_mouse) {
1403 err = uart_add_one_port(&sunzilog_reg, &up[0].port); 1380 err = uart_add_one_port(&sunzilog_reg, &up[0].port);
1404 if (err) { 1381 if (err) {
1405 of_iounmap(rp, sizeof(struct zilog_layout)); 1382 of_iounmap(rp, sizeof(struct zilog_layout));
@@ -1411,9 +1388,18 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
1411 of_iounmap(rp, sizeof(struct zilog_layout)); 1388 of_iounmap(rp, sizeof(struct zilog_layout));
1412 return err; 1389 return err;
1413 } 1390 }
1391 } else {
1392 printk(KERN_INFO "%s: Keyboard at MMIO %lx (irq = %d) "
1393 "is a zs\n",
1394 op->dev.bus_id, up[0].port.mapbase, op->irqs[0]);
1395 printk(KERN_INFO "%s: Mouse at MMIO %lx (irq = %d) "
1396 "is a zs\n",
1397 op->dev.bus_id, up[1].port.mapbase, op->irqs[0]);
1414 } 1398 }
1415 1399
1416 dev_set_drvdata(&dev->dev, &up[0]); 1400 dev_set_drvdata(&op->dev, &up[0]);
1401
1402 inst++;
1417 1403
1418 return 0; 1404 return 0;
1419} 1405}
@@ -1462,36 +1448,65 @@ static struct of_platform_driver zs_driver = {
1462static int __init sunzilog_init(void) 1448static int __init sunzilog_init(void)
1463{ 1449{
1464 struct device_node *dp; 1450 struct device_node *dp;
1465 int err; 1451 int err, uart_count;
1452 int num_keybms;
1466 1453
1467 NUM_SUNZILOG = 0; 1454 NUM_SUNZILOG = 0;
1468 for_each_node_by_name(dp, "zs") 1455 num_keybms = 0;
1456 for_each_node_by_name(dp, "zs") {
1469 NUM_SUNZILOG++; 1457 NUM_SUNZILOG++;
1458 if (of_find_property(dp, "keyboard", NULL))
1459 num_keybms++;
1460 }
1470 1461
1462 uart_count = 0;
1471 if (NUM_SUNZILOG) { 1463 if (NUM_SUNZILOG) {
1472 int uart_count; 1464 int uart_count;
1473 1465
1474 err = sunzilog_alloc_tables(); 1466 err = sunzilog_alloc_tables();
1475 if (err) 1467 if (err)
1476 return err; 1468 goto out;
1477 1469
1478 /* Subtract 1 for keyboard, 1 for mouse. */ 1470 uart_count = (NUM_SUNZILOG * 2) - (2 * num_keybms);
1479 uart_count = (NUM_SUNZILOG * 2) - 2;
1480 1471
1481 sunzilog_reg.nr = uart_count; 1472 sunzilog_reg.nr = uart_count;
1482 sunzilog_reg.minor = sunserial_current_minor; 1473 sunzilog_reg.minor = sunserial_current_minor;
1483 err = uart_register_driver(&sunzilog_reg); 1474 err = uart_register_driver(&sunzilog_reg);
1484 if (err) { 1475 if (err)
1485 sunzilog_free_tables(); 1476 goto out_free_tables;
1486 return err; 1477
1487 }
1488 sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64; 1478 sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64;
1489 sunzilog_reg.cons = SUNZILOG_CONSOLE(); 1479 sunzilog_reg.cons = SUNZILOG_CONSOLE();
1490 1480
1491 sunserial_current_minor += uart_count; 1481 sunserial_current_minor += uart_count;
1492 } 1482 }
1493 1483
1494 return of_register_driver(&zs_driver, &of_bus_type); 1484 err = of_register_driver(&zs_driver, &of_bus_type);
1485 if (err)
1486 goto out_unregister_uart;
1487
1488 if (zilog_irq != -1) {
1489 err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
1490 "zs", sunzilog_irq_chain);
1491 if (err)
1492 goto out_unregister_driver;
1493 }
1494
1495out:
1496 return err;
1497
1498out_unregister_driver:
1499 of_unregister_driver(&zs_driver);
1500
1501out_unregister_uart:
1502 if (NUM_SUNZILOG) {
1503 uart_unregister_driver(&sunzilog_reg);
1504 sunzilog_reg.cons = NULL;
1505 }
1506
1507out_free_tables:
1508 sunzilog_free_tables();
1509 goto out;
1495} 1510}
1496 1511
1497static void __exit sunzilog_exit(void) 1512static void __exit sunzilog_exit(void)
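
The reworked sunzilog_init() above replaces early returns with a single goto-based unwind path, so a failure at any stage releases exactly what was set up before it. A minimal userspace sketch of that pattern, with hypothetical register_a()/register_b() placeholders rather than real kernel calls:

#include <stdio.h>

static int register_a(void) { return 0; }    /* pretend success */
static int register_b(void) { return -1; }   /* pretend failure */
static void unregister_a(void) { puts("unregister_a"); }

static int init_sketch(void)
{
    int err;

    err = register_a();
    if (err)
        goto out;

    err = register_b();
    if (err)
        goto out_unregister_a;

    return 0;

out_unregister_a:
    unregister_a();             /* undo only what succeeded */
out:
    return err;
}

int main(void)
{
    printf("init_sketch() = %d\n", init_sketch());
    return 0;
}
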
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index ceda3a2859d2..7858703ed84c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -246,8 +246,8 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
246#define BUF_BUSY XBF_DONT_BLOCK 246#define BUF_BUSY XBF_DONT_BLOCK
247 247
248#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) 248#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
249#define XFS_BUF_ZEROFLAGS(bp) \ 249#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
250 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) 250 ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
251 251
252#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) 252#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
253#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) 253#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
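
XFS_BUF_ZEROFLAGS() now also clears XBF_ORDERED, so a reused log buffer does not inherit a stale barrier-write flag. The idea is plain bitmask clearing; a toy version with made-up flag values:

#include <stdio.h>

#define F_READ    (1u << 0)
#define F_WRITE   (1u << 1)
#define F_ASYNC   (1u << 2)
#define F_DELWRI  (1u << 3)
#define F_ORDERED (1u << 4)     /* newly included in the clear mask */

#define ZEROFLAGS(f) \
    ((f) &= ~(F_READ | F_WRITE | F_ASYNC | F_DELWRI | F_ORDERED))

int main(void)
{
    unsigned int flags = F_WRITE | F_ORDERED;

    ZEROFLAGS(flags);
    printf("flags after clear: %#x\n", flags);  /* prints 0 */
    return 0;
}
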
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9bdef9d51900..4754f342a5d3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -314,6 +314,13 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
314 return; 314 return;
315 } 315 }
316 316
317 if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
318 xfs_fs_cmn_err(CE_NOTE, mp,
319 "Disabling barriers, underlying device is readonly");
320 mp->m_flags &= ~XFS_MOUNT_BARRIER;
321 return;
322 }
323
317 error = xfs_barrier_test(mp); 324 error = xfs_barrier_test(mp);
318 if (error) { 325 if (error) {
319 xfs_fs_cmn_err(CE_NOTE, mp, 326 xfs_fs_cmn_err(CE_NOTE, mp,
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index e95e99f7168f..f137856c3261 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -217,17 +217,24 @@ xfs_qm_statvfs(
217 return 0; 217 return 0;
218 dp = &dqp->q_core; 218 dp = &dqp->q_core;
219 219
220 limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit; 220 limit = dp->d_blk_softlimit ?
221 be64_to_cpu(dp->d_blk_softlimit) :
222 be64_to_cpu(dp->d_blk_hardlimit);
221 if (limit && statp->f_blocks > limit) { 223 if (limit && statp->f_blocks > limit) {
222 statp->f_blocks = limit; 224 statp->f_blocks = limit;
223 statp->f_bfree = (statp->f_blocks > dp->d_bcount) ? 225 statp->f_bfree =
224 (statp->f_blocks - dp->d_bcount) : 0; 226 (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
227 (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
225 } 228 }
226 limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit; 229
230 limit = dp->d_ino_softlimit ?
231 be64_to_cpu(dp->d_ino_softlimit) :
232 be64_to_cpu(dp->d_ino_hardlimit);
227 if (limit && statp->f_files > limit) { 233 if (limit && statp->f_files > limit) {
228 statp->f_files = limit; 234 statp->f_files = limit;
229 statp->f_ffree = (statp->f_files > dp->d_icount) ? 235 statp->f_ffree =
230 (statp->f_ffree - dp->d_icount) : 0; 236 (statp->f_files > be64_to_cpu(dp->d_icount)) ?
237 (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0;
231 } 238 }
232 239
233 xfs_qm_dqput(dqp); 240 xfs_qm_dqput(dqp);
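
The quota statvfs fix is an endianness one: the on-disk dquot limits are big-endian and must go through be64_to_cpu() before any comparison or arithmetic. A userspace illustration using glibc's be64toh(), the rough equivalent of the kernel helper:

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t disk_softlimit = htobe64(1000);    /* as stored on disk */
    uint64_t blocks = 5000;

    /* Wrong: comparing the raw big-endian value directly. */
    /* if (blocks > disk_softlimit) ... */

    /* Right: convert to CPU byte order first. */
    uint64_t limit = be64toh(disk_softlimit);
    if (limit && blocks > limit)
        blocks = limit;

    printf("clamped blocks = %" PRIu64 "\n", blocks);
    return 0;
}
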
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 86c1bf0bba9e..1f8ecff8553a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -334,10 +334,9 @@ xfs_itobp(
334#if !defined(__KERNEL__) 334#if !defined(__KERNEL__)
335 ni = 0; 335 ni = 0;
336#elif defined(DEBUG) 336#elif defined(DEBUG)
337 ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 337 ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
338 (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog);
339#else /* usual case */ 338#else /* usual case */
340 ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1; 339 ni = 1;
341#endif 340#endif
342 341
343 for (i = 0; i < ni; i++) { 342 for (i = 0; i < ni; i++) {
@@ -348,11 +347,15 @@ xfs_itobp(
348 (i << mp->m_sb.sb_inodelog)); 347 (i << mp->m_sb.sb_inodelog));
349 di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && 348 di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
350 XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); 349 XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
351 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, 350 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
352 XFS_RANDOM_ITOBP_INOTOBP))) { 351 XFS_ERRTAG_ITOBP_INOTOBP,
352 XFS_RANDOM_ITOBP_INOTOBP))) {
353 if (imap_flags & XFS_IMAP_BULKSTAT) {
354 xfs_trans_brelse(tp, bp);
355 return XFS_ERROR(EINVAL);
356 }
353#ifdef DEBUG 357#ifdef DEBUG
354 if (!(imap_flags & XFS_IMAP_BULKSTAT)) 358 cmn_err(CE_ALERT,
355 cmn_err(CE_ALERT,
356 "Device %s - bad inode magic/vsn " 359 "Device %s - bad inode magic/vsn "
357 "daddr %lld #%d (magic=%x)", 360 "daddr %lld #%d (magic=%x)",
358 XFS_BUFTARG_NAME(mp->m_ddev_targp), 361 XFS_BUFTARG_NAME(mp->m_ddev_targp),
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e730328636c3..21ac1a67e3e0 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1413,7 +1413,7 @@ xlog_sync(xlog_t *log,
1413 ops = iclog->ic_header.h_num_logops; 1413 ops = iclog->ic_header.h_num_logops;
1414 INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); 1414 INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
1415 1415
1416 bp = iclog->ic_bp; 1416 bp = iclog->ic_bp;
1417 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); 1417 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
1418 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); 1418 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
1419 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); 1419 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
@@ -1430,15 +1430,14 @@ xlog_sync(xlog_t *log,
1430 } 1430 }
1431 XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); 1431 XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count);
1432 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ 1432 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */
1433 XFS_BUF_ZEROFLAGS(bp);
1433 XFS_BUF_BUSY(bp); 1434 XFS_BUF_BUSY(bp);
1434 XFS_BUF_ASYNC(bp); 1435 XFS_BUF_ASYNC(bp);
1435 /* 1436 /*
1436 * Do an ordered write for the log block. 1437 * Do an ordered write for the log block.
1437 * 1438 * Its unnecessary to flush the first split block in the log wrap case.
1438 * It may not be needed to flush the first split block in the log wrap
1439 * case, but do it anyways to be safe -AK
1440 */ 1439 */
1441 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1440 if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER))
1442 XFS_BUF_ORDERED(bp); 1441 XFS_BUF_ORDERED(bp);
1443 1442
1444 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1443 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
@@ -1460,7 +1459,7 @@ xlog_sync(xlog_t *log,
1460 return error; 1459 return error;
1461 } 1460 }
1462 if (split) { 1461 if (split) {
1463 bp = iclog->ic_log->l_xbuf; 1462 bp = iclog->ic_log->l_xbuf;
1464 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == 1463 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) ==
1465 (unsigned long)1); 1464 (unsigned long)1);
1466 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); 1465 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
@@ -1468,6 +1467,7 @@ xlog_sync(xlog_t *log,
1468 XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ 1467 XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
1469 (__psint_t)count), split); 1468 (__psint_t)count), split);
1470 XFS_BUF_SET_FSPRIVATE(bp, iclog); 1469 XFS_BUF_SET_FSPRIVATE(bp, iclog);
1470 XFS_BUF_ZEROFLAGS(bp);
1471 XFS_BUF_BUSY(bp); 1471 XFS_BUF_BUSY(bp);
1472 XFS_BUF_ASYNC(bp); 1472 XFS_BUF_ASYNC(bp);
1473 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1473 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 6c96391f3f1a..b427d220a169 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -515,7 +515,7 @@ xfs_mount(
515 if (error) 515 if (error)
516 goto error2; 516 goto error2;
517 517
518 if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) 518 if (mp->m_flags & XFS_MOUNT_BARRIER)
519 xfs_mountfs_check_barriers(mp); 519 xfs_mountfs_check_barriers(mp);
520 520
521 error = XFS_IOINIT(vfsp, args, flags); 521 error = XFS_IOINIT(vfsp, args, flags);
diff --git a/include/asm-arm/arch-iop3xx/iop331-irqs.h b/include/asm-arm/arch-iop3xx/iop331-irqs.h
index 8ff73d487222..7135ad7e335e 100644
--- a/include/asm-arm/arch-iop3xx/iop331-irqs.h
+++ b/include/asm-arm/arch-iop3xx/iop331-irqs.h
@@ -91,7 +91,6 @@
91#define NR_IRQS NR_IOP331_IRQS 91#define NR_IRQS NR_IOP331_IRQS
92 92
93 93
94#if defined(CONFIG_ARCH_IQ80331)
95/* 94/*
96 * Interrupts available on the IQ80331 board 95 * Interrupts available on the IQ80331 board
97 */ 96 */
@@ -111,7 +110,6 @@
111#define IRQ_IQ80331_INTC IRQ_IOP331_XINT2 110#define IRQ_IQ80331_INTC IRQ_IOP331_XINT2
112#define IRQ_IQ80331_INTD IRQ_IOP331_XINT3 111#define IRQ_IQ80331_INTD IRQ_IOP331_XINT3
113 112
114#elif defined(CONFIG_MACH_IQ80332)
115/* 113/*
116 * Interrupts available on the IQ80332 board 114 * Interrupts available on the IQ80332 board
117 */ 115 */
@@ -131,6 +129,4 @@
131#define IRQ_IQ80332_INTC IRQ_IOP331_XINT2 129#define IRQ_IQ80332_INTC IRQ_IOP331_XINT2
132#define IRQ_IQ80332_INTD IRQ_IOP331_XINT3 130#define IRQ_IQ80332_INTD IRQ_IOP331_XINT3
133 131
134#endif
135
136#endif // _IOP331_IRQ_H_ 132#endif // _IOP331_IRQ_H_
diff --git a/include/asm-m68k/oplib.h b/include/asm-m68k/oplib.h
index c3594f473ef7..06caa2d08451 100644
--- a/include/asm-m68k/oplib.h
+++ b/include/asm-m68k/oplib.h
@@ -244,11 +244,6 @@ extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
244/* Does the passed node have the given "name"? YES=1 NO=0 */ 244/* Does the passed node have the given "name"? YES=1 NO=0 */
245extern int prom_nodematch(int thisnode, char *name); 245extern int prom_nodematch(int thisnode, char *name);
246 246
247/* Puts in buffer a prom name in the form name@x,y or name (x for which_io
248 * and y for first regs phys address
249 */
250extern int prom_getname(int node, char *buf, int buflen);
251
252/* Search all siblings starting at the passed node for "name" matching 247/* Search all siblings starting at the passed node for "name" matching
253 * the given string. Returns the node on success, zero on failure. 248 * the given string. Returns the node on success, zero on failure.
254 */ 249 */
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 36a3a85d611a..16040048cd1b 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -128,8 +128,13 @@ extern void account_system_vtime(struct task_struct *);
128 128
129#define nop() __asm__ __volatile__ ("nop") 129#define nop() __asm__ __volatile__ ("nop")
130 130
131#define xchg(ptr,x) \ 131#define xchg(ptr,x) \
132 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr)))) 132({ \
133 __typeof__(*(ptr)) __ret; \
134 __ret = (__typeof__(*(ptr))) \
135 __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
136 __ret; \
137})
133 138
134static inline unsigned long __xchg(unsigned long x, void * ptr, int size) 139static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
135{ 140{
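
The s390 xchg() rewrite uses a GCC statement expression so the macro can declare a correctly typed temporary and still yield a value. A small userspace example of the same construct (SQUARE() here is just a demonstration macro, not kernel code):

#include <stdio.h>

#define SQUARE(x)                                        \
({                                                       \
    __typeof__(x) __val = (x);   /* evaluate x once */   \
    __val * __val;               /* value of the block */\
})

int main(void)
{
    int i = 3;
    int sq = SQUARE(i++);   /* i is evaluated exactly once */

    printf("sq=%d i=%d\n", sq, i);  /* sq=9 i=4 */
    return 0;
}
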
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 4848057dafe4..5d0332a4c2bd 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -19,7 +19,7 @@ static inline cycles_t get_cycles(void)
19{ 19{
20 cycles_t cycles; 20 cycles_t cycles;
21 21
22 __asm__("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc"); 22 __asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");
23 return cycles >> 2; 23 return cycles >> 2;
24} 24}
25 25
@@ -27,7 +27,7 @@ static inline unsigned long long get_clock (void)
27{ 27{
28 unsigned long long clk; 28 unsigned long long clk;
29 29
30 __asm__("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 30 __asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
31 return clk; 31 return clk;
32} 32}
33 33
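
Adding __volatile__ tells the compiler that each stck must actually execute, rather than being merged or hoisted as if the asm were a pure function of its inputs. The same concern illustrated on x86-64, chosen only because it is easy to build in userspace; this is not s390 code:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdtsc(void)
{
    uint32_t lo, hi;

    /* volatile: every call must really read the time stamp counter */
    __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    uint64_t a = rdtsc();
    uint64_t b = rdtsc();

    printf("delta = %llu cycles\n", (unsigned long long)(b - a));
    return 0;
}
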
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index f283f8aaf6a9..91691e52c058 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -267,11 +267,6 @@ extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
267/* Does the passed node have the given "name"? YES=1 NO=0 */ 267/* Does the passed node have the given "name"? YES=1 NO=0 */
268extern int prom_nodematch(int thisnode, char *name); 268extern int prom_nodematch(int thisnode, char *name);
269 269
270/* Puts in buffer a prom name in the form name@x,y or name (x for which_io
271 * and y for first regs phys address
272 */
273extern int prom_getname(int node, char *buf, int buflen);
274
275/* Search all siblings starting at the passed node for "name" matching 270/* Search all siblings starting at the passed node for "name" matching
276 * the given string. Returns the node on success, zero on failure. 271 * the given string. Returns the node on success, zero on failure.
277 */ 272 */
diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h
index 0ae5084c427b..d03a21c97abb 100644
--- a/include/asm-sparc/signal.h
+++ b/include/asm-sparc/signal.h
@@ -168,7 +168,7 @@ struct sigstack {
168 * statically allocated data.. which is NOT GOOD. 168 * statically allocated data.. which is NOT GOOD.
169 * 169 *
170 */ 170 */
171#define SA_STATIC_ALLOC 0x80 171#define SA_STATIC_ALLOC 0x8000
172#endif 172#endif
173 173
174#include <asm-generic/signal.h> 174#include <asm-generic/signal.h>
diff --git a/include/asm-sparc64/openprom.h b/include/asm-sparc64/openprom.h
index b4959d2b0d99..e01b80559c93 100644
--- a/include/asm-sparc64/openprom.h
+++ b/include/asm-sparc64/openprom.h
@@ -175,7 +175,7 @@ struct linux_nodeops {
175}; 175};
176 176
177/* More fun PROM structures for device probing. */ 177/* More fun PROM structures for device probing. */
178#define PROMREG_MAX 16 178#define PROMREG_MAX 24
179#define PROMVADDR_MAX 16 179#define PROMVADDR_MAX 16
180#define PROMINTR_MAX 15 180#define PROMINTR_MAX 15
181 181
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index a68b0bb05958..6a0da3b1695c 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -287,11 +287,6 @@ extern void prom_getstring(int node, const char *prop, char *buf, int bufsize);
287/* Does the passed node have the given "name"? YES=1 NO=0 */ 287/* Does the passed node have the given "name"? YES=1 NO=0 */
288extern int prom_nodematch(int thisnode, const char *name); 288extern int prom_nodematch(int thisnode, const char *name);
289 289
290/* Puts in buffer a prom name in the form name@x,y or name (x for which_io
291 * and y for first regs phys address
292 */
293extern int prom_getname(int node, char *buf, int buflen);
294
295/* Search all siblings starting at the passed node for "name" matching 290/* Search all siblings starting at the passed node for "name" matching
296 * the given string. Returns the node on success, zero on failure. 291 * the given string. Returns the node on success, zero on failure.
297 */ 292 */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 03f5bc9b6bec..1ba19eb34ce3 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -339,7 +339,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
339 " .section .sun4v_2insn_patch, \"ax\"\n" 339 " .section .sun4v_2insn_patch, \"ax\"\n"
340 " .word 661b\n" 340 " .word 661b\n"
341 " andn %0, %4, %0\n" 341 " andn %0, %4, %0\n"
342 " or %0, %3, %0\n" 342 " or %0, %5, %0\n"
343 " .previous\n" 343 " .previous\n"
344 : "=r" (val) 344 : "=r" (val)
345 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), 345 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h
index 5015bb8d6c32..89d42431efb5 100644
--- a/include/asm-sparc64/sfp-machine.h
+++ b/include/asm-sparc64/sfp-machine.h
@@ -34,7 +34,7 @@
34#define _FP_MUL_MEAT_D(R,X,Y) \ 34#define _FP_MUL_MEAT_D(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) 35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_Q(R,X,Y) \ 36#define _FP_MUL_MEAT_Q(R,X,Y) \
37 _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) 37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
38 38
39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) 39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) 40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index fbfb50136edb..4e3919524240 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -60,9 +60,4 @@ static inline int calgary_iommu_init(void) { return 1; }
60static inline void detect_calgary(void) { return; } 60static inline void detect_calgary(void) { return; }
61#endif 61#endif
62 62
63static inline unsigned int bus_to_phb(unsigned char busno)
64{
65 return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
66}
67
68#endif /* _ASM_X86_64_CALGARY_H */ 63#endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index f7bf875aae40..10f346165cab 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -19,7 +19,7 @@
19#define EXCEPTION_STACK_ORDER 0 19#define EXCEPTION_STACK_ORDER 0
20#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) 20#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
21 21
22#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER 22#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
23#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 23#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
24 24
25#define IRQSTACK_ORDER 2 25#define IRQSTACK_ORDER 2
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 5f9a01805821..ba94ab3d2673 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -42,6 +42,8 @@ extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
42extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); 42extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
43extern void swiotlb_init(void); 43extern void swiotlb_init(void);
44 44
45extern int swiotlb_force;
46
45#ifdef CONFIG_SWIOTLB 47#ifdef CONFIG_SWIOTLB
46extern int swiotlb; 48extern int swiotlb;
47#else 49#else
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 44a11f1ccaf2..8fb344a9abd8 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,7 +48,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
48{ 48{
49} 49}
50#endif 50#endif
51extern int current_in_cpu_hotplug(void);
52 51
53int cpu_up(unsigned int cpu); 52int cpu_up(unsigned int cpu);
54 53
@@ -61,10 +60,6 @@ static inline int register_cpu_notifier(struct notifier_block *nb)
61static inline void unregister_cpu_notifier(struct notifier_block *nb) 60static inline void unregister_cpu_notifier(struct notifier_block *nb)
62{ 61{
63} 62}
64static inline int current_in_cpu_hotplug(void)
65{
66 return 0;
67}
68 63
69#endif /* CONFIG_SMP */ 64#endif /* CONFIG_SMP */
70extern struct sysdev_class cpu_sysdev_class; 65extern struct sysdev_class cpu_sysdev_class;
@@ -73,7 +68,6 @@ extern struct sysdev_class cpu_sysdev_class;
73/* Stop CPUs going up and down. */ 68/* Stop CPUs going up and down. */
74extern void lock_cpu_hotplug(void); 69extern void lock_cpu_hotplug(void);
75extern void unlock_cpu_hotplug(void); 70extern void unlock_cpu_hotplug(void);
76extern int lock_cpu_hotplug_interruptible(void);
77#define hotcpu_notifier(fn, pri) { \ 71#define hotcpu_notifier(fn, pri) { \
78 static struct notifier_block fn##_nb = \ 72 static struct notifier_block fn##_nb = \
79 { .notifier_call = fn, .priority = pri }; \ 73 { .notifier_call = fn, .priority = pri }; \
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 35e137636b0b..4ea39fee99c7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,9 +172,6 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
172 unsigned int relation); 172 unsigned int relation);
173 173
174 174
175/* pass an event to the cpufreq governor */
176int cpufreq_governor(unsigned int cpu, unsigned int event);
177
178int cpufreq_register_governor(struct cpufreq_governor *governor); 175int cpufreq_register_governor(struct cpufreq_governor *governor);
179void cpufreq_unregister_governor(struct cpufreq_governor *governor); 176void cpufreq_unregister_governor(struct cpufreq_governor *governor);
180 177
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 34c3a215f2cd..d097b5b72bc6 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -96,7 +96,8 @@ struct robust_list_head {
96long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, 96long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
97 u32 __user *uaddr2, u32 val2, u32 val3); 97 u32 __user *uaddr2, u32 val2, u32 val3);
98 98
99extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); 99extern int
100handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
100 101
101#ifdef CONFIG_FUTEX 102#ifdef CONFIG_FUTEX
102extern void exit_robust_list(struct task_struct *curr); 103extern void exit_robust_list(struct task_struct *curr);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index dc7abef10965..99620451d958 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -571,6 +571,7 @@ typedef struct ide_drive_s {
571 u8 waiting_for_dma; /* dma currently in progress */ 571 u8 waiting_for_dma; /* dma currently in progress */
572 u8 unmask; /* okay to unmask other irqs */ 572 u8 unmask; /* okay to unmask other irqs */
573 u8 bswap; /* byte swap data */ 573 u8 bswap; /* byte swap data */
574 u8 noflush; /* don't attempt flushes */
574 u8 dsc_overlap; /* DSC overlap */ 575 u8 dsc_overlap; /* DSC overlap */
575 u8 nice1; /* give potential excess bandwidth */ 576 u8 nice1; /* give potential excess bandwidth */
576 577
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6cc497a2b6da..66c3100c2b94 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -265,12 +265,14 @@ enum {
265 265
266 /* ata_eh_info->flags */ 266 /* ata_eh_info->flags */
267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
268 ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */ 268 ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
270 ATA_EHI_QUIET = (1 << 3), /* be quiet */ 270 ATA_EHI_QUIET = (1 << 3), /* be quiet */
271 271
272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ 272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
273 273
274 ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
275
274 /* max repeat if error condition is still set after ->error_handler */ 276 /* max repeat if error condition is still set after ->error_handler */
275 ATA_EH_MAX_REPEAT = 5, 277 ATA_EH_MAX_REPEAT = 5,
276 278
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 76cc099c8580..75f02d8c6ed3 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -924,10 +924,10 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
924 924
925static inline int netif_tx_trylock(struct net_device *dev) 925static inline int netif_tx_trylock(struct net_device *dev)
926{ 926{
927 int err = spin_trylock(&dev->_xmit_lock); 927 int ok = spin_trylock(&dev->_xmit_lock);
928 if (!err) 928 if (likely(ok))
929 dev->xmit_lock_owner = smp_processor_id(); 929 dev->xmit_lock_owner = smp_processor_id();
930 return err; 930 return ok;
931} 931}
932 932
933static inline void netif_tx_unlock(struct net_device *dev) 933static inline void netif_tx_unlock(struct net_device *dev)
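
spin_trylock() returns nonzero on success, so the return value of netif_tx_trylock() is a boolean "ok", not an error code; the rename and the likely() hint reflect that. A userspace sketch of the same semantics, with a pthread mutex standing in for the kernel spinlock and an int standing in for smp_processor_id():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;
static int xmit_lock_owner = -1;    /* stand-in for the owning CPU id */

static int tx_trylock(void)
{
    int ok = (pthread_mutex_trylock(&xmit_lock) == 0);

    if (ok)
        xmit_lock_owner = 0;        /* record owner only on success */
    return ok;
}

static void tx_unlock(void)
{
    xmit_lock_owner = -1;
    pthread_mutex_unlock(&xmit_lock);
}

int main(void)
{
    if (tx_trylock()) {
        puts("got the tx lock");
        tx_unlock();
    }
    return 0;
}
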
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 87764022cc67..31f02ba036ce 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -79,6 +79,8 @@ struct bridge_skb_cb {
79 __u32 ipv4; 79 __u32 ipv4;
80 } daddr; 80 } daddr;
81}; 81};
82
83extern int brnf_deferred_hooks;
82#endif /* CONFIG_BRIDGE_NETFILTER */ 84#endif /* CONFIG_BRIDGE_NETFILTER */
83 85
84#endif /* __KERNEL__ */ 86#endif /* __KERNEL__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0bf31b83578c..4307e764ef0a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1066,9 +1066,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
1066 kfree_skb(skb); 1066 kfree_skb(skb);
1067} 1067}
1068 1068
1069#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
1070/** 1069/**
1071 * __dev_alloc_skb - allocate an skbuff for sending 1070 * __dev_alloc_skb - allocate an skbuff for receiving
1072 * @length: length to allocate 1071 * @length: length to allocate
1073 * @gfp_mask: get_free_pages mask, passed to alloc_skb 1072 * @gfp_mask: get_free_pages mask, passed to alloc_skb
1074 * 1073 *
@@ -1087,12 +1086,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1087 skb_reserve(skb, NET_SKB_PAD); 1086 skb_reserve(skb, NET_SKB_PAD);
1088 return skb; 1087 return skb;
1089} 1088}
1090#else
1091extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
1092#endif
1093 1089
1094/** 1090/**
1095 * dev_alloc_skb - allocate an skbuff for sending 1091 * dev_alloc_skb - allocate an skbuff for receiving
1096 * @length: length to allocate 1092 * @length: length to allocate
1097 * 1093 *
1098 * Allocate a new &sk_buff and assign it a usage count of one. The 1094 * Allocate a new &sk_buff and assign it a usage count of one. The
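
The comment fix (__dev_alloc_skb() is for receiving) sits next to the real point of the helper: it allocates NET_SKB_PAD extra bytes and reserves them as headroom so headers can be prepended later without reallocating. A toy userspace model of that layout; struct toy_buf is invented for illustration and is not struct sk_buff:

#include <stdio.h>
#include <stdlib.h>

#define NET_SKB_PAD 16

struct toy_buf {
    unsigned char *head;    /* start of the allocation */
    unsigned char *data;    /* start of the payload */
    size_t len;
};

static struct toy_buf *toy_alloc(size_t length)
{
    struct toy_buf *b = malloc(sizeof(*b));

    if (!b)
        return NULL;
    b->head = malloc(length + NET_SKB_PAD);
    if (!b->head) {
        free(b);
        return NULL;
    }
    b->data = b->head + NET_SKB_PAD;    /* reserve headroom */
    b->len = 0;
    return b;
}

int main(void)
{
    struct toy_buf *b = toy_alloc(1500);

    if (b) {
        printf("headroom = %td bytes\n", b->data - b->head);
        free(b->head);
        free(b);
    }
    return 0;
}
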
diff --git a/include/net/netdma.h b/include/net/netdma.h
index 19760eb131aa..ceae5ee85c04 100644
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -37,7 +37,7 @@ static inline struct dma_chan *get_softnet_dma(void)
37} 37}
38 38
39int dma_skb_copy_datagram_iovec(struct dma_chan* chan, 39int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
40 const struct sk_buff *skb, int offset, struct iovec *to, 40 struct sk_buff *skb, int offset, struct iovec *to,
41 size_t len, struct dma_pinned_list *pinned_list); 41 size_t len, struct dma_pinned_list *pinned_list);
42 42
43#endif /* CONFIG_NET_DMA */ 43#endif /* CONFIG_NET_DMA */
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 1925c65e617b..f6afee73235d 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -169,23 +169,17 @@ psched_tod_diff(int delta_sec, int bound)
169 169
170#define PSCHED_TADD2(tv, delta, tv_res) \ 170#define PSCHED_TADD2(tv, delta, tv_res) \
171({ \ 171({ \
172 int __delta = (delta); \ 172 int __delta = (tv).tv_usec + (delta); \
173 (tv_res) = (tv); \ 173 (tv_res).tv_sec = (tv).tv_sec; \
174 while(__delta >= USEC_PER_SEC){ \ 174 while (__delta >= USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \
175 (tv_res).tv_sec++; \
176 __delta -= USEC_PER_SEC; \
177 } \
178 (tv_res).tv_usec = __delta; \ 175 (tv_res).tv_usec = __delta; \
179}) 176})
180 177
181#define PSCHED_TADD(tv, delta) \ 178#define PSCHED_TADD(tv, delta) \
182({ \ 179({ \
183 int __delta = (delta); \ 180 (tv).tv_usec += (delta); \
184 while(__delta >= USEC_PER_SEC){ \ 181 while ((tv).tv_usec >= USEC_PER_SEC) { (tv).tv_sec++; \
185 (tv).tv_sec++; \ 182 (tv).tv_usec -= USEC_PER_SEC; } \
186 __delta -= USEC_PER_SEC; \
187 } \
188 (tv).tv_usec = __delta; \
189}) 183})
190 184
191/* Set/check that time is in the "past perfect"; 185/* Set/check that time is in the "past perfect";
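
The PSCHED_TADD()/PSCHED_TADD2() rework adds the delta to tv_usec first and then carries any overflow into tv_sec, instead of looping on the raw delta alone. Written as an ordinary function, the fixed logic looks like this:

#include <stdio.h>
#include <sys/time.h>

#define USEC_PER_SEC 1000000

static void tv_add_usec(struct timeval *tv, int delta)
{
    tv->tv_usec += delta;
    while (tv->tv_usec >= USEC_PER_SEC) {   /* normalise the overflow */
        tv->tv_sec++;
        tv->tv_usec -= USEC_PER_SEC;
    }
}

int main(void)
{
    struct timeval tv = { .tv_sec = 10, .tv_usec = 900000 };

    tv_add_usec(&tv, 250000);
    printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);  /* 11.150000 */
    return 0;
}
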
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5f69158c1006..e5aa7ff1f5b5 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -445,6 +445,7 @@ typedef struct sctp_sender_hb_info {
445 struct sctp_paramhdr param_hdr; 445 struct sctp_paramhdr param_hdr;
446 union sctp_addr daddr; 446 union sctp_addr daddr;
447 unsigned long sent_at; 447 unsigned long sent_at;
448 __u64 hb_nonce;
448} __attribute__((packed)) sctp_sender_hb_info_t; 449} __attribute__((packed)) sctp_sender_hb_info_t;
449 450
450/* 451/*
@@ -730,13 +731,10 @@ void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *,
730const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); 731const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);
731 732
732/* This is a structure for holding either an IPv6 or an IPv4 address. */ 733/* This is a structure for holding either an IPv6 or an IPv4 address. */
733/* sin_family -- AF_INET or AF_INET6
734 * sin_port -- ordinary port number
735 * sin_addr -- cast to either (struct in_addr) or (struct in6_addr)
736 */
737struct sctp_sockaddr_entry { 734struct sctp_sockaddr_entry {
738 struct list_head list; 735 struct list_head list;
739 union sctp_addr a; 736 union sctp_addr a;
737 __u8 use_as_src;
740}; 738};
741 739
742typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); 740typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);
@@ -984,6 +982,9 @@ struct sctp_transport {
984 */ 982 */
985 char cacc_saw_newack; 983 char cacc_saw_newack;
986 } cacc; 984 } cacc;
985
986 /* 64-bit random number sent with heartbeat. */
987 __u64 hb_nonce;
987}; 988};
988 989
989struct sctp_transport *sctp_transport_new(const union sctp_addr *, 990struct sctp_transport *sctp_transport_new(const union sctp_addr *,
@@ -1138,7 +1139,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
1138 sctp_scope_t scope, gfp_t gfp, 1139 sctp_scope_t scope, gfp_t gfp,
1139 int flags); 1140 int flags);
1140int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1141int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
1141 gfp_t gfp); 1142 __u8 use_as_src, gfp_t gfp);
1142int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); 1143int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
1143int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1144int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
1144 struct sctp_sock *); 1145 struct sctp_sock *);
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 8a6bef6f91eb..1b7aae6cdd82 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -560,9 +560,18 @@ struct sctp_paddrinfo {
560} __attribute__((packed, aligned(4))); 560} __attribute__((packed, aligned(4)));
561 561
562/* Peer addresses's state. */ 562/* Peer addresses's state. */
563/* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x]
564 * calls.
565 * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters.
566 * Not yet confirmed by a heartbeat and not available for data
567 * transfers.
568 * ACTIVE : Peer address confirmed, active and available for data transfers.
569 * INACTIVE: Peer address inactive and not available for data transfers.
570 */
563enum sctp_spinfo_state { 571enum sctp_spinfo_state {
564 SCTP_INACTIVE, 572 SCTP_INACTIVE,
565 SCTP_ACTIVE, 573 SCTP_ACTIVE,
574 SCTP_UNCONFIRMED,
566 SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ 575 SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */
567}; 576};
568 577
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 5ff77558013b..585d28e960dd 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -75,6 +75,7 @@
75#define IB_MGMT_METHOD_TRAP_REPRESS 0x07 75#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
76 76
77#define IB_MGMT_METHOD_RESP 0x80 77#define IB_MGMT_METHOD_RESP 0x80
78#define IB_BM_ATTR_MOD_RESP cpu_to_be32(1)
78 79
79#define IB_MGMT_MAX_METHODS 128 80#define IB_MGMT_MAX_METHODS 128
80 81
@@ -247,6 +248,12 @@ struct ib_mad_send_buf {
247}; 248};
248 249
249/** 250/**
251 * ib_response_mad - Returns if the specified MAD has been generated in
252 * response to a sent request or trap.
253 */
254int ib_response_mad(struct ib_mad *mad);
255
256/**
250 * ib_get_rmpp_resptime - Returns the RMPP response time. 257 * ib_get_rmpp_resptime - Returns the RMPP response time.
251 * @rmpp_hdr: An RMPP header. 258 * @rmpp_hdr: An RMPP header.
252 */ 259 */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 371f70d9aa92..58e6444eebee 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -58,9 +58,7 @@ struct scsi_cmnd {
58 int timeout_per_command; 58 int timeout_per_command;
59 59
60 unsigned char cmd_len; 60 unsigned char cmd_len;
61 unsigned char old_cmd_len;
62 enum dma_data_direction sc_data_direction; 61 enum dma_data_direction sc_data_direction;
63 enum dma_data_direction sc_old_data_direction;
64 62
65 /* These elements define the operation we are about to perform */ 63 /* These elements define the operation we are about to perform */
66#define MAX_COMMAND_SIZE 16 64#define MAX_COMMAND_SIZE 16
@@ -71,18 +69,11 @@ struct scsi_cmnd {
71 void *request_buffer; /* Actual requested buffer */ 69 void *request_buffer; /* Actual requested buffer */
72 70
73 /* These elements define the operation we ultimately want to perform */ 71 /* These elements define the operation we ultimately want to perform */
74 unsigned char data_cmnd[MAX_COMMAND_SIZE];
75 unsigned short old_use_sg; /* We save use_sg here when requesting
76 * sense info */
77 unsigned short use_sg; /* Number of pieces of scatter-gather */ 72 unsigned short use_sg; /* Number of pieces of scatter-gather */
78 unsigned short sglist_len; /* size of malloc'd scatter-gather list */ 73 unsigned short sglist_len; /* size of malloc'd scatter-gather list */
79 unsigned bufflen; /* Size of data buffer */
80 void *buffer; /* Data buffer */
81 74
82 unsigned underflow; /* Return error if less than 75 unsigned underflow; /* Return error if less than
83 this amount is transferred */ 76 this amount is transferred */
84 unsigned old_underflow; /* save underflow here when reusing the
85 * command for error handling */
86 77
87 unsigned transfersize; /* How much we are guaranteed to 78 unsigned transfersize; /* How much we are guaranteed to
88 transfer with each SCSI transfer 79 transfer with each SCSI transfer
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index e3c503cd175e..6cc2314098cf 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -106,6 +106,7 @@ struct sas_end_device {
106 106
107struct sas_expander_device { 107struct sas_expander_device {
108 int level; 108 int level;
109 int next_port_id;
109 110
110 #define SAS_EXPANDER_VENDOR_ID_LEN 8 111 #define SAS_EXPANDER_VENDOR_ID_LEN 8
111 char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1]; 112 char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1];
@@ -127,8 +128,10 @@ struct sas_expander_device {
127struct sas_port { 128struct sas_port {
128 struct device dev; 129 struct device dev;
129 130
130 u8 port_identifier; 131 int port_identifier;
131 int num_phys; 132 int num_phys;
133 /* port flags */
134 unsigned int is_backlink:1;
132 135
133 /* the other end of the link */ 136 /* the other end of the link */
134 struct sas_rphy *rphy; 137 struct sas_rphy *rphy;
@@ -168,11 +171,13 @@ extern void sas_rphy_delete(struct sas_rphy *);
168extern int scsi_is_sas_rphy(const struct device *); 171extern int scsi_is_sas_rphy(const struct device *);
169 172
170struct sas_port *sas_port_alloc(struct device *, int); 173struct sas_port *sas_port_alloc(struct device *, int);
174struct sas_port *sas_port_alloc_num(struct device *);
171int sas_port_add(struct sas_port *); 175int sas_port_add(struct sas_port *);
172void sas_port_free(struct sas_port *); 176void sas_port_free(struct sas_port *);
173void sas_port_delete(struct sas_port *); 177void sas_port_delete(struct sas_port *);
174void sas_port_add_phy(struct sas_port *, struct sas_phy *); 178void sas_port_add_phy(struct sas_port *, struct sas_phy *);
175void sas_port_delete_phy(struct sas_port *, struct sas_phy *); 179void sas_port_delete_phy(struct sas_port *, struct sas_phy *);
180void sas_port_mark_backlink(struct sas_port *);
176int scsi_is_sas_port(const struct device *); 181int scsi_is_sas_port(const struct device *);
177 182
178extern struct scsi_transport_template * 183extern struct scsi_transport_template *
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 70fbf2e83766..f230f9ae01c2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,56 +16,48 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17 17
18/* This protects CPUs going up and down... */ 18/* This protects CPUs going up and down... */
19static DEFINE_MUTEX(cpucontrol); 19static DEFINE_MUTEX(cpu_add_remove_lock);
20static DEFINE_MUTEX(cpu_bitmask_lock);
20 21
21static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain); 22static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
22 23
23#ifdef CONFIG_HOTPLUG_CPU 24#ifdef CONFIG_HOTPLUG_CPU
24static struct task_struct *lock_cpu_hotplug_owner;
25static int lock_cpu_hotplug_depth;
26 25
27static int __lock_cpu_hotplug(int interruptible) 26/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
28{ 27static struct task_struct *recursive;
29 int ret = 0; 28static int recursive_depth;
30
31 if (lock_cpu_hotplug_owner != current) {
32 if (interruptible)
33 ret = mutex_lock_interruptible(&cpucontrol);
34 else
35 mutex_lock(&cpucontrol);
36 }
37
38 /*
39 * Set only if we succeed in locking
40 */
41 if (!ret) {
42 lock_cpu_hotplug_depth++;
43 lock_cpu_hotplug_owner = current;
44 }
45
46 return ret;
47}
48 29
49void lock_cpu_hotplug(void) 30void lock_cpu_hotplug(void)
50{ 31{
51 __lock_cpu_hotplug(0); 32 struct task_struct *tsk = current;
33
34 if (tsk == recursive) {
35 static int warnings = 10;
36 if (warnings) {
37 printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
38 WARN_ON(1);
39 warnings--;
40 }
41 recursive_depth++;
42 return;
43 }
44 mutex_lock(&cpu_bitmask_lock);
45 recursive = tsk;
52} 46}
53EXPORT_SYMBOL_GPL(lock_cpu_hotplug); 47EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
54 48
55void unlock_cpu_hotplug(void) 49void unlock_cpu_hotplug(void)
56{ 50{
57 if (--lock_cpu_hotplug_depth == 0) { 51 WARN_ON(recursive != current);
58 lock_cpu_hotplug_owner = NULL; 52 if (recursive_depth) {
59 mutex_unlock(&cpucontrol); 53 recursive_depth--;
54 return;
60 } 55 }
56 mutex_unlock(&cpu_bitmask_lock);
57 recursive = NULL;
61} 58}
62EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); 59EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
63 60
64int lock_cpu_hotplug_interruptible(void)
65{
66 return __lock_cpu_hotplug(1);
67}
68EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
69#endif /* CONFIG_HOTPLUG_CPU */ 61#endif /* CONFIG_HOTPLUG_CPU */
70 62
71/* Need to know about CPUs going up/down? */ 63/* Need to know about CPUs going up/down? */
@@ -122,9 +114,7 @@ int cpu_down(unsigned int cpu)
122 struct task_struct *p; 114 struct task_struct *p;
123 cpumask_t old_allowed, tmp; 115 cpumask_t old_allowed, tmp;
124 116
125 if ((err = lock_cpu_hotplug_interruptible()) != 0) 117 mutex_lock(&cpu_add_remove_lock);
126 return err;
127
128 if (num_online_cpus() == 1) { 118 if (num_online_cpus() == 1) {
129 err = -EBUSY; 119 err = -EBUSY;
130 goto out; 120 goto out;
@@ -150,7 +140,10 @@ int cpu_down(unsigned int cpu)
150 cpu_clear(cpu, tmp); 140 cpu_clear(cpu, tmp);
151 set_cpus_allowed(current, tmp); 141 set_cpus_allowed(current, tmp);
152 142
143 mutex_lock(&cpu_bitmask_lock);
153 p = __stop_machine_run(take_cpu_down, NULL, cpu); 144 p = __stop_machine_run(take_cpu_down, NULL, cpu);
145 mutex_unlock(&cpu_bitmask_lock);
146
154 if (IS_ERR(p)) { 147 if (IS_ERR(p)) {
155 /* CPU didn't die: tell everyone. Can't complain. */ 148 /* CPU didn't die: tell everyone. Can't complain. */
156 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, 149 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
@@ -187,7 +180,7 @@ out_thread:
187out_allowed: 180out_allowed:
188 set_cpus_allowed(current, old_allowed); 181 set_cpus_allowed(current, old_allowed);
189out: 182out:
190 unlock_cpu_hotplug(); 183 mutex_unlock(&cpu_add_remove_lock);
191 return err; 184 return err;
192} 185}
193#endif /*CONFIG_HOTPLUG_CPU*/ 186#endif /*CONFIG_HOTPLUG_CPU*/
@@ -197,9 +190,7 @@ int __devinit cpu_up(unsigned int cpu)
197 int ret; 190 int ret;
198 void *hcpu = (void *)(long)cpu; 191 void *hcpu = (void *)(long)cpu;
199 192
200 if ((ret = lock_cpu_hotplug_interruptible()) != 0) 193 mutex_lock(&cpu_add_remove_lock);
201 return ret;
202
203 if (cpu_online(cpu) || !cpu_present(cpu)) { 194 if (cpu_online(cpu) || !cpu_present(cpu)) {
204 ret = -EINVAL; 195 ret = -EINVAL;
205 goto out; 196 goto out;
@@ -214,7 +205,9 @@ int __devinit cpu_up(unsigned int cpu)
214 } 205 }
215 206
216 /* Arch-specific enabling code. */ 207 /* Arch-specific enabling code. */
208 mutex_lock(&cpu_bitmask_lock);
217 ret = __cpu_up(cpu); 209 ret = __cpu_up(cpu);
210 mutex_unlock(&cpu_bitmask_lock);
218 if (ret != 0) 211 if (ret != 0)
219 goto out_notify; 212 goto out_notify;
220 BUG_ON(!cpu_online(cpu)); 213 BUG_ON(!cpu_online(cpu));
@@ -227,6 +220,6 @@ out_notify:
227 blocking_notifier_call_chain(&cpu_chain, 220 blocking_notifier_call_chain(&cpu_chain,
228 CPU_UP_CANCELED, hcpu); 221 CPU_UP_CANCELED, hcpu);
229out: 222out:
230 unlock_cpu_hotplug(); 223 mutex_unlock(&cpu_add_remove_lock);
231 return ret; 224 return ret;
232} 225}
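
The hotplug locking split keeps cpu_add_remove_lock for serialising cpu_up()/cpu_down() and uses cpu_bitmask_lock around the actual bitmap updates, while lock_cpu_hotplug() tolerates (and loudly warns about) recursive callers such as cpufreq. A userspace sketch of that recursion guard, with pthreads in place of the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bitmask_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t holder;
static int holder_valid;
static int depth;

static void hotplug_lock(void)
{
    if (holder_valid && pthread_equal(holder, pthread_self())) {
        fprintf(stderr, "recursive hotplug lock\n");
        depth++;                    /* tolerate it, but warn */
        return;
    }
    pthread_mutex_lock(&bitmask_lock);
    holder = pthread_self();
    holder_valid = 1;
}

static void hotplug_unlock(void)
{
    if (depth) {
        depth--;
        return;
    }
    holder_valid = 0;
    pthread_mutex_unlock(&bitmask_lock);
}

int main(void)
{
    hotplug_lock();
    hotplug_lock();     /* recursive: warns and bumps depth */
    hotplug_unlock();
    hotplug_unlock();
    return 0;
}
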
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c232dc077438..1a649f2bb9bb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -762,6 +762,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
762 * 762 *
763 * Call with manage_mutex held. May nest a call to the 763 * Call with manage_mutex held. May nest a call to the
764 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 764 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
765 * Must not be called holding callback_mutex, because we must
766 * not call lock_cpu_hotplug() while holding callback_mutex.
765 */ 767 */
766 768
767static void update_cpu_domains(struct cpuset *cur) 769static void update_cpu_domains(struct cpuset *cur)
@@ -781,7 +783,7 @@ static void update_cpu_domains(struct cpuset *cur)
781 if (is_cpu_exclusive(c)) 783 if (is_cpu_exclusive(c))
782 cpus_andnot(pspan, pspan, c->cpus_allowed); 784 cpus_andnot(pspan, pspan, c->cpus_allowed);
783 } 785 }
784 if (is_removed(cur) || !is_cpu_exclusive(cur)) { 786 if (!is_cpu_exclusive(cur)) {
785 cpus_or(pspan, pspan, cur->cpus_allowed); 787 cpus_or(pspan, pspan, cur->cpus_allowed);
786 if (cpus_equal(pspan, cur->cpus_allowed)) 788 if (cpus_equal(pspan, cur->cpus_allowed))
787 return; 789 return;
@@ -1917,6 +1919,17 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1917 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); 1919 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
1918} 1920}
1919 1921
1922/*
1923 * Locking note on the strange update_flag() call below:
1924 *
1925 * If the cpuset being removed is marked cpu_exclusive, then simulate
1926 * turning cpu_exclusive off, which will call update_cpu_domains().
1927 * The lock_cpu_hotplug() call in update_cpu_domains() must not be
1928 * made while holding callback_mutex. Elsewhere the kernel nests
1929 * callback_mutex inside lock_cpu_hotplug() calls. So the reverse
1930 * nesting would risk an ABBA deadlock.
1931 */
1932
1920static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) 1933static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1921{ 1934{
1922 struct cpuset *cs = dentry->d_fsdata; 1935 struct cpuset *cs = dentry->d_fsdata;
@@ -1936,11 +1949,16 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1936 mutex_unlock(&manage_mutex); 1949 mutex_unlock(&manage_mutex);
1937 return -EBUSY; 1950 return -EBUSY;
1938 } 1951 }
1952 if (is_cpu_exclusive(cs)) {
1953 int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
1954 if (retval < 0) {
1955 mutex_unlock(&manage_mutex);
1956 return retval;
1957 }
1958 }
1939 parent = cs->parent; 1959 parent = cs->parent;
1940 mutex_lock(&callback_mutex); 1960 mutex_lock(&callback_mutex);
1941 set_bit(CS_REMOVED, &cs->flags); 1961 set_bit(CS_REMOVED, &cs->flags);
1942 if (is_cpu_exclusive(cs))
1943 update_cpu_domains(cs);
1944 list_del(&cs->sibling); /* delete my sibling from parent->children */ 1962 list_del(&cs->sibling); /* delete my sibling from parent->children */
1945 spin_lock(&cs->dentry->d_lock); 1963 spin_lock(&cs->dentry->d_lock);
1946 d = dget(cs->dentry); 1964 d = dget(cs->dentry);
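
The cpuset.c change above is purely about lock ordering: cpuset_rmdir() used to call update_cpu_domains(), and hence lock_cpu_hotplug(), while holding callback_mutex, whereas the rest of the kernel nests callback_mutex inside lock_cpu_hotplug(). Turning cpu_exclusive off via update_flag() before callback_mutex is taken keeps a single ordering everywhere. A toy illustration of the rule, with hypothetical mutexes standing in for the hotplug lock and callback_mutex:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_hotplug_lock);      /* stands in for lock_cpu_hotplug() */
static DEFINE_MUTEX(example_callback_mutex);

/* Every path must take the locks in this order; if one path took them in
 * the reverse order, two tasks could each hold one lock and wait forever
 * for the other (the ABBA deadlock the patch comment describes). */
static void example_ordered_path(void)
{
        mutex_lock(&example_hotplug_lock);      /* outer lock first */
        mutex_lock(&example_callback_mutex);    /* inner lock second */
        /* ... work that needs both ... */
        mutex_unlock(&example_callback_mutex);
        mutex_unlock(&example_hotplug_lock);
}
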
diff --git a/kernel/futex.c b/kernel/futex.c
index cf0c8e21d1ab..dda2049692a2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -415,15 +415,15 @@ out_unlock:
415 */ 415 */
416void exit_pi_state_list(struct task_struct *curr) 416void exit_pi_state_list(struct task_struct *curr)
417{ 417{
418 struct futex_hash_bucket *hb;
419 struct list_head *next, *head = &curr->pi_state_list; 418 struct list_head *next, *head = &curr->pi_state_list;
420 struct futex_pi_state *pi_state; 419 struct futex_pi_state *pi_state;
420 struct futex_hash_bucket *hb;
421 union futex_key key; 421 union futex_key key;
422 422
423 /* 423 /*
424 * We are a ZOMBIE and nobody can enqueue itself on 424 * We are a ZOMBIE and nobody can enqueue itself on
425 * pi_state_list anymore, but we have to be careful 425 * pi_state_list anymore, but we have to be careful
426 * versus waiters unqueueing themselfs 426 * versus waiters unqueueing themselves:
427 */ 427 */
428 spin_lock_irq(&curr->pi_lock); 428 spin_lock_irq(&curr->pi_lock);
429 while (!list_empty(head)) { 429 while (!list_empty(head)) {
@@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr)
431 next = head->next; 431 next = head->next;
432 pi_state = list_entry(next, struct futex_pi_state, list); 432 pi_state = list_entry(next, struct futex_pi_state, list);
433 key = pi_state->key; 433 key = pi_state->key;
434 hb = hash_futex(&key);
434 spin_unlock_irq(&curr->pi_lock); 435 spin_unlock_irq(&curr->pi_lock);
435 436
436 hb = hash_futex(&key);
437 spin_lock(&hb->lock); 437 spin_lock(&hb->lock);
438 438
439 spin_lock_irq(&curr->pi_lock); 439 spin_lock_irq(&curr->pi_lock);
440 /*
441 * We dropped the pi-lock, so re-check whether this
442 * task still owns the PI-state:
443 */
440 if (head->next != next) { 444 if (head->next != next) {
441 spin_unlock(&hb->lock); 445 spin_unlock(&hb->lock);
442 continue; 446 continue;
443 } 447 }
444 448
445 list_del_init(&pi_state->list);
446
447 WARN_ON(pi_state->owner != curr); 449 WARN_ON(pi_state->owner != curr);
448 450 WARN_ON(list_empty(&pi_state->list));
451 list_del_init(&pi_state->list);
449 pi_state->owner = NULL; 452 pi_state->owner = NULL;
450 spin_unlock_irq(&curr->pi_lock); 453 spin_unlock_irq(&curr->pi_lock);
451 454
@@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
470 head = &hb->chain; 473 head = &hb->chain;
471 474
472 list_for_each_entry_safe(this, next, head, list) { 475 list_for_each_entry_safe(this, next, head, list) {
473 if (match_futex (&this->key, &me->key)) { 476 if (match_futex(&this->key, &me->key)) {
474 /* 477 /*
475 * Another waiter already exists - bump up 478 * Another waiter already exists - bump up
476 * the refcount and return its pi_state: 479 * the refcount and return its pi_state:
@@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
482 if (unlikely(!pi_state)) 485 if (unlikely(!pi_state))
483 return -EINVAL; 486 return -EINVAL;
484 487
488 WARN_ON(!atomic_read(&pi_state->refcount));
489
485 atomic_inc(&pi_state->refcount); 490 atomic_inc(&pi_state->refcount);
486 me->pi_state = pi_state; 491 me->pi_state = pi_state;
487 492
@@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
490 } 495 }
491 496
492 /* 497 /*
493 * We are the first waiter - try to look up the real owner and 498 * We are the first waiter - try to look up the real owner and attach
494 * attach the new pi_state to it: 499 * the new pi_state to it, but bail out when the owner died bit is set
500 * and TID = 0:
495 */ 501 */
496 pid = uval & FUTEX_TID_MASK; 502 pid = uval & FUTEX_TID_MASK;
503 if (!pid && (uval & FUTEX_OWNER_DIED))
504 return -ESRCH;
497 p = futex_find_get_task(pid); 505 p = futex_find_get_task(pid);
498 if (!p) 506 if (!p)
499 return -ESRCH; 507 return -ESRCH;
@@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
510 pi_state->key = me->key; 518 pi_state->key = me->key;
511 519
512 spin_lock_irq(&p->pi_lock); 520 spin_lock_irq(&p->pi_lock);
521 WARN_ON(!list_empty(&pi_state->list));
513 list_add(&pi_state->list, &p->pi_state_list); 522 list_add(&pi_state->list, &p->pi_state_list);
514 pi_state->owner = p; 523 pi_state->owner = p;
515 spin_unlock_irq(&p->pi_lock); 524 spin_unlock_irq(&p->pi_lock);
@@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
573 * kept enabled while there is PI state around. We must also 582 * kept enabled while there is PI state around. We must also
574 * preserve the owner died bit.) 583 * preserve the owner died bit.)
575 */ 584 */
576 newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid; 585 if (!(uval & FUTEX_OWNER_DIED)) {
586 newval = FUTEX_WAITERS | new_owner->pid;
577 587
578 inc_preempt_count(); 588 inc_preempt_count();
579 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); 589 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
580 dec_preempt_count(); 590 dec_preempt_count();
591 if (curval == -EFAULT)
592 return -EFAULT;
593 if (curval != uval)
594 return -EINVAL;
595 }
581 596
582 if (curval == -EFAULT) 597 spin_lock_irq(&pi_state->owner->pi_lock);
583 return -EFAULT; 598 WARN_ON(list_empty(&pi_state->list));
584 if (curval != uval) 599 list_del_init(&pi_state->list);
585 return -EINVAL; 600 spin_unlock_irq(&pi_state->owner->pi_lock);
586 601
587 list_del_init(&pi_state->owner->pi_state_list); 602 spin_lock_irq(&new_owner->pi_lock);
603 WARN_ON(!list_empty(&pi_state->list));
588 list_add(&pi_state->list, &new_owner->pi_state_list); 604 list_add(&pi_state->list, &new_owner->pi_state_list);
589 pi_state->owner = new_owner; 605 pi_state->owner = new_owner;
606 spin_unlock_irq(&new_owner->pi_lock);
607
590 rt_mutex_unlock(&pi_state->pi_mutex); 608 rt_mutex_unlock(&pi_state->pi_mutex);
591 609
592 return 0; 610 return 0;
@@ -1236,6 +1254,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1236 /* Owner died? */ 1254 /* Owner died? */
1237 if (q.pi_state->owner != NULL) { 1255 if (q.pi_state->owner != NULL) {
1238 spin_lock_irq(&q.pi_state->owner->pi_lock); 1256 spin_lock_irq(&q.pi_state->owner->pi_lock);
1257 WARN_ON(list_empty(&q.pi_state->list));
1239 list_del_init(&q.pi_state->list); 1258 list_del_init(&q.pi_state->list);
1240 spin_unlock_irq(&q.pi_state->owner->pi_lock); 1259 spin_unlock_irq(&q.pi_state->owner->pi_lock);
1241 } else 1260 } else
@@ -1244,6 +1263,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1244 q.pi_state->owner = current; 1263 q.pi_state->owner = current;
1245 1264
1246 spin_lock_irq(&current->pi_lock); 1265 spin_lock_irq(&current->pi_lock);
1266 WARN_ON(!list_empty(&q.pi_state->list));
1247 list_add(&q.pi_state->list, &current->pi_state_list); 1267 list_add(&q.pi_state->list, &current->pi_state_list);
1248 spin_unlock_irq(&current->pi_lock); 1268 spin_unlock_irq(&current->pi_lock);
1249 1269
@@ -1427,9 +1447,11 @@ retry_locked:
1427 * again. If it succeeds then we can return without waking 1447 * again. If it succeeds then we can return without waking
1428 * anyone else up: 1448 * anyone else up:
1429 */ 1449 */
1430 inc_preempt_count(); 1450 if (!(uval & FUTEX_OWNER_DIED)) {
1431 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); 1451 inc_preempt_count();
1432 dec_preempt_count(); 1452 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
1453 dec_preempt_count();
1454 }
1433 1455
1434 if (unlikely(uval == -EFAULT)) 1456 if (unlikely(uval == -EFAULT))
1435 goto pi_faulted; 1457 goto pi_faulted;
@@ -1462,9 +1484,11 @@ retry_locked:
1462 /* 1484 /*
1463 * No waiters - kernel unlocks the futex: 1485 * No waiters - kernel unlocks the futex:
1464 */ 1486 */
1465 ret = unlock_futex_pi(uaddr, uval); 1487 if (!(uval & FUTEX_OWNER_DIED)) {
1466 if (ret == -EFAULT) 1488 ret = unlock_futex_pi(uaddr, uval);
1467 goto pi_faulted; 1489 if (ret == -EFAULT)
1490 goto pi_faulted;
1491 }
1468 1492
1469out_unlock: 1493out_unlock:
1470 spin_unlock(&hb->lock); 1494 spin_unlock(&hb->lock);
@@ -1683,9 +1707,9 @@ err_unlock:
1683 * Process a futex-list entry, check whether it's owned by the 1707 * Process a futex-list entry, check whether it's owned by the
1684 * dying task, and do notification if so: 1708 * dying task, and do notification if so:
1685 */ 1709 */
1686int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) 1710int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1687{ 1711{
1688 u32 uval, nval; 1712 u32 uval, nval, mval;
1689 1713
1690retry: 1714retry:
1691 if (get_user(uval, uaddr)) 1715 if (get_user(uval, uaddr))
@@ -1702,21 +1726,45 @@ retry:
1702 * thread-death.) The rest of the cleanup is done in 1726 * thread-death.) The rest of the cleanup is done in
1703 * userspace. 1727 * userspace.
1704 */ 1728 */
1705 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 1729 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1706 uval | FUTEX_OWNER_DIED); 1730 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1731
1707 if (nval == -EFAULT) 1732 if (nval == -EFAULT)
1708 return -1; 1733 return -1;
1709 1734
1710 if (nval != uval) 1735 if (nval != uval)
1711 goto retry; 1736 goto retry;
1712 1737
1713 if (uval & FUTEX_WAITERS) 1738 /*
1714 futex_wake(uaddr, 1); 1739 * Wake robust non-PI futexes here. The wakeup of
1740 * PI futexes happens in exit_pi_state():
1741 */
1742 if (!pi) {
1743 if (uval & FUTEX_WAITERS)
1744 futex_wake(uaddr, 1);
1745 }
1715 } 1746 }
1716 return 0; 1747 return 0;
1717} 1748}
1718 1749
1719/* 1750/*
1751 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1752 */
1753static inline int fetch_robust_entry(struct robust_list __user **entry,
1754 struct robust_list __user **head, int *pi)
1755{
1756 unsigned long uentry;
1757
1758 if (get_user(uentry, (unsigned long *)head))
1759 return -EFAULT;
1760
1761 *entry = (void *)(uentry & ~1UL);
1762 *pi = uentry & 1;
1763
1764 return 0;
1765}
1766
1767/*
1720 * Walk curr->robust_list (very carefully, it's a userspace list!) 1768 * Walk curr->robust_list (very carefully, it's a userspace list!)
1721 * and mark any locks found there dead, and notify any waiters. 1769 * and mark any locks found there dead, and notify any waiters.
1722 * 1770 *
@@ -1726,14 +1774,14 @@ void exit_robust_list(struct task_struct *curr)
1726{ 1774{
1727 struct robust_list_head __user *head = curr->robust_list; 1775 struct robust_list_head __user *head = curr->robust_list;
1728 struct robust_list __user *entry, *pending; 1776 struct robust_list __user *entry, *pending;
1729 unsigned int limit = ROBUST_LIST_LIMIT; 1777 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1730 unsigned long futex_offset; 1778 unsigned long futex_offset;
1731 1779
1732 /* 1780 /*
1733 * Fetch the list head (which was registered earlier, via 1781 * Fetch the list head (which was registered earlier, via
1734 * sys_set_robust_list()): 1782 * sys_set_robust_list()):
1735 */ 1783 */
1736 if (get_user(entry, &head->list.next)) 1784 if (fetch_robust_entry(&entry, &head->list.next, &pi))
1737 return; 1785 return;
1738 /* 1786 /*
1739 * Fetch the relative futex offset: 1787 * Fetch the relative futex offset:
@@ -1744,10 +1792,11 @@ void exit_robust_list(struct task_struct *curr)
1744 * Fetch any possibly pending lock-add first, and handle it 1792 * Fetch any possibly pending lock-add first, and handle it
1745 * if it exists: 1793 * if it exists:
1746 */ 1794 */
1747 if (get_user(pending, &head->list_op_pending)) 1795 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1748 return; 1796 return;
1797
1749 if (pending) 1798 if (pending)
1750 handle_futex_death((void *)pending + futex_offset, curr); 1799 handle_futex_death((void *)pending + futex_offset, curr, pip);
1751 1800
1752 while (entry != &head->list) { 1801 while (entry != &head->list) {
1753 /* 1802 /*
@@ -1756,12 +1805,12 @@ void exit_robust_list(struct task_struct *curr)
1756 */ 1805 */
1757 if (entry != pending) 1806 if (entry != pending)
1758 if (handle_futex_death((void *)entry + futex_offset, 1807 if (handle_futex_death((void *)entry + futex_offset,
1759 curr)) 1808 curr, pi))
1760 return; 1809 return;
1761 /* 1810 /*
1762 * Fetch the next entry in the list: 1811 * Fetch the next entry in the list:
1763 */ 1812 */
1764 if (get_user(entry, &entry->next)) 1813 if (fetch_robust_entry(&entry, &entry->next, &pi))
1765 return; 1814 return;
1766 /* 1815 /*
1767 * Avoid excessively long or circular lists: 1816 * Avoid excessively long or circular lists:
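
The robust-list changes above encode one extra bit in each user-space list pointer: bit 0 marks the entry as a PI futex, and fetch_robust_entry() strips the bit before the pointer is used, passing the flag to handle_futex_death() so PI futexes are woken from exit_pi_state_list() rather than here. A small user-space illustration of the same tagging scheme (hypothetical names; the trick relies on the futex word being at least 4-byte aligned, so bit 0 is always free):

#include <stdint.h>
#include <stdio.h>

/* Decode a tagged entry: bit 0 is the PI flag, the rest is the pointer. */
static void *untag_entry(uintptr_t uentry, int *pi)
{
        *pi = (int)(uentry & 1);
        return (void *)(uentry & ~1UL);
}

int main(void)
{
        int futex_word;                                 /* stand-in lock word */
        uintptr_t tagged = (uintptr_t)&futex_word | 1;  /* mark as PI */
        int pi;
        void *p = untag_entry(tagged, &pi);

        printf("pi=%d pointer_ok=%d\n", pi, p == (void *)&futex_word);
        return 0;
}
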
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d1d92b441fb7..d1aab1a452cc 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -12,6 +12,23 @@
12 12
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14 14
15
16/*
17 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
18 */
19static inline int
20fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
21 compat_uptr_t *head, int *pi)
22{
23 if (get_user(*uentry, head))
24 return -EFAULT;
25
26 *entry = compat_ptr((*uentry) & ~1);
27 *pi = (unsigned int)(*uentry) & 1;
28
29 return 0;
30}
31
15/* 32/*
16 * Walk curr->robust_list (very carefully, it's a userspace list!) 33 * Walk curr->robust_list (very carefully, it's a userspace list!)
17 * and mark any locks found there dead, and notify any waiters. 34 * and mark any locks found there dead, and notify any waiters.
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr)
22{ 39{
23 struct compat_robust_list_head __user *head = curr->compat_robust_list; 40 struct compat_robust_list_head __user *head = curr->compat_robust_list;
24 struct robust_list __user *entry, *pending; 41 struct robust_list __user *entry, *pending;
42 unsigned int limit = ROBUST_LIST_LIMIT, pi;
25 compat_uptr_t uentry, upending; 43 compat_uptr_t uentry, upending;
26 unsigned int limit = ROBUST_LIST_LIMIT;
27 compat_long_t futex_offset; 44 compat_long_t futex_offset;
28 45
29 /* 46 /*
30 * Fetch the list head (which was registered earlier, via 47 * Fetch the list head (which was registered earlier, via
31 * sys_set_robust_list()): 48 * sys_set_robust_list()):
32 */ 49 */
33 if (get_user(uentry, &head->list.next)) 50 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
34 return; 51 return;
35 entry = compat_ptr(uentry);
36 /* 52 /*
37 * Fetch the relative futex offset: 53 * Fetch the relative futex offset:
38 */ 54 */
@@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr)
42 * Fetch any possibly pending lock-add first, and handle it 58 * Fetch any possibly pending lock-add first, and handle it
43 * if it exists: 59 * if it exists:
44 */ 60 */
45 if (get_user(upending, &head->list_op_pending)) 61 if (fetch_robust_entry(&upending, &pending,
62 &head->list_op_pending, &pi))
46 return; 63 return;
47 pending = compat_ptr(upending);
48 if (upending) 64 if (upending)
49 handle_futex_death((void *)pending + futex_offset, curr); 65 handle_futex_death((void *)pending + futex_offset, curr, pi);
50 66
51 while (compat_ptr(uentry) != &head->list) { 67 while (compat_ptr(uentry) != &head->list) {
52 /* 68 /*
@@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr)
55 */ 71 */
56 if (entry != pending) 72 if (entry != pending)
57 if (handle_futex_death((void *)entry + futex_offset, 73 if (handle_futex_death((void *)entry + futex_offset,
58 curr)) 74 curr, pi))
59 return; 75 return;
60 76
61 /* 77 /*
62 * Fetch the next entry in the list: 78 * Fetch the next entry in the list:
63 */ 79 */
64 if (get_user(uentry, (compat_uptr_t *)&entry->next)) 80 if (fetch_robust_entry(&uentry, &entry,
81 (compat_uptr_t *)&entry->next, &pi))
65 return; 82 return;
66 entry = compat_ptr(uentry);
67 /* 83 /*
68 * Avoid excessively long or circular lists: 84 * Avoid excessively long or circular lists:
69 */ 85 */
diff --git a/mm/filemap.c b/mm/filemap.c
index a92d690b3ae5..d9bbea1e87d2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -849,8 +849,6 @@ static void shrink_readahead_size_eio(struct file *filp,
849 return; 849 return;
850 850
851 ra->ra_pages /= 4; 851 ra->ra_pages /= 4;
852 printk(KERN_WARNING "Reducing readahead size to %luK\n",
853 ra->ra_pages << (PAGE_CACHE_SHIFT - 10));
854} 852}
855 853
856/** 854/**
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 458031bfff55..18fcb9fa518d 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -67,10 +67,6 @@ static struct packet_type vlan_packet_type = {
67 .func = vlan_skb_recv, /* VLAN receive method */ 67 .func = vlan_skb_recv, /* VLAN receive method */
68}; 68};
69 69
70/* Bits of netdev state that are propagated from real device to virtual */
71#define VLAN_LINK_STATE_MASK \
72 ((1<<__LINK_STATE_PRESENT)|(1<<__LINK_STATE_NOCARRIER)|(1<<__LINK_STATE_DORMANT))
73
74/* End of global variables definitions. */ 70/* End of global variables definitions. */
75 71
76/* 72/*
@@ -479,7 +475,9 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
479 new_dev->flags = real_dev->flags; 475 new_dev->flags = real_dev->flags;
480 new_dev->flags &= ~IFF_UP; 476 new_dev->flags &= ~IFF_UP;
481 477
482 new_dev->state = real_dev->state & ~(1<<__LINK_STATE_START); 478 new_dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
479 (1<<__LINK_STATE_DORMANT))) |
480 (1<<__LINK_STATE_PRESENT);
483 481
484 /* need 4 bytes for extra VLAN header info, 482 /* need 4 bytes for extra VLAN header info,
485 * hope the underlying device can handle it. 483 * hope the underlying device can handle it.
@@ -542,12 +540,11 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
542 * so it cannot "appear" on us. 540 * so it cannot "appear" on us.
543 */ 541 */
544 if (!grp) { /* need to add a new group */ 542 if (!grp) { /* need to add a new group */
545 grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL); 543 grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
546 if (!grp) 544 if (!grp)
547 goto out_free_unregister; 545 goto out_free_unregister;
548 546
549 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ 547 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
550 memset(grp, 0, sizeof(struct vlan_group));
551 grp->real_dev_ifindex = real_dev->ifindex; 548 grp->real_dev_ifindex = real_dev->ifindex;
552 549
553 hlist_add_head_rcu(&grp->hlist, 550 hlist_add_head_rcu(&grp->hlist,
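
The vlan.c hunk above is the first of many identical conversions in the networking files below: a kmalloc() immediately followed by memset(..., 0, ...) becomes a single kzalloc(). The shape of the change in the abstract (struct example_obj is hypothetical):

#include <linux/slab.h>
#include <linux/string.h>

struct example_obj {
        int count;
        void *data;
};

static struct example_obj *example_alloc_old(void)
{
        struct example_obj *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        memset(p, 0, sizeof(*p));       /* separate zeroing step */
        return p;
}

static struct example_obj *example_alloc_new(void)
{
        /* kzalloc() allocates and zeroes in one call */
        return kzalloc(sizeof(struct example_obj), GFP_KERNEL);
}
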
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 5ee96d4b40e9..96dc6bb52d14 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -227,12 +227,11 @@ static void atif_drop_device(struct net_device *dev)
227static struct atalk_iface *atif_add_device(struct net_device *dev, 227static struct atalk_iface *atif_add_device(struct net_device *dev,
228 struct atalk_addr *sa) 228 struct atalk_addr *sa)
229{ 229{
230 struct atalk_iface *iface = kmalloc(sizeof(*iface), GFP_KERNEL); 230 struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL);
231 231
232 if (!iface) 232 if (!iface)
233 goto out; 233 goto out;
234 234
235 memset(iface, 0, sizeof(*iface));
236 dev_hold(dev); 235 dev_hold(dev);
237 iface->dev = dev; 236 iface->dev = dev;
238 dev->atalk_ptr = iface; 237 dev->atalk_ptr = iface;
@@ -559,12 +558,11 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
559 } 558 }
560 559
561 if (!rt) { 560 if (!rt) {
562 rt = kmalloc(sizeof(*rt), GFP_ATOMIC); 561 rt = kzalloc(sizeof(*rt), GFP_ATOMIC);
563 562
564 retval = -ENOBUFS; 563 retval = -ENOBUFS;
565 if (!rt) 564 if (!rt)
566 goto out_unlock; 565 goto out_unlock;
567 memset(rt, 0, sizeof(*rt));
568 566
569 rt->next = atalk_routes; 567 rt->next = atalk_routes;
570 atalk_routes = rt; 568 atalk_routes = rt;
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index a487233dc466..d00cca97eb33 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -508,10 +508,9 @@ Note: we do not have explicit unassign, but look at _push()
508 508
509 if (copy_from_user(&be, arg, sizeof be)) 509 if (copy_from_user(&be, arg, sizeof be))
510 return -EFAULT; 510 return -EFAULT;
511 brvcc = kmalloc(sizeof(struct br2684_vcc), GFP_KERNEL); 511 brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
512 if (!brvcc) 512 if (!brvcc)
513 return -ENOMEM; 513 return -ENOMEM;
514 memset(brvcc, 0, sizeof(struct br2684_vcc));
515 write_lock_irq(&devs_lock); 514 write_lock_irq(&devs_lock);
516 net_dev = br2684_find_dev(&be.ifspec); 515 net_dev = br2684_find_dev(&be.ifspec);
517 if (net_dev == NULL) { 516 if (net_dev == NULL) {
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 2e62105d91bd..7ce7bfe3fbad 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -929,12 +929,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
929 struct seq_file *seq; 929 struct seq_file *seq;
930 int rc = -EAGAIN; 930 int rc = -EAGAIN;
931 931
932 state = kmalloc(sizeof(*state), GFP_KERNEL); 932 state = kzalloc(sizeof(*state), GFP_KERNEL);
933 if (!state) { 933 if (!state) {
934 rc = -ENOMEM; 934 rc = -ENOMEM;
935 goto out_kfree; 935 goto out_kfree;
936 } 936 }
937 memset(state, 0, sizeof(*state));
938 state->ns.neigh_sub_iter = clip_seq_sub_iter; 937 state->ns.neigh_sub_iter = clip_seq_sub_iter;
939 938
940 rc = seq_open(file, &arp_seq_ops); 939 rc = seq_open(file, &arp_seq_ops);
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 4b68a18171cf..b4aa489849df 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1811,12 +1811,11 @@ make_entry(struct lec_priv *priv, unsigned char *mac_addr)
1811{ 1811{
1812 struct lec_arp_table *to_return; 1812 struct lec_arp_table *to_return;
1813 1813
1814 to_return = kmalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1814 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
1815 if (!to_return) { 1815 if (!to_return) {
1816 printk("LEC: Arp entry kmalloc failed\n"); 1816 printk("LEC: Arp entry kmalloc failed\n");
1817 return NULL; 1817 return NULL;
1818 } 1818 }
1819 memset(to_return, 0, sizeof(struct lec_arp_table));
1820 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1819 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
1821 init_timer(&to_return->timer); 1820 init_timer(&to_return->timer);
1822 to_return->timer.function = lec_arp_expire_arp; 1821 to_return->timer.function = lec_arp_expire_arp;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 9aafe1e2f048..00704661e83f 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -258,10 +258,9 @@ static struct mpoa_client *alloc_mpc(void)
258{ 258{
259 struct mpoa_client *mpc; 259 struct mpoa_client *mpc;
260 260
261 mpc = kmalloc(sizeof (struct mpoa_client), GFP_KERNEL); 261 mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL);
262 if (mpc == NULL) 262 if (mpc == NULL)
263 return NULL; 263 return NULL;
264 memset(mpc, 0, sizeof(struct mpoa_client));
265 rwlock_init(&mpc->ingress_lock); 264 rwlock_init(&mpc->ingress_lock);
266 rwlock_init(&mpc->egress_lock); 265 rwlock_init(&mpc->egress_lock);
267 mpc->next = mpcs; 266 mpc->next = mpcs;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 76a7d8ff6c0e..19d5dfc0702f 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -287,10 +287,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
287 if (be.encaps != PPPOATM_ENCAPS_AUTODETECT && 287 if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
288 be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC) 288 be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
289 return -EINVAL; 289 return -EINVAL;
290 pvcc = kmalloc(sizeof(*pvcc), GFP_KERNEL); 290 pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
291 if (pvcc == NULL) 291 if (pvcc == NULL)
292 return -ENOMEM; 292 return -ENOMEM;
293 memset(pvcc, 0, sizeof(*pvcc));
294 pvcc->atmvcc = atmvcc; 293 pvcc->atmvcc = atmvcc;
295 pvcc->old_push = atmvcc->push; 294 pvcc->old_push = atmvcc->push;
296 pvcc->old_pop = atmvcc->pop; 295 pvcc->old_pop = atmvcc->pop;
diff --git a/net/atm/resources.c b/net/atm/resources.c
index de25c6408b04..529f7e64aa2c 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -33,10 +33,9 @@ static struct atm_dev *__alloc_atm_dev(const char *type)
33{ 33{
34 struct atm_dev *dev; 34 struct atm_dev *dev;
35 35
36 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 36 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
37 if (!dev) 37 if (!dev)
38 return NULL; 38 return NULL;
39 memset(dev, 0, sizeof(*dev));
40 dev->type = type; 39 dev->type = type;
41 dev->signal = ATM_PHY_SIG_UNKNOWN; 40 dev->signal = ATM_PHY_SIG_UNKNOWN;
42 dev->link_rate = ATM_OC3_PCR; 41 dev->link_rate = ATM_OC3_PCR;
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 369a75b160f2..867d42537979 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -203,13 +203,11 @@ void ax25_register_sysctl(void)
203 for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) 203 for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
204 ax25_table_size += sizeof(ctl_table); 204 ax25_table_size += sizeof(ctl_table);
205 205
206 if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { 206 if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
207 spin_unlock_bh(&ax25_dev_lock); 207 spin_unlock_bh(&ax25_dev_lock);
208 return; 208 return;
209 } 209 }
210 210
211 memset(ax25_table, 0x00, ax25_table_size);
212
213 for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { 211 for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
214 ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC); 212 ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC);
215 if (!child) { 213 if (!child) {
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 77eab8f4c7fd..332dd8f436ea 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -55,6 +55,7 @@
55#define VERSION "1.8" 55#define VERSION "1.8"
56 56
57static int disable_cfc = 0; 57static int disable_cfc = 0;
58static int channel_mtu = -1;
58static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; 59static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
59 60
60static struct task_struct *rfcomm_thread; 61static struct task_struct *rfcomm_thread;
@@ -812,7 +813,10 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
812 pn->credits = 0; 813 pn->credits = 0;
813 } 814 }
814 815
815 pn->mtu = htobs(d->mtu); 816 if (cr && channel_mtu >= 0)
817 pn->mtu = htobs(channel_mtu);
818 else
819 pn->mtu = htobs(d->mtu);
816 820
817 *ptr = __fcs(buf); ptr++; 821 *ptr = __fcs(buf); ptr++;
818 822
@@ -1243,7 +1247,10 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)
1243 1247
1244 d->priority = pn->priority; 1248 d->priority = pn->priority;
1245 1249
1246 d->mtu = s->mtu = btohs(pn->mtu); 1250 d->mtu = btohs(pn->mtu);
1251
1252 if (cr && d->mtu > s->mtu)
1253 d->mtu = s->mtu;
1247 1254
1248 return 0; 1255 return 0;
1249} 1256}
@@ -1770,6 +1777,11 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1770 s = rfcomm_session_add(nsock, BT_OPEN); 1777 s = rfcomm_session_add(nsock, BT_OPEN);
1771 if (s) { 1778 if (s) {
1772 rfcomm_session_hold(s); 1779 rfcomm_session_hold(s);
1780
1781 /* We should adjust MTU on incoming sessions.
1782 * L2CAP MTU minus UIH header and FCS. */
1783 s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5;
1784
1773 rfcomm_schedule(RFCOMM_SCHED_RX); 1785 rfcomm_schedule(RFCOMM_SCHED_RX);
1774 } else 1786 } else
1775 sock_release(nsock); 1787 sock_release(nsock);
@@ -2087,6 +2099,9 @@ module_exit(rfcomm_exit);
2087module_param(disable_cfc, bool, 0644); 2099module_param(disable_cfc, bool, 0644);
2088MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); 2100MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control");
2089 2101
2102module_param(channel_mtu, int, 0644);
2103MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel");
2104
2090module_param(l2cap_mtu, uint, 0644); 2105module_param(l2cap_mtu, uint, 0644);
2091MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); 2106MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");
2092 2107
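
The rfcomm changes above do three related things: a new channel_mtu module parameter can override the MTU proposed in an outgoing PN, an accepted DLC MTU is clamped to the session MTU instead of overwriting it, and incoming sessions derive their MTU from the smaller of the L2CAP omtu/imtu minus 5 bytes of UIH header and FCS. Collapsed into two illustrative helpers (a sketch of the policy, not the driver's actual control flow):

/* Pick the DLC MTU given the session limit and the optional module
 * parameter override (-1 means "not set", matching the patch default). */
static int example_pick_dlc_mtu(int requested, int session_mtu, int channel_mtu)
{
        int mtu = requested;

        if (channel_mtu >= 0)           /* explicit override from the parameter */
                mtu = channel_mtu;
        if (mtu > session_mtu)          /* never promise more than the session */
                mtu = session_mtu;
        return mtu;
}

/* Session MTU for an accepted connection: the smaller L2CAP MTU minus
 * 5 bytes of RFCOMM UIH header + FCS overhead. */
static int example_session_mtu(int l2cap_omtu, int l2cap_imtu)
{
        int m = l2cap_omtu < l2cap_imtu ? l2cap_omtu : l2cap_imtu;

        return m - 5;
}
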
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 159fb8409824..4e4119a12139 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -162,12 +162,10 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
162 if (num > BR_MAX_PORTS) 162 if (num > BR_MAX_PORTS)
163 num = BR_MAX_PORTS; 163 num = BR_MAX_PORTS;
164 164
165 indices = kmalloc(num*sizeof(int), GFP_KERNEL); 165 indices = kcalloc(num, sizeof(int), GFP_KERNEL);
166 if (indices == NULL) 166 if (indices == NULL)
167 return -ENOMEM; 167 return -ENOMEM;
168 168
169 memset(indices, 0, num*sizeof(int));
170
171 get_port_ifindices(br, indices, num); 169 get_port_ifindices(br, indices, num);
172 if (copy_to_user((void __user *)args[1], indices, num*sizeof(int))) 170 if (copy_to_user((void __user *)args[1], indices, num*sizeof(int)))
173 num = -EFAULT; 171 num = -EFAULT;
@@ -327,11 +325,10 @@ static int old_deviceless(void __user *uarg)
327 325
328 if (args[2] >= 2048) 326 if (args[2] >= 2048)
329 return -ENOMEM; 327 return -ENOMEM;
330 indices = kmalloc(args[2]*sizeof(int), GFP_KERNEL); 328 indices = kcalloc(args[2], sizeof(int), GFP_KERNEL);
331 if (indices == NULL) 329 if (indices == NULL)
332 return -ENOMEM; 330 return -ENOMEM;
333 331
334 memset(indices, 0, args[2]*sizeof(int));
335 args[2] = get_bridge_ifindices(indices, args[2]); 332 args[2] = get_bridge_ifindices(indices, args[2]);
336 333
337 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) 334 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
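
Related to the kzalloc() conversions, the bridge ioctl hunks replace kmalloc(n * sizeof(int)) plus memset() with kcalloc(n, sizeof(int), ...), which zeroes the array and also guards the element-count multiplication against overflow. In the abstract (sketch only):

#include <linux/slab.h>

/* Allocate a zeroed array of port indices; kcalloc() returns NULL if the
 * element count times the element size would overflow. */
static int *example_alloc_indices(unsigned int num)
{
        return kcalloc(num, sizeof(int), GFP_KERNEL);
}
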
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index cbc8a389a0a8..05b3de888243 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -61,6 +61,9 @@ static int brnf_filter_vlan_tagged = 1;
61#define brnf_filter_vlan_tagged 1 61#define brnf_filter_vlan_tagged 1
62#endif 62#endif
63 63
64int brnf_deferred_hooks;
65EXPORT_SYMBOL_GPL(brnf_deferred_hooks);
66
64static __be16 inline vlan_proto(const struct sk_buff *skb) 67static __be16 inline vlan_proto(const struct sk_buff *skb)
65{ 68{
66 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 69 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -890,6 +893,8 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
890 return NF_ACCEPT; 893 return NF_ACCEPT;
891 else if (ip->version == 6 && !brnf_call_ip6tables) 894 else if (ip->version == 6 && !brnf_call_ip6tables)
892 return NF_ACCEPT; 895 return NF_ACCEPT;
896 else if (!brnf_deferred_hooks)
897 return NF_ACCEPT;
893#endif 898#endif
894 if (hook == NF_IP_POST_ROUTING) 899 if (hook == NF_IP_POST_ROUTING)
895 return NF_ACCEPT; 900 return NF_ACCEPT;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 27ce1683caf5..2797e2815418 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -437,7 +437,7 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
437{ 437{
438 struct ethtool_pauseparam pauseparam; 438 struct ethtool_pauseparam pauseparam;
439 439
440 if (!dev->ethtool_ops->get_pauseparam) 440 if (!dev->ethtool_ops->set_pauseparam)
441 return -EOPNOTSUPP; 441 return -EOPNOTSUPP;
442 442
443 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) 443 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index b7c98dbcdb81..248a6b666aff 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -29,6 +29,7 @@
29#include <linux/socket.h> 29#include <linux/socket.h>
30#include <linux/rtnetlink.h> /* for BUG_TRAP */ 30#include <linux/rtnetlink.h> /* for BUG_TRAP */
31#include <net/tcp.h> 31#include <net/tcp.h>
32#include <net/netdma.h>
32 33
33#define NET_DMA_DEFAULT_COPYBREAK 4096 34#define NET_DMA_DEFAULT_COPYBREAK 4096
34 35
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index 6048373c7186..b44c45504fb6 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -26,4 +26,6 @@ extern void dccp_feat_clean(struct dccp_minisock *dmsk);
26extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); 26extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk);
27extern int dccp_feat_init(struct dccp_minisock *dmsk); 27extern int dccp_feat_init(struct dccp_minisock *dmsk);
28 28
29extern int dccp_feat_default_sequence_window;
30
29#endif /* _DCCP_FEAT_H */ 31#endif /* _DCCP_FEAT_H */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index c3073e7e81d3..7f56f7e8f571 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -504,8 +504,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
504 ireq = inet_rsk(req); 504 ireq = inet_rsk(req);
505 ireq->loc_addr = daddr; 505 ireq->loc_addr = daddr;
506 ireq->rmt_addr = saddr; 506 ireq->rmt_addr = saddr;
507 req->rcv_wnd = 100; /* Fake, option parsing will get the 507 req->rcv_wnd = dccp_feat_default_sequence_window;
508 right value */
509 ireq->opt = NULL; 508 ireq->opt = NULL;
510 509
511 /* 510 /*
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ff42bc43263d..9f3d4d7cd0bf 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -31,6 +31,7 @@
31 31
32#include "dccp.h" 32#include "dccp.h"
33#include "ipv6.h" 33#include "ipv6.h"
34#include "feat.h"
34 35
35/* Socket used for sending RSTs and ACKs */ 36/* Socket used for sending RSTs and ACKs */
36static struct socket *dccp_v6_ctl_socket; 37static struct socket *dccp_v6_ctl_socket;
@@ -707,8 +708,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
707 ireq = inet_rsk(req); 708 ireq = inet_rsk(req);
708 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); 709 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
709 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); 710 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
710 req->rcv_wnd = 100; /* Fake, option parsing will get the 711 req->rcv_wnd = dccp_feat_default_sequence_window;
711 right value */
712 ireq6->pktopts = NULL; 712 ireq6->pktopts = NULL;
713 713
714 if (ipv6_opt_accepted(sk, skb) || 714 if (ipv6_opt_accepted(sk, skb) ||
diff --git a/net/dccp/options.c b/net/dccp/options.c
index c3cda1e39aa8..daf72bb671f0 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -29,6 +29,8 @@ int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO;
29int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; 29int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR;
30int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; 30int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT;
31 31
32EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window);
33
32void dccp_minisock_init(struct dccp_minisock *dmsk) 34void dccp_minisock_init(struct dccp_minisock *dmsk)
33{ 35{
34 dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window; 36 dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window;
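
The dccp hunks replace the hard-coded request window of 100 with the existing dccp_feat_default_sequence_window tunable: feat.h gains the extern declaration, options.c exports the symbol, and the IPv4/IPv6 request paths read it. The general pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/types.h>

/* Defined and exported in exactly one translation unit ... */
int example_default_sequence_window = 100;
EXPORT_SYMBOL_GPL(example_default_sequence_window);

/* ... declared "extern int example_default_sequence_window;" in a shared
 * header, and read by the users instead of a magic constant: */
static void example_init_request_window(u32 *rcv_wnd)
{
        *rcv_wnd = example_default_sequence_window;
}
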
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 98a25208440d..476455fbdb03 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -413,11 +413,7 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
413{ 413{
414 struct dn_ifaddr *ifa; 414 struct dn_ifaddr *ifa;
415 415
416 ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); 416 ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
417
418 if (ifa) {
419 memset(ifa, 0, sizeof(*ifa));
420 }
421 417
422 return ifa; 418 return ifa;
423} 419}
@@ -1105,10 +1101,9 @@ struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1105 return NULL; 1101 return NULL;
1106 1102
1107 *err = -ENOBUFS; 1103 *err = -ENOBUFS;
1108 if ((dn_db = kmalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) 1104 if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
1109 return NULL; 1105 return NULL;
1110 1106
1111 memset(dn_db, 0, sizeof(struct dn_dev));
1112 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1107 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
1113 smp_wmb(); 1108 smp_wmb();
1114 dev->dn_ptr = dn_db; 1109 dev->dn_ptr = dn_db;
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 0375077391b7..fa20e2efcfc1 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -283,11 +283,10 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
283 goto err_inval; 283 goto err_inval;
284 } 284 }
285 285
286 fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); 286 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
287 err = -ENOBUFS; 287 err = -ENOBUFS;
288 if (fi == NULL) 288 if (fi == NULL)
289 goto failure; 289 goto failure;
290 memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct dn_fib_nh));
291 290
292 fi->fib_protocol = r->rtm_protocol; 291 fi->fib_protocol = r->rtm_protocol;
293 fi->fib_nhs = nhs; 292 fi->fib_nhs = nhs;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 5ce9c9e0565c..ff0ebe99137d 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -580,12 +580,11 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
580{ 580{
581 struct seq_file *seq; 581 struct seq_file *seq;
582 int rc = -ENOMEM; 582 int rc = -ENOMEM;
583 struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 583 struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
584 584
585 if (!s) 585 if (!s)
586 goto out; 586 goto out;
587 587
588 memset(s, 0, sizeof(*s));
589 rc = seq_open(file, &dn_neigh_seq_ops); 588 rc = seq_open(file, &dn_neigh_seq_ops);
590 if (rc) 589 if (rc)
591 goto out_kfree; 590 goto out_kfree;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 22f321d9bf9d..6986be754ef2 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -151,10 +151,9 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
151 } 151 }
152 } 152 }
153 153
154 new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); 154 new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
155 if (!new_r) 155 if (!new_r)
156 return -ENOMEM; 156 return -ENOMEM;
157 memset(new_r, 0, sizeof(*new_r));
158 157
159 if (rta[RTA_SRC-1]) 158 if (rta[RTA_SRC-1])
160 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2); 159 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 37d9d0a1ac8c..e926c952e363 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -158,12 +158,10 @@ static void dn_rehash_zone(struct dn_zone *dz)
158 break; 158 break;
159 } 159 }
160 160
161 ht = kmalloc(new_divisor*sizeof(struct dn_fib_node*), GFP_KERNEL); 161 ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
162
163 if (ht == NULL) 162 if (ht == NULL)
164 return; 163 return;
165 164
166 memset(ht, 0, new_divisor*sizeof(struct dn_fib_node *));
167 write_lock_bh(&dn_fib_tables_lock); 165 write_lock_bh(&dn_fib_tables_lock);
168 old_ht = dz->dz_hash; 166 old_ht = dz->dz_hash;
169 dz->dz_hash = ht; 167 dz->dz_hash = ht;
@@ -184,11 +182,10 @@ static void dn_free_node(struct dn_fib_node *f)
184static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) 182static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
185{ 183{
186 int i; 184 int i;
187 struct dn_zone *dz = kmalloc(sizeof(struct dn_zone), GFP_KERNEL); 185 struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
188 if (!dz) 186 if (!dz)
189 return NULL; 187 return NULL;
190 188
191 memset(dz, 0, sizeof(struct dn_zone));
192 if (z) { 189 if (z) {
193 dz->dz_divisor = 16; 190 dz->dz_divisor = 16;
194 dz->dz_hashmask = 0x0F; 191 dz->dz_hashmask = 0x0F;
@@ -197,14 +194,12 @@ static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
197 dz->dz_hashmask = 0; 194 dz->dz_hashmask = 0;
198 } 195 }
199 196
200 dz->dz_hash = kmalloc(dz->dz_divisor*sizeof(struct dn_fib_node *), GFP_KERNEL); 197 dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
201
202 if (!dz->dz_hash) { 198 if (!dz->dz_hash) {
203 kfree(dz); 199 kfree(dz);
204 return NULL; 200 return NULL;
205 } 201 }
206 202
207 memset(dz->dz_hash, 0, dz->dz_divisor*sizeof(struct dn_fib_node*));
208 dz->dz_order = z; 203 dz->dz_order = z;
209 dz->dz_mask = dnet_make_mask(z); 204 dz->dz_mask = dnet_make_mask(z);
210 205
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 309ae4c6549a..4d66aac13483 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -673,12 +673,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
673 edev = dev->ec_ptr; 673 edev = dev->ec_ptr;
674 if (edev == NULL) { 674 if (edev == NULL) {
675 /* Magic up a new one. */ 675 /* Magic up a new one. */
676 edev = kmalloc(sizeof(struct ec_device), GFP_KERNEL); 676 edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
677 if (edev == NULL) { 677 if (edev == NULL) {
678 err = -ENOMEM; 678 err = -ENOMEM;
679 break; 679 break;
680 } 680 }
681 memset(edev, 0, sizeof(struct ec_device));
682 dev->ec_ptr = edev; 681 dev->ec_ptr = edev;
683 } else 682 } else
684 net2dev_map[edev->net] = NULL; 683 net2dev_map[edev->net] = NULL;
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
index dbb08528ddf5..f7e84e9d13ad 100644
--- a/net/ieee80211/Kconfig
+++ b/net/ieee80211/Kconfig
@@ -58,6 +58,7 @@ config IEEE80211_CRYPT_TKIP
58 depends on IEEE80211 && NET_RADIO 58 depends on IEEE80211 && NET_RADIO
59 select CRYPTO 59 select CRYPTO
60 select CRYPTO_MICHAEL_MIC 60 select CRYPTO_MICHAEL_MIC
61 select CRC32
61 ---help--- 62 ---help---
62 Include software based cipher suites in support of IEEE 802.11i 63 Include software based cipher suites in support of IEEE 802.11i
63 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled 64 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
index cb71d794a7d1..5ed0a98b2d76 100644
--- a/net/ieee80211/ieee80211_crypt.c
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -110,11 +110,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
110 unsigned long flags; 110 unsigned long flags;
111 struct ieee80211_crypto_alg *alg; 111 struct ieee80211_crypto_alg *alg;
112 112
113 alg = kmalloc(sizeof(*alg), GFP_KERNEL); 113 alg = kzalloc(sizeof(*alg), GFP_KERNEL);
114 if (alg == NULL) 114 if (alg == NULL)
115 return -ENOMEM; 115 return -ENOMEM;
116 116
117 memset(alg, 0, sizeof(*alg));
118 alg->ops = ops; 117 alg->ops = ops;
119 118
120 spin_lock_irqsave(&ieee80211_crypto_lock, flags); 119 spin_lock_irqsave(&ieee80211_crypto_lock, flags);
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 492647382ad0..ed90a8af1444 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -76,10 +76,9 @@ static void *ieee80211_ccmp_init(int key_idx)
76{ 76{
77 struct ieee80211_ccmp_data *priv; 77 struct ieee80211_ccmp_data *priv;
78 78
79 priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 79 priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
80 if (priv == NULL) 80 if (priv == NULL)
81 goto fail; 81 goto fail;
82 memset(priv, 0, sizeof(*priv));
83 priv->key_idx = key_idx; 82 priv->key_idx = key_idx;
84 83
85 priv->tfm = crypto_alloc_tfm("aes", 0); 84 priv->tfm = crypto_alloc_tfm("aes", 0);
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index c5a87724aabe..0ebf235f6939 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -39,10 +39,9 @@ static void *prism2_wep_init(int keyidx)
39{ 39{
40 struct prism2_wep_data *priv; 40 struct prism2_wep_data *priv;
41 41
42 priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 42 priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
43 if (priv == NULL) 43 if (priv == NULL)
44 goto fail; 44 goto fail;
45 memset(priv, 0, sizeof(*priv));
46 priv->key_idx = keyidx; 45 priv->key_idx = keyidx;
47 46
48 priv->tfm = crypto_alloc_tfm("arc4", 0); 47 priv->tfm = crypto_alloc_tfm("arc4", 0);
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index a78c4f845f66..5cb9cfd35397 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -369,11 +369,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
369 struct ieee80211_crypt_data *new_crypt; 369 struct ieee80211_crypt_data *new_crypt;
370 370
371 /* take WEP into use */ 371 /* take WEP into use */
372 new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data), 372 new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
373 GFP_KERNEL); 373 GFP_KERNEL);
374 if (new_crypt == NULL) 374 if (new_crypt == NULL)
375 return -ENOMEM; 375 return -ENOMEM;
376 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
377 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 376 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
378 if (!new_crypt->ops) { 377 if (!new_crypt->ops) {
379 request_module("ieee80211_crypt_wep"); 378 request_module("ieee80211_crypt_wep");
@@ -616,13 +615,11 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
616 615
617 ieee80211_crypt_delayed_deinit(ieee, crypt); 616 ieee80211_crypt_delayed_deinit(ieee, crypt);
618 617
619 new_crypt = (struct ieee80211_crypt_data *) 618 new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
620 kmalloc(sizeof(*new_crypt), GFP_KERNEL);
621 if (new_crypt == NULL) { 619 if (new_crypt == NULL) {
622 ret = -ENOMEM; 620 ret = -ENOMEM;
623 goto done; 621 goto done;
624 } 622 }
625 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
626 new_crypt->ops = ops; 623 new_crypt->ops = ops;
627 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 624 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
628 new_crypt->priv = new_crypt->ops->init(idx); 625 new_crypt->priv = new_crypt->ops->init(idx);
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index ebc33ca6e692..4cef39e171d0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -116,6 +116,16 @@ ieee80211softmac_auth_queue(void *data)
116 kfree(auth); 116 kfree(auth);
117} 117}
118 118
119/* Sends a response to an auth challenge (for shared key auth). */
120static void
121ieee80211softmac_auth_challenge_response(void *_aq)
122{
123 struct ieee80211softmac_auth_queue_item *aq = _aq;
124
125 /* Send our response */
126 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
127}
128
119/* Handle the auth response from the AP 129/* Handle the auth response from the AP
120 * This should be registered with ieee80211 as handle_auth 130 * This should be registered with ieee80211 as handle_auth
121 */ 131 */
@@ -197,24 +207,30 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
197 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE: 207 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE:
198 /* Check to make sure we have a challenge IE */ 208 /* Check to make sure we have a challenge IE */
199 data = (u8 *)auth->info_element; 209 data = (u8 *)auth->info_element;
200 if(*data++ != MFIE_TYPE_CHALLENGE){ 210 if (*data++ != MFIE_TYPE_CHALLENGE) {
201 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n"); 211 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n");
202 break; 212 break;
203 } 213 }
204 /* Save the challenge */ 214 /* Save the challenge */
205 spin_lock_irqsave(&mac->lock, flags); 215 spin_lock_irqsave(&mac->lock, flags);
206 net->challenge_len = *data++; 216 net->challenge_len = *data++;
207 if(net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) 217 if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
208 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; 218 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
209 if(net->challenge != NULL) 219 if (net->challenge != NULL)
210 kfree(net->challenge); 220 kfree(net->challenge);
211 net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC); 221 net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
212 memcpy(net->challenge, data, net->challenge_len); 222 memcpy(net->challenge, data, net->challenge_len);
213 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; 223 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
214 spin_unlock_irqrestore(&mac->lock, flags);
215 224
216 /* Send our response */ 225 /* We reuse the work struct from the auth request here.
217 ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); 226 * It is safe to do so as each one is per-request, and
227 * at this point (dealing with authentication response)
228 * we have obviously already sent the initial auth
229 * request. */
230 cancel_delayed_work(&aq->work);
231 INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
232 schedule_work(&aq->work);
233 spin_unlock_irqrestore(&mac->lock, flags);
218 return 0; 234 return 0;
219 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: 235 case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
220 kfree(net->challenge); 236 kfree(net->challenge);
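
The softmac change above stops transmitting the shared-key challenge response while mac->lock is held: the per-request work struct is cancelled, re-initialised to point at ieee80211softmac_auth_challenge_response(), and scheduled, so the frame goes out from process context after the lock is dropped. The shape of that deferral, using the 2.6.18-era three-argument INIT_WORK() (names here are illustrative):

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct example_auth_item {
        struct work_struct work;
        /* ... per-request state ... */
};

static void example_send_response(void *data)
{
        struct example_auth_item *aq = data;

        /* Runs from the workqueue: no spinlock held, safe to transmit. */
        (void)aq;
}

static void example_handle_challenge(struct example_auth_item *aq,
                                     spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        /* ... copy the challenge and update the request state ... */
        cancel_delayed_work(&aq->work);         /* reuse the per-request work */
        INIT_WORK(&aq->work, example_send_response, aq);
        schedule_work(&aq->work);
        spin_unlock_irqrestore(lock, flags);
}
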
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index 8cc8b20f5cda..6ae5a1dc7956 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -96,8 +96,7 @@ ieee80211softmac_alloc_mgt(u32 size)
96 if(size > IEEE80211_DATA_LEN) 96 if(size > IEEE80211_DATA_LEN)
97 return NULL; 97 return NULL;
98 /* Allocate the frame */ 98 /* Allocate the frame */
99 data = kmalloc(size, GFP_ATOMIC); 99 data = kzalloc(size, GFP_ATOMIC);
100 memset(data, 0, size);
101 return data; 100 return data;
102} 101}
103 102
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 8e748be36c5a..1366bc6ce6a5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -215,12 +215,10 @@ static int ah_init_state(struct xfrm_state *x)
215 if (x->encap) 215 if (x->encap)
216 goto error; 216 goto error;
217 217
218 ahp = kmalloc(sizeof(*ahp), GFP_KERNEL); 218 ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
219 if (ahp == NULL) 219 if (ahp == NULL)
220 return -ENOMEM; 220 return -ENOMEM;
221 221
222 memset(ahp, 0, sizeof(*ahp));
223
224 ahp->key = x->aalg->alg_key; 222 ahp->key = x->aalg->alg_key;
225 ahp->key_len = (x->aalg->alg_key_len+7)/8; 223 ahp->key_len = (x->aalg->alg_key_len+7)/8;
226 ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 224 ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 7b51b3bdb548..c8a3723bc001 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1372,12 +1372,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
1372{ 1372{
1373 struct seq_file *seq; 1373 struct seq_file *seq;
1374 int rc = -ENOMEM; 1374 int rc = -ENOMEM;
1375 struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1375 struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1376 1376
1377 if (!s) 1377 if (!s)
1378 goto out; 1378 goto out;
1379 1379
1380 memset(s, 0, sizeof(*s));
1381 rc = seq_open(file, &arp_seq_ops); 1380 rc = seq_open(file, &arp_seq_ops);
1382 if (rc) 1381 if (rc)
1383 goto out_kfree; 1382 goto out_kfree;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a7c65e9e5ec9..a6cc31d911eb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -93,10 +93,9 @@ static void devinet_sysctl_unregister(struct ipv4_devconf *p);
93 93
94static struct in_ifaddr *inet_alloc_ifa(void) 94static struct in_ifaddr *inet_alloc_ifa(void)
95{ 95{
96 struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); 96 struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
97 97
98 if (ifa) { 98 if (ifa) {
99 memset(ifa, 0, sizeof(*ifa));
100 INIT_RCU_HEAD(&ifa->rcu_head); 99 INIT_RCU_HEAD(&ifa->rcu_head);
101 } 100 }
102 101
@@ -140,10 +139,9 @@ struct in_device *inetdev_init(struct net_device *dev)
140 139
141 ASSERT_RTNL(); 140 ASSERT_RTNL();
142 141
143 in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL); 142 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
144 if (!in_dev) 143 if (!in_dev)
145 goto out; 144 goto out;
146 memset(in_dev, 0, sizeof(*in_dev));
147 INIT_RCU_HEAD(&in_dev->rcu_head); 145 INIT_RCU_HEAD(&in_dev->rcu_head);
148 memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf)); 146 memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf));
149 in_dev->cnf.sysctl = NULL; 147 in_dev->cnf.sysctl = NULL;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4e112738b3fa..fc2f8ce441de 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -316,12 +316,10 @@ static int esp_init_state(struct xfrm_state *x)
316 if (x->ealg == NULL) 316 if (x->ealg == NULL)
317 goto error; 317 goto error;
318 318
319 esp = kmalloc(sizeof(*esp), GFP_KERNEL); 319 esp = kzalloc(sizeof(*esp), GFP_KERNEL);
320 if (esp == NULL) 320 if (esp == NULL)
321 return -ENOMEM; 321 return -ENOMEM;
322 322
323 memset(esp, 0, sizeof(*esp));
324
325 if (x->aalg) { 323 if (x->aalg) {
326 struct xfrm_algo_desc *aalg_desc; 324 struct xfrm_algo_desc *aalg_desc;
327 325
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 3c1d32ad35f2..72c633b357cf 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -204,11 +204,10 @@ static struct fn_zone *
204fn_new_zone(struct fn_hash *table, int z) 204fn_new_zone(struct fn_hash *table, int z)
205{ 205{
206 int i; 206 int i;
207 struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL); 207 struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
208 if (!fz) 208 if (!fz)
209 return NULL; 209 return NULL;
210 210
211 memset(fz, 0, sizeof(struct fn_zone));
212 if (z) { 211 if (z) {
213 fz->fz_divisor = 16; 212 fz->fz_divisor = 16;
214 } else { 213 } else {
@@ -1046,7 +1045,7 @@ static int fib_seq_open(struct inode *inode, struct file *file)
1046{ 1045{
1047 struct seq_file *seq; 1046 struct seq_file *seq;
1048 int rc = -ENOMEM; 1047 int rc = -ENOMEM;
1049 struct fib_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1048 struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1050 1049
1051 if (!s) 1050 if (!s)
1052 goto out; 1051 goto out;
@@ -1057,7 +1056,6 @@ static int fib_seq_open(struct inode *inode, struct file *file)
1057 1056
1058 seq = file->private_data; 1057 seq = file->private_data;
1059 seq->private = s; 1058 seq->private = s;
1060 memset(s, 0, sizeof(*s));
1061out: 1059out:
1062 return rc; 1060 return rc;
1063out_kfree: 1061out_kfree:
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 773b12ba4e3c..79b04718bdfd 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -196,10 +196,9 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
196 } 196 }
197 } 197 }
198 198
199 new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); 199 new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
200 if (!new_r) 200 if (!new_r)
201 return -ENOMEM; 201 return -ENOMEM;
202 memset(new_r, 0, sizeof(*new_r));
203 202
204 if (rta[RTA_SRC-1]) 203 if (rta[RTA_SRC-1])
205 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4); 204 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5f87533684d5..9be53a8e72c3 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -709,11 +709,10 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
709 goto failure; 709 goto failure;
710 } 710 }
711 711
712 fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 712 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
713 if (fi == NULL) 713 if (fi == NULL)
714 goto failure; 714 goto failure;
715 fib_info_cnt++; 715 fib_info_cnt++;
716 memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh));
717 716
718 fi->fib_protocol = r->rtm_protocol; 717 fi->fib_protocol = r->rtm_protocol;
719 718
@@ -962,10 +961,6 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
962 rtm->rtm_protocol = fi->fib_protocol; 961 rtm->rtm_protocol = fi->fib_protocol;
963 if (fi->fib_priority) 962 if (fi->fib_priority)
964 RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority); 963 RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
965#ifdef CONFIG_NET_CLS_ROUTE
966 if (fi->fib_nh[0].nh_tclassid)
967 RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
968#endif
969 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 964 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
970 goto rtattr_failure; 965 goto rtattr_failure;
971 if (fi->fib_prefsrc) 966 if (fi->fib_prefsrc)
@@ -975,6 +970,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
975 RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw); 970 RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw);
976 if (fi->fib_nh->nh_oif) 971 if (fi->fib_nh->nh_oif)
977 RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif); 972 RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
973#ifdef CONFIG_NET_CLS_ROUTE
974 if (fi->fib_nh[0].nh_tclassid)
975 RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
976#endif
978 } 977 }
979#ifdef CONFIG_IP_ROUTE_MULTIPATH 978#ifdef CONFIG_IP_ROUTE_MULTIPATH
980 if (fi->fib_nhs > 1) { 979 if (fi->fib_nhs > 1) {
@@ -993,6 +992,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
993 nhp->rtnh_ifindex = nh->nh_oif; 992 nhp->rtnh_ifindex = nh->nh_oif;
994 if (nh->nh_gw) 993 if (nh->nh_gw)
995 RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw); 994 RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw);
995#ifdef CONFIG_NET_CLS_ROUTE
996 if (nh->nh_tclassid)
997 RTA_PUT(skb, RTA_FLOW, 4, &nh->nh_tclassid);
998#endif
996 nhp->rtnh_len = skb->tail - (unsigned char*)nhp; 999 nhp->rtnh_len = skb->tail - (unsigned char*)nhp;
997 } endfor_nexthops(fi); 1000 } endfor_nexthops(fi);
998 mp_head->rta_type = RTA_MULTIPATH; 1001 mp_head->rta_type = RTA_MULTIPATH;
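
The fib_semantics.c hunks above are not a cleanup but a behavioural fix for CONFIG_NET_CLS_ROUTE routing realms: RTA_FLOW used to be emitted once, unconditionally, from fib_nh[0], even for multipath routes; it now lives inside the single-nexthop branch and is additionally emitted per nexthop inside the RTA_MULTIPATH loop, so each hop reports its own nh_tclassid. A self-contained illustration of the "per-nexthop attribute goes inside the per-nexthop loop" point (plain userspace C; the types and the emit() helper are stand-ins, not the rtnetlink macros):

    #include <stdio.h>

    struct nh    { unsigned int tclassid; };
    struct route { int nhs; struct nh nh[2]; };

    static void emit(const char *attr, unsigned int v)
    {
        printf("%s=%u\n", attr, v);
    }

    static void dump_multipath(const struct route *r)
    {
        int i;

        for (i = 0; i < r->nhs; i++)        /* one RTA_FLOW per nexthop */
            if (r->nh[i].tclassid)
                emit("RTA_FLOW", r->nh[i].tclassid);
    }

    int main(void)
    {
        struct route r = { .nhs = 2, .nh = { { 10 }, { 20 } } };

        dump_multipath(&r);     /* both realms appear, not just nh[0]'s */
        return 0;
    }
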
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d299c8e547d6..9f4b752f5a33 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1028,10 +1028,9 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1028 * for deleted items allows change reports to use common code with 1028 * for deleted items allows change reports to use common code with
1029 * non-deleted or query-response MCA's. 1029 * non-deleted or query-response MCA's.
1030 */ 1030 */
1031 pmc = kmalloc(sizeof(*pmc), GFP_KERNEL); 1031 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
1032 if (!pmc) 1032 if (!pmc)
1033 return; 1033 return;
1034 memset(pmc, 0, sizeof(*pmc));
1035 spin_lock_bh(&im->lock); 1034 spin_lock_bh(&im->lock);
1036 pmc->interface = im->interface; 1035 pmc->interface = im->interface;
1037 in_dev_hold(in_dev); 1036 in_dev_hold(in_dev);
@@ -1529,10 +1528,9 @@ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
1529 psf_prev = psf; 1528 psf_prev = psf;
1530 } 1529 }
1531 if (!psf) { 1530 if (!psf) {
1532 psf = kmalloc(sizeof(*psf), GFP_ATOMIC); 1531 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
1533 if (!psf) 1532 if (!psf)
1534 return -ENOBUFS; 1533 return -ENOBUFS;
1535 memset(psf, 0, sizeof(*psf));
1536 psf->sf_inaddr = *psfsrc; 1534 psf->sf_inaddr = *psfsrc;
1537 if (psf_prev) { 1535 if (psf_prev) {
1538 psf_prev->sf_next = psf; 1536 psf_prev->sf_next = psf;
@@ -2380,7 +2378,7 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
2380{ 2378{
2381 struct seq_file *seq; 2379 struct seq_file *seq;
2382 int rc = -ENOMEM; 2380 int rc = -ENOMEM;
2383 struct igmp_mc_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 2381 struct igmp_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
2384 2382
2385 if (!s) 2383 if (!s)
2386 goto out; 2384 goto out;
@@ -2390,7 +2388,6 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
2390 2388
2391 seq = file->private_data; 2389 seq = file->private_data;
2392 seq->private = s; 2390 seq->private = s;
2393 memset(s, 0, sizeof(*s));
2394out: 2391out:
2395 return rc; 2392 return rc;
2396out_kfree: 2393out_kfree:
@@ -2555,7 +2552,7 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
2555{ 2552{
2556 struct seq_file *seq; 2553 struct seq_file *seq;
2557 int rc = -ENOMEM; 2554 int rc = -ENOMEM;
2558 struct igmp_mcf_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 2555 struct igmp_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
2559 2556
2560 if (!s) 2557 if (!s)
2561 goto out; 2558 goto out;
@@ -2565,7 +2562,6 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
2565 2562
2566 seq = file->private_data; 2563 seq = file->private_data;
2567 seq->private = s; 2564 seq->private = s;
2568 memset(s, 0, sizeof(*s));
2569out: 2565out:
2570 return rc; 2566 return rc;
2571out_kfree: 2567out_kfree:
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8e7e41b66c79..492858e6faf0 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -909,11 +909,10 @@ static int __init inet_diag_init(void)
909 sizeof(struct inet_diag_handler *)); 909 sizeof(struct inet_diag_handler *));
910 int err = -ENOMEM; 910 int err = -ENOMEM;
911 911
912 inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL); 912 inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
913 if (!inet_diag_table) 913 if (!inet_diag_table)
914 goto out; 914 goto out;
915 915
916 memset(inet_diag_table, 0, inet_diag_table_size);
917 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv, 916 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
918 THIS_MODULE); 917 THIS_MODULE);
919 if (idiagnl == NULL) 918 if (idiagnl == NULL)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6ff9b10d9563..0f9b3a31997b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -617,7 +617,6 @@ static int ipgre_rcv(struct sk_buff *skb)
617 skb->mac.raw = skb->nh.raw; 617 skb->mac.raw = skb->nh.raw;
618 skb->nh.raw = __pskb_pull(skb, offset); 618 skb->nh.raw = __pskb_pull(skb, offset);
619 skb_postpull_rcsum(skb, skb->h.raw, offset); 619 skb_postpull_rcsum(skb, skb->h.raw, offset);
620 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
621 skb->pkt_type = PACKET_HOST; 620 skb->pkt_type = PACKET_HOST;
622#ifdef CONFIG_NET_IPGRE_BROADCAST 621#ifdef CONFIG_NET_IPGRE_BROADCAST
623 if (MULTICAST(iph->daddr)) { 622 if (MULTICAST(iph->daddr)) {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 184c78ca79e6..212734ca238f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -429,7 +429,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
429 } 429 }
430 430
431 /* Remove any debris in the socket control block */ 431 /* Remove any debris in the socket control block */
432 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 432 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
433 433
434 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, 434 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
435 ip_rcv_finish); 435 ip_rcv_finish);
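
The one-line ip_rcv() change above is what allows the scattered memset(&IPCB(skb)->opt, ...) lines to be deleted from ip_gre.c, ip_options.c, ipip.c, ipmr.c, xfrm4_mode_tunnel.c and sit.c in this same series: instead of every decapsulation path clearing just the options part of the control block, the single IPv4 entry point now wipes the whole inet_skb_parm scratch area (and net/ipv6/ip6_input.c below gains the analogous IP6CB() clear). A minimal sketch of the idea, assuming kernel context; IPCB() and struct inet_skb_parm come from <net/ip.h>:

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <net/ip.h>

    /* IPCB(skb) is the IPv4 view of the generic skb->cb[] scratch space.
     * Clearing all of it once, at the protocol entry point, removes the
     * need for each tunnel to repeat a partial memset after decap. */
    static inline void ip_clear_cb(struct sk_buff *skb)
    {
        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
    }
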
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index cbcae6544622..406056edc02b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -256,7 +256,6 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
256 256
257 if (!opt) { 257 if (!opt) {
258 opt = &(IPCB(skb)->opt); 258 opt = &(IPCB(skb)->opt);
259 memset(opt, 0, sizeof(struct ip_options));
260 iph = skb->nh.raw; 259 iph = skb->nh.raw;
261 opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); 260 opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr);
262 optptr = iph + sizeof(struct iphdr); 261 optptr = iph + sizeof(struct iphdr);
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 8a8b5cf2f7fe..a0c28b2b756e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -410,11 +410,10 @@ static int ipcomp_init_state(struct xfrm_state *x)
410 goto out; 410 goto out;
411 411
412 err = -ENOMEM; 412 err = -ENOMEM;
413 ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL); 413 ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
414 if (!ipcd) 414 if (!ipcd)
415 goto out; 415 goto out;
416 416
417 memset(ipcd, 0, sizeof(*ipcd));
418 x->props.header_len = 0; 417 x->props.header_len = 0;
419 if (x->props.mode) 418 if (x->props.mode)
420 x->props.header_len += sizeof(struct iphdr); 419 x->props.header_len += sizeof(struct iphdr);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 3291d5192aad..76ab50b0d6ef 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -487,7 +487,6 @@ static int ipip_rcv(struct sk_buff *skb)
487 487
488 skb->mac.raw = skb->nh.raw; 488 skb->mac.raw = skb->nh.raw;
489 skb->nh.raw = skb->data; 489 skb->nh.raw = skb->data;
490 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
491 skb->protocol = htons(ETH_P_IP); 490 skb->protocol = htons(ETH_P_IP);
492 skb->pkt_type = PACKET_HOST; 491 skb->pkt_type = PACKET_HOST;
493 492
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ba33f8621c67..85893eef6b16 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1461,7 +1461,6 @@ int pim_rcv_v1(struct sk_buff * skb)
1461 skb_pull(skb, (u8*)encap - skb->data); 1461 skb_pull(skb, (u8*)encap - skb->data);
1462 skb->nh.iph = (struct iphdr *)skb->data; 1462 skb->nh.iph = (struct iphdr *)skb->data;
1463 skb->dev = reg_dev; 1463 skb->dev = reg_dev;
1464 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1465 skb->protocol = htons(ETH_P_IP); 1464 skb->protocol = htons(ETH_P_IP);
1466 skb->ip_summed = 0; 1465 skb->ip_summed = 0;
1467 skb->pkt_type = PACKET_HOST; 1466 skb->pkt_type = PACKET_HOST;
@@ -1517,7 +1516,6 @@ static int pim_rcv(struct sk_buff * skb)
1517 skb_pull(skb, (u8*)encap - skb->data); 1516 skb_pull(skb, (u8*)encap - skb->data);
1518 skb->nh.iph = (struct iphdr *)skb->data; 1517 skb->nh.iph = (struct iphdr *)skb->data;
1519 skb->dev = reg_dev; 1518 skb->dev = reg_dev;
1520 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1521 skb->protocol = htons(ETH_P_IP); 1519 skb->protocol = htons(ETH_P_IP);
1522 skb->ip_summed = 0; 1520 skb->ip_summed = 0;
1523 skb->pkt_type = PACKET_HOST; 1521 skb->pkt_type = PACKET_HOST;
@@ -1580,6 +1578,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1580 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); 1578 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
1581 1579
1582 if (cache==NULL) { 1580 if (cache==NULL) {
1581 struct sk_buff *skb2;
1583 struct net_device *dev; 1582 struct net_device *dev;
1584 int vif; 1583 int vif;
1585 1584
@@ -1593,12 +1592,18 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1593 read_unlock(&mrt_lock); 1592 read_unlock(&mrt_lock);
1594 return -ENODEV; 1593 return -ENODEV;
1595 } 1594 }
1596 skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); 1595 skb2 = skb_clone(skb, GFP_ATOMIC);
1597 skb->nh.iph->ihl = sizeof(struct iphdr)>>2; 1596 if (!skb2) {
1598 skb->nh.iph->saddr = rt->rt_src; 1597 read_unlock(&mrt_lock);
1599 skb->nh.iph->daddr = rt->rt_dst; 1598 return -ENOMEM;
1600 skb->nh.iph->version = 0; 1599 }
1601 err = ipmr_cache_unresolved(vif, skb); 1600
1601 skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr));
1602 skb2->nh.iph->ihl = sizeof(struct iphdr)>>2;
1603 skb2->nh.iph->saddr = rt->rt_src;
1604 skb2->nh.iph->daddr = rt->rt_dst;
1605 skb2->nh.iph->version = 0;
1606 err = ipmr_cache_unresolved(vif, skb2);
1602 read_unlock(&mrt_lock); 1607 read_unlock(&mrt_lock);
1603 return err; 1608 return err;
1604 } 1609 }
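
The ipmr_get_route() hunk is the one real logic change in ipmr.c here: for an unresolved multicast cache entry the code used to push a fake IPv4 header onto the caller's skb and queue that same skb, corrupting the rtnetlink reply being built in it. It now clones the skb and mangles/queues only the clone. A minimal sketch of the clone-then-modify pattern, assuming kernel context and the 2.6.18-era skb->nh layout used in the patch; queue_unresolved() stands in for ipmr_cache_unresolved(), and saddr/daddr for rt->rt_src/rt->rt_dst:

    #include <linux/errno.h>
    #include <linux/ip.h>
    #include <linux/skbuff.h>

    static int queue_unresolved(struct sk_buff *skb);  /* stand-in, assumed elsewhere */

    static int report_unresolved(struct sk_buff *skb, __be32 saddr, __be32 daddr)
    {
        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
        struct iphdr *iph;

        if (!skb2)
            return -ENOMEM;

        /* Build the fake header on the clone; the caller's skb stays intact. */
        skb2->nh.raw  = skb_push(skb2, sizeof(struct iphdr));
        iph           = skb2->nh.iph;
        iph->ihl      = sizeof(struct iphdr) >> 2;
        iph->version  = 0;
        iph->saddr    = saddr;
        iph->daddr    = daddr;

        return queue_unresolved(skb2);
    }
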
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f28ec6882162..6a28fafe910c 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -735,12 +735,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
735 if (atype != RTN_LOCAL && atype != RTN_UNICAST) 735 if (atype != RTN_LOCAL && atype != RTN_UNICAST)
736 return -EINVAL; 736 return -EINVAL;
737 737
738 dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 738 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
739 if (dest == NULL) { 739 if (dest == NULL) {
740 IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n"); 740 IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n");
741 return -ENOMEM; 741 return -ENOMEM;
742 } 742 }
743 memset(dest, 0, sizeof(struct ip_vs_dest));
744 743
745 dest->protocol = svc->protocol; 744 dest->protocol = svc->protocol;
746 dest->vaddr = svc->addr; 745 dest->vaddr = svc->addr;
@@ -1050,14 +1049,12 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1050 goto out_mod_dec; 1049 goto out_mod_dec;
1051 } 1050 }
1052 1051
1053 svc = (struct ip_vs_service *) 1052 svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
1054 kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
1055 if (svc == NULL) { 1053 if (svc == NULL) {
1056 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); 1054 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
1057 ret = -ENOMEM; 1055 ret = -ENOMEM;
1058 goto out_err; 1056 goto out_err;
1059 } 1057 }
1060 memset(svc, 0, sizeof(struct ip_vs_service));
1061 1058
1062 /* I'm the first user of the service */ 1059 /* I'm the first user of the service */
1063 atomic_set(&svc->usecnt, 1); 1060 atomic_set(&svc->usecnt, 1);
@@ -1797,7 +1794,7 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
1797{ 1794{
1798 struct seq_file *seq; 1795 struct seq_file *seq;
1799 int rc = -ENOMEM; 1796 int rc = -ENOMEM;
1800 struct ip_vs_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); 1797 struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL);
1801 1798
1802 if (!s) 1799 if (!s)
1803 goto out; 1800 goto out;
@@ -1808,7 +1805,6 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
1808 1805
1809 seq = file->private_data; 1806 seq = file->private_data;
1810 seq->private = s; 1807 seq->private = s;
1811 memset(s, 0, sizeof(*s));
1812out: 1808out:
1813 return rc; 1809 return rc;
1814out_kfree: 1810out_kfree:
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 4c1940381ba0..7d68b80c4c19 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -123,11 +123,10 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
123{ 123{
124 struct ip_vs_estimator *est; 124 struct ip_vs_estimator *est;
125 125
126 est = kmalloc(sizeof(*est), GFP_KERNEL); 126 est = kzalloc(sizeof(*est), GFP_KERNEL);
127 if (est == NULL) 127 if (est == NULL)
128 return -ENOMEM; 128 return -ENOMEM;
129 129
130 memset(est, 0, sizeof(*est));
131 est->stats = stats; 130 est->stats = stats;
132 est->last_conns = stats->conns; 131 est->last_conns = stats->conns;
133 est->cps = stats->cps<<10; 132 est->cps = stats->cps<<10;
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index af35235672d5..9a39e2969712 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
1200 tuple.dst.protonum = IPPROTO_TCP; 1200 tuple.dst.protonum = IPPROTO_TCP;
1201 1201
1202 exp = __ip_conntrack_expect_find(&tuple); 1202 exp = __ip_conntrack_expect_find(&tuple);
1203 if (exp->master == ct) 1203 if (exp && exp->master == ct)
1204 return exp; 1204 return exp;
1205 return NULL; 1205 return NULL;
1206} 1206}
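
find_expect() above dereferenced the return value of __ip_conntrack_expect_find() before testing it, so an H.323 packet with no matching expectation meant a NULL pointer dereference inside the conntrack helper. The fix is the standard lookup-then-test shape, relying on && short-circuiting. A self-contained illustration (plain userspace C; the types and lookup() are stand-ins, not the conntrack structures):

    #include <stddef.h>

    struct expect { const void *master; };

    static struct expect *lookup(void)          /* stands in for the hash lookup; */
    {                                           /* may legitimately return NULL   */
        return NULL;
    }

    static struct expect *find_for(const void *ct)
    {
        struct expect *exp = lookup();

        if (exp && exp->master == ct)   /* NULL-safe: exp is tested first */
            return exp;
        return NULL;
    }
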
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 7bd3c22003a2..7a9fa04a467a 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -534,6 +534,8 @@ static struct nf_hook_ops ip_conntrack_ops[] = {
534 534
535/* Sysctl support */ 535/* Sysctl support */
536 536
537int ip_conntrack_checksum = 1;
538
537#ifdef CONFIG_SYSCTL 539#ifdef CONFIG_SYSCTL
538 540
539/* From ip_conntrack_core.c */ 541/* From ip_conntrack_core.c */
@@ -568,8 +570,6 @@ extern unsigned int ip_ct_generic_timeout;
568static int log_invalid_proto_min = 0; 570static int log_invalid_proto_min = 0;
569static int log_invalid_proto_max = 255; 571static int log_invalid_proto_max = 255;
570 572
571int ip_conntrack_checksum = 1;
572
573static struct ctl_table_header *ip_ct_sysctl_header; 573static struct ctl_table_header *ip_ct_sysctl_header;
574 574
575static ctl_table ip_ct_sysctl_table[] = { 575static ctl_table ip_ct_sysctl_table[] = {
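
The ip_conntrack_standalone.c hunks move the definition of ip_conntrack_checksum out of the CONFIG_SYSCTL block: the flag is consulted by the conntrack core in every configuration, so defining it under #ifdef CONFIG_SYSCTL leaves a CONFIG_SYSCTL=n build with an unresolved symbol. Only the sysctl plumbing that exports the knob belongs inside the #ifdef. A minimal sketch of that split, with demo_* names standing in for the real identifiers:

    #include <linux/sysctl.h>

    /* Consumed unconditionally by core code, so defined unconditionally. */
    int demo_checksum = 1;

    #ifdef CONFIG_SYSCTL
    /* Only the export machinery is conditional: the ctl_table entry that
     * points .data at &demo_checksum, and the registered table header. */
    static struct ctl_table_header *demo_sysctl_header;
    #endif
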
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index 0b1b416759cc..18b7fbdccb61 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -1255,9 +1255,9 @@ static int help(struct sk_buff **pskb,
1255 struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); 1255 struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
1256 1256
1257 /* SNMP replies and originating SNMP traps get mangled */ 1257 /* SNMP replies and originating SNMP traps get mangled */
1258 if (udph->source == ntohs(SNMP_PORT) && dir != IP_CT_DIR_REPLY) 1258 if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
1259 return NF_ACCEPT; 1259 return NF_ACCEPT;
1260 if (udph->dest == ntohs(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) 1260 if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
1261 return NF_ACCEPT; 1261 return NF_ACCEPT;
1262 1262
1263 /* No NAT? */ 1263 /* No NAT? */
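
In the SNMP NAT helper, udph->source and udph->dest are network-byte-order fields, so the conventional comparison is against htons(SNMP_PORT) and htons(SNMP_TRAP_PORT). Because ntohs() and htons() perform the same 16-bit swap on both endiannesses, the old form was not functionally wrong; the new spelling simply says what is meant (host constant converted to wire order) and keeps endianness annotation tools happy. A self-contained illustration (plain userspace C):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SNMP_PORT 161

    int main(void)
    {
        uint16_t wire_port = htons(SNMP_PORT);  /* as it sits in the UDP header */

        /* compare the wire-order field against htons(host constant) */
        if (wire_port == htons(SNMP_PORT))
            printf("matched SNMP port %d\n", SNMP_PORT);
        return 0;
    }
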
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index cbffeae3f565..d994c5f5744c 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -172,11 +172,10 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
172 struct clusterip_config *c; 172 struct clusterip_config *c;
173 char buffer[16]; 173 char buffer[16];
174 174
175 c = kmalloc(sizeof(*c), GFP_ATOMIC); 175 c = kzalloc(sizeof(*c), GFP_ATOMIC);
176 if (!c) 176 if (!c)
177 return NULL; 177 return NULL;
178 178
179 memset(c, 0, sizeof(*c));
180 c->dev = dev; 179 c->dev = dev;
181 c->clusterip = ip; 180 c->clusterip = ip;
182 memcpy(&c->clustermac, &i->clustermac, ETH_ALEN); 181 memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bd221ec3f81e..62b2762a2420 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -609,6 +609,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
609 if (sin) { 609 if (sin) {
610 sin->sin_family = AF_INET; 610 sin->sin_family = AF_INET;
611 sin->sin_addr.s_addr = skb->nh.iph->saddr; 611 sin->sin_addr.s_addr = skb->nh.iph->saddr;
612 sin->sin_port = 0;
612 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 613 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
613 } 614 }
614 if (inet->cmsg_flags) 615 if (inet->cmsg_flags)
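
The raw.c hunk plugs a small infoleak: raw_recvmsg() filled in the sockaddr_in returned to userspace but never wrote sin_port, so recvfrom() on a raw socket handed back whatever happened to be on the kernel stack in that slot. The companion change to sin6_port in net/ipv6/raw.c below is the same fix. A minimal sketch, assuming kernel context; fill_sender() is an illustrative name:

    #include <linux/in.h>
    #include <linux/string.h>

    /* Every field of a sockaddr copied to userspace must be written
     * explicitly (or the structure zeroed up front): anything left
     * untouched leaks kernel stack contents. */
    static void fill_sender(struct sockaddr_in *sin, __be32 saddr)
    {
        sin->sin_family      = AF_INET;
        sin->sin_addr.s_addr = saddr;
        sin->sin_port        = 0;   /* raw sockets have no port */
        memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
    }
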
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a891133f00e4..f6f39e814291 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1640,10 +1640,9 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
1640 if (unlikely(afinfo == NULL)) 1640 if (unlikely(afinfo == NULL))
1641 return -EINVAL; 1641 return -EINVAL;
1642 1642
1643 s = kmalloc(sizeof(*s), GFP_KERNEL); 1643 s = kzalloc(sizeof(*s), GFP_KERNEL);
1644 if (!s) 1644 if (!s)
1645 return -ENOMEM; 1645 return -ENOMEM;
1646 memset(s, 0, sizeof(*s));
1647 s->family = afinfo->family; 1646 s->family = afinfo->family;
1648 s->seq_ops.start = tcp_seq_start; 1647 s->seq_ops.start = tcp_seq_start;
1649 s->seq_ops.next = tcp_seq_next; 1648 s->seq_ops.next = tcp_seq_next;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9bfcddad695b..f136cec96d95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1468,11 +1468,10 @@ static int udp_seq_open(struct inode *inode, struct file *file)
1468 struct udp_seq_afinfo *afinfo = PDE(inode)->data; 1468 struct udp_seq_afinfo *afinfo = PDE(inode)->data;
1469 struct seq_file *seq; 1469 struct seq_file *seq;
1470 int rc = -ENOMEM; 1470 int rc = -ENOMEM;
1471 struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1471 struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1472 1472
1473 if (!s) 1473 if (!s)
1474 goto out; 1474 goto out;
1475 memset(s, 0, sizeof(*s));
1476 s->family = afinfo->family; 1475 s->family = afinfo->family;
1477 s->seq_ops.start = udp_seq_start; 1476 s->seq_ops.start = udp_seq_start;
1478 s->seq_ops.next = udp_seq_next; 1477 s->seq_ops.next = udp_seq_next;
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index f8d880beb12f..13cafbe56ce3 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,6 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 skb->mac.raw = memmove(skb->data - skb->mac_len, 92 skb->mac.raw = memmove(skb->data - skb->mac_len,
93 skb->mac.raw, skb->mac_len); 93 skb->mac.raw, skb->mac_len);
94 skb->nh.raw = skb->data; 94 skb->nh.raw = skb->data;
95 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
96 err = 0; 95 err = 0;
97 96
98out: 97out:
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index df8f051c0fce..25c2a9e03895 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -71,6 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
71 goto out; 71 goto out;
72 } 72 }
73 73
74 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
75
74 /* 76 /*
75 * Store incoming device index. When the packet will 77 * Store incoming device index. When the packet will
76 * be queued, we cannot refer to skb->dev anymore. 78 * be queued, we cannot refer to skb->dev anymore.
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index bc77c0e1a943..84d7ebdb9d21 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -567,10 +567,9 @@ static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
567 567
568 int opt_len = sizeof(*opt) + 8; 568 int opt_len = sizeof(*opt) + 8;
569 569
570 if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) { 570 if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) {
571 return NULL; 571 return NULL;
572 } 572 }
573 memset(opt, 0, opt_len);
574 opt->tot_len = opt_len; 573 opt->tot_len = opt_len;
575 opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1); 574 opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1);
576 opt->opt_nflen = 8; 575 opt->opt_nflen = 8;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fa1ce0ae123e..d57e61ce4a7d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -411,6 +411,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
411 /* Copy the address. */ 411 /* Copy the address. */
412 if (sin6) { 412 if (sin6) {
413 sin6->sin6_family = AF_INET6; 413 sin6->sin6_family = AF_INET6;
414 sin6->sin6_port = 0;
414 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); 415 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
415 sin6->sin6_flowinfo = 0; 416 sin6->sin6_flowinfo = 0;
416 sin6->sin6_scope_id = 0; 417 sin6->sin6_scope_id = 0;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c56aeece2bf5..836eecd7e62b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -380,7 +380,6 @@ static int ipip6_rcv(struct sk_buff *skb)
380 secpath_reset(skb); 380 secpath_reset(skb);
381 skb->mac.raw = skb->nh.raw; 381 skb->mac.raw = skb->nh.raw;
382 skb->nh.raw = skb->data; 382 skb->nh.raw = skb->data;
383 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
384 IPCB(skb)->flags = 0; 383 IPCB(skb)->flags = 0;
385 skb->protocol = htons(ETH_P_IPV6); 384 skb->protocol = htons(ETH_P_IPV6);
386 skb->pkt_type = PACKET_HOST; 385 skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 6b44fe8516c3..c8f9369c2a87 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -31,27 +31,6 @@
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33 33
34#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
35# define X6TDEBUG 3
36#else
37# define X6TDEBUG 1
38#endif
39
40#define X6TPRINTK(fmt, args...) printk(fmt, ## args)
41#define X6TNOPRINTK(fmt, args...) do { ; } while(0)
42
43#if X6TDEBUG >= 1
44# define X6TPRINTK1 X6TPRINTK
45#else
46# define X6TPRINTK1 X6TNOPRINTK
47#endif
48
49#if X6TDEBUG >= 3
50# define X6TPRINTK3 X6TPRINTK
51#else
52# define X6TPRINTK3 X6TNOPRINTK
53#endif
54
55/* 34/*
56 * xfrm_tunnel_spi things are for allocating unique id ("spi") 35 * xfrm_tunnel_spi things are for allocating unique id ("spi")
57 * per xfrm_address_t. 36 * per xfrm_address_t.
@@ -62,15 +41,8 @@ struct xfrm6_tunnel_spi {
62 xfrm_address_t addr; 41 xfrm_address_t addr;
63 u32 spi; 42 u32 spi;
64 atomic_t refcnt; 43 atomic_t refcnt;
65#ifdef XFRM6_TUNNEL_SPI_MAGIC
66 u32 magic;
67#endif
68}; 44};
69 45
70#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
71# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
72#endif
73
74static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); 46static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);
75 47
76static u32 xfrm6_tunnel_spi; 48static u32 xfrm6_tunnel_spi;
@@ -86,43 +58,15 @@ static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
86static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; 58static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
87static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; 59static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
88 60
89#ifdef XFRM6_TUNNEL_SPI_MAGIC
90static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
91 const char *name)
92{
93 if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) {
94 X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
95 "at %p has corrupted magic %08x "
96 "(should be %08x)\n",
97 name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC);
98 return -1;
99 }
100 return 0;
101}
102#else
103static int inline x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
104 const char *name)
105{
106 return 0;
107}
108#endif
109
110#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__)
111
112
113static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 61static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
114{ 62{
115 unsigned h; 63 unsigned h;
116 64
117 X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr);
118
119 h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]; 65 h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
120 h ^= h >> 16; 66 h ^= h >> 16;
121 h ^= h >> 8; 67 h ^= h >> 8;
122 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; 68 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
123 69
124 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h);
125
126 return h; 70 return h;
127} 71}
128 72
@@ -136,19 +80,13 @@ static int xfrm6_tunnel_spi_init(void)
136{ 80{
137 int i; 81 int i;
138 82
139 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
140
141 xfrm6_tunnel_spi = 0; 83 xfrm6_tunnel_spi = 0;
142 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", 84 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
143 sizeof(struct xfrm6_tunnel_spi), 85 sizeof(struct xfrm6_tunnel_spi),
144 0, SLAB_HWCACHE_ALIGN, 86 0, SLAB_HWCACHE_ALIGN,
145 NULL, NULL); 87 NULL, NULL);
146 if (!xfrm6_tunnel_spi_kmem) { 88 if (!xfrm6_tunnel_spi_kmem)
147 X6TPRINTK1(KERN_ERR
148 "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n",
149 __FUNCTION__);
150 return -ENOMEM; 89 return -ENOMEM;
151 }
152 90
153 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) 91 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
154 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]); 92 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
@@ -161,22 +99,16 @@ static void xfrm6_tunnel_spi_fini(void)
161{ 99{
162 int i; 100 int i;
163 101
164 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
165
166 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) { 102 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
167 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i])) 103 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
168 goto err; 104 return;
169 } 105 }
170 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) { 106 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
171 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) 107 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
172 goto err; 108 return;
173 } 109 }
174 kmem_cache_destroy(xfrm6_tunnel_spi_kmem); 110 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
175 xfrm6_tunnel_spi_kmem = NULL; 111 xfrm6_tunnel_spi_kmem = NULL;
176 return;
177err:
178 X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__);
179 return;
180} 112}
181 113
182static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) 114static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
@@ -184,19 +116,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
184 struct xfrm6_tunnel_spi *x6spi; 116 struct xfrm6_tunnel_spi *x6spi;
185 struct hlist_node *pos; 117 struct hlist_node *pos;
186 118
187 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
188
189 hlist_for_each_entry(x6spi, pos, 119 hlist_for_each_entry(x6spi, pos,
190 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 120 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
191 list_byaddr) { 121 list_byaddr) {
192 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 122 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
193 X6SPI_CHECK_MAGIC(x6spi);
194 X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi);
195 return x6spi; 123 return x6spi;
196 }
197 } 124 }
198 125
199 X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__);
200 return NULL; 126 return NULL;
201} 127}
202 128
@@ -205,8 +131,6 @@ u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
205 struct xfrm6_tunnel_spi *x6spi; 131 struct xfrm6_tunnel_spi *x6spi;
206 u32 spi; 132 u32 spi;
207 133
208 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
209
210 read_lock_bh(&xfrm6_tunnel_spi_lock); 134 read_lock_bh(&xfrm6_tunnel_spi_lock);
211 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 135 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
212 spi = x6spi ? x6spi->spi : 0; 136 spi = x6spi ? x6spi->spi : 0;
@@ -223,8 +147,6 @@ static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
223 struct hlist_node *pos; 147 struct hlist_node *pos;
224 unsigned index; 148 unsigned index;
225 149
226 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
227
228 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || 150 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
229 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) 151 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
230 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; 152 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
@@ -258,18 +180,10 @@ try_next_2:;
258 spi = 0; 180 spi = 0;
259 goto out; 181 goto out;
260alloc_spi: 182alloc_spi:
261 X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n",
262 __FUNCTION__,
263 NIP6(*(struct in6_addr *)saddr));
264 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); 183 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
265 if (!x6spi) { 184 if (!x6spi)
266 X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n",
267 __FUNCTION__);
268 goto out; 185 goto out;
269 } 186
270#ifdef XFRM6_TUNNEL_SPI_MAGIC
271 x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC;
272#endif
273 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); 187 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
274 x6spi->spi = spi; 188 x6spi->spi = spi;
275 atomic_set(&x6spi->refcnt, 1); 189 atomic_set(&x6spi->refcnt, 1);
@@ -278,9 +192,7 @@ alloc_spi:
278 192
279 index = xfrm6_tunnel_spi_hash_byaddr(saddr); 193 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
280 hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); 194 hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
281 X6SPI_CHECK_MAGIC(x6spi);
282out: 195out:
283 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
284 return spi; 196 return spi;
285} 197}
286 198
@@ -289,8 +201,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
289 struct xfrm6_tunnel_spi *x6spi; 201 struct xfrm6_tunnel_spi *x6spi;
290 u32 spi; 202 u32 spi;
291 203
292 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
293
294 write_lock_bh(&xfrm6_tunnel_spi_lock); 204 write_lock_bh(&xfrm6_tunnel_spi_lock);
295 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 205 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
296 if (x6spi) { 206 if (x6spi) {
@@ -300,8 +210,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
300 spi = __xfrm6_tunnel_alloc_spi(saddr); 210 spi = __xfrm6_tunnel_alloc_spi(saddr);
301 write_unlock_bh(&xfrm6_tunnel_spi_lock); 211 write_unlock_bh(&xfrm6_tunnel_spi_lock);
302 212
303 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
304
305 return spi; 213 return spi;
306} 214}
307 215
@@ -312,8 +220,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
312 struct xfrm6_tunnel_spi *x6spi; 220 struct xfrm6_tunnel_spi *x6spi;
313 struct hlist_node *pos, *n; 221 struct hlist_node *pos, *n;
314 222
315 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
316
317 write_lock_bh(&xfrm6_tunnel_spi_lock); 223 write_lock_bh(&xfrm6_tunnel_spi_lock);
318 224
319 hlist_for_each_entry_safe(x6spi, pos, n, 225 hlist_for_each_entry_safe(x6spi, pos, n,
@@ -321,12 +227,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
321 list_byaddr) 227 list_byaddr)
322 { 228 {
323 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 229 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
324 X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT
325 " found at %p\n",
326 __FUNCTION__,
327 NIP6(*(struct in6_addr *)saddr),
328 x6spi);
329 X6SPI_CHECK_MAGIC(x6spi);
330 if (atomic_dec_and_test(&x6spi->refcnt)) { 230 if (atomic_dec_and_test(&x6spi->refcnt)) {
331 hlist_del(&x6spi->list_byaddr); 231 hlist_del(&x6spi->list_byaddr);
332 hlist_del(&x6spi->list_byspi); 232 hlist_del(&x6spi->list_byspi);
@@ -377,20 +277,14 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
377 case ICMPV6_ADDR_UNREACH: 277 case ICMPV6_ADDR_UNREACH:
378 case ICMPV6_PORT_UNREACH: 278 case ICMPV6_PORT_UNREACH:
379 default: 279 default:
380 X6TPRINTK3(KERN_DEBUG
381 "xfrm6_tunnel: Destination Unreach.\n");
382 break; 280 break;
383 } 281 }
384 break; 282 break;
385 case ICMPV6_PKT_TOOBIG: 283 case ICMPV6_PKT_TOOBIG:
386 X6TPRINTK3(KERN_DEBUG
387 "xfrm6_tunnel: Packet Too Big.\n");
388 break; 284 break;
389 case ICMPV6_TIME_EXCEED: 285 case ICMPV6_TIME_EXCEED:
390 switch (code) { 286 switch (code) {
391 case ICMPV6_EXC_HOPLIMIT: 287 case ICMPV6_EXC_HOPLIMIT:
392 X6TPRINTK3(KERN_DEBUG
393 "xfrm6_tunnel: Too small Hoplimit.\n");
394 break; 288 break;
395 case ICMPV6_EXC_FRAGTIME: 289 case ICMPV6_EXC_FRAGTIME:
396 default: 290 default:
@@ -447,22 +341,14 @@ static struct xfrm6_tunnel xfrm6_tunnel_handler = {
447 341
448static int __init xfrm6_tunnel_init(void) 342static int __init xfrm6_tunnel_init(void)
449{ 343{
450 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); 344 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
451
452 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) {
453 X6TPRINTK1(KERN_ERR
454 "xfrm6_tunnel init: can't add xfrm type\n");
455 return -EAGAIN; 345 return -EAGAIN;
456 } 346
457 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) { 347 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) {
458 X6TPRINTK1(KERN_ERR
459 "xfrm6_tunnel init(): can't add handler\n");
460 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 348 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
461 return -EAGAIN; 349 return -EAGAIN;
462 } 350 }
463 if (xfrm6_tunnel_spi_init() < 0) { 351 if (xfrm6_tunnel_spi_init() < 0) {
464 X6TPRINTK1(KERN_ERR
465 "xfrm6_tunnel init: failed to initialize spi\n");
466 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler); 352 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
467 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 353 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
468 return -EAGAIN; 354 return -EAGAIN;
@@ -472,15 +358,9 @@ static int __init xfrm6_tunnel_init(void)
472 358
473static void __exit xfrm6_tunnel_fini(void) 359static void __exit xfrm6_tunnel_fini(void)
474{ 360{
475 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
476
477 xfrm6_tunnel_spi_fini(); 361 xfrm6_tunnel_spi_fini();
478 if (xfrm6_tunnel_deregister(&xfrm6_tunnel_handler)) 362 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
479 X6TPRINTK1(KERN_ERR 363 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
480 "xfrm6_tunnel close: can't remove handler\n");
481 if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0)
482 X6TPRINTK1(KERN_ERR
483 "xfrm6_tunnel close: can't remove xfrm type\n");
484} 364}
485 365
486module_init(xfrm6_tunnel_init); 366module_init(xfrm6_tunnel_init);
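
The xfrm6_tunnel.c diff is almost entirely deletion: the module-private X6TPRINTK*/magic-number debug scaffolding goes away, and the init/exit paths shrink to the plain register-then-unwind ladder that remains visible in the right-hand column above. A reduced sketch of that shape, assuming a kernel build; every demo_* name is a stand-in rather than the xfrm API, and the stubs exist only so the sketch is complete:

    #include <linux/init.h>
    #include <linux/module.h>

    static int  demo_register_type(void)      { return 0; }  /* stand-ins for the  */
    static int  demo_register_handler(void)   { return 0; }  /* xfrm type/handler/ */
    static int  demo_spi_init(void)           { return 0; }  /* spi registration   */
    static void demo_unregister_type(void)    { }
    static void demo_unregister_handler(void) { }
    static void demo_spi_fini(void)           { }

    static int __init demo_init(void)
    {
        int err;

        err = demo_register_type();
        if (err)
            return err;
        err = demo_register_handler();
        if (err)
            goto out_type;
        err = demo_spi_init();
        if (err)
            goto out_handler;
        return 0;

    out_handler:
        demo_unregister_handler();
    out_type:
        demo_unregister_type();
        return err;
    }

    static void __exit demo_exit(void)
    {
        /* teardown in reverse order, no error reporting needed */
        demo_spi_fini();
        demo_unregister_handler();
        demo_unregister_type();
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
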
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 7fae48a53bff..17699eeb64d7 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -308,7 +308,7 @@ static void irda_connect_response(struct irda_sock *self)
308 308
309 IRDA_ASSERT(self != NULL, return;); 309 IRDA_ASSERT(self != NULL, return;);
310 310
311 skb = dev_alloc_skb(64); 311 skb = alloc_skb(64, GFP_ATOMIC);
312 if (skb == NULL) { 312 if (skb == NULL) {
313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", 313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n",
314 __FUNCTION__); 314 __FUNCTION__);
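
The IrDA hunks from here on replace dev_alloc_skb(len) with alloc_skb(len, GFP_ATOMIC). In this kernel generation dev_alloc_skb() is a thin wrapper that allocates with GFP_ATOMIC and reserves 16 bytes of headroom for a link-layer header; the IrDA control frames size and reserve their own headroom, so calling the underlying allocator makes both the size and the allocation context explicit. A minimal sketch, assuming kernel context; build_ctrl_frame() and the 64-byte payload size are illustrative, not taken from the patch:

    #include <linux/skbuff.h>

    static struct sk_buff *build_ctrl_frame(unsigned int header_room)
    {
        /* explicit size and explicit GFP_ATOMIC, no hidden extra headroom */
        struct sk_buff *skb = alloc_skb(64 + header_room, GFP_ATOMIC);

        if (!skb)
            return NULL;

        skb_reserve(skb, header_room);  /* the caller chooses the headroom */
        return skb;
    }
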
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 9c4a902a9dba..ad6b6af3dd97 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -115,12 +115,10 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line)
115 115
116 IRDA_ASSERT(ircomm != NULL, return NULL;); 116 IRDA_ASSERT(ircomm != NULL, return NULL;);
117 117
118 self = kmalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); 118 self = kzalloc(sizeof(struct ircomm_cb), GFP_ATOMIC);
119 if (self == NULL) 119 if (self == NULL)
120 return NULL; 120 return NULL;
121 121
122 memset(self, 0, sizeof(struct ircomm_cb));
123
124 self->notify = *notify; 122 self->notify = *notify;
125 self->magic = IRCOMM_MAGIC; 123 self->magic = IRCOMM_MAGIC;
126 124
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index d9097207aed3..959874b6451f 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -81,7 +81,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
81 81
82 /* Any userdata supplied? */ 82 /* Any userdata supplied? */
83 if (userdata == NULL) { 83 if (userdata == NULL) {
84 tx_skb = dev_alloc_skb(64); 84 tx_skb = alloc_skb(64, GFP_ATOMIC);
85 if (!tx_skb) 85 if (!tx_skb)
86 return -ENOMEM; 86 return -ENOMEM;
87 87
@@ -115,7 +115,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
116 116
117 if (!userdata) { 117 if (!userdata) {
118 tx_skb = dev_alloc_skb(64); 118 tx_skb = alloc_skb(64, GFP_ATOMIC);
119 if (!tx_skb) 119 if (!tx_skb)
120 return -ENOMEM; 120 return -ENOMEM;
121 121
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 6009bab05091..a39f5735a90b 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -121,7 +121,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
121 121
122 skb = self->ctrl_skb; 122 skb = self->ctrl_skb;
123 if (!skb) { 123 if (!skb) {
124 skb = dev_alloc_skb(256); 124 skb = alloc_skb(256, GFP_ATOMIC);
125 if (!skb) { 125 if (!skb) {
126 spin_unlock_irqrestore(&self->spinlock, flags); 126 spin_unlock_irqrestore(&self->spinlock, flags);
127 return -ENOMEM; 127 return -ENOMEM;
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index b400f27851fc..3bcdb467efc5 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -379,12 +379,11 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
379 self = hashbin_lock_find(ircomm_tty, line, NULL); 379 self = hashbin_lock_find(ircomm_tty, line, NULL);
380 if (!self) { 380 if (!self) {
381 /* No, so make new instance */ 381 /* No, so make new instance */
382 self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); 382 self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
383 if (self == NULL) { 383 if (self == NULL) {
384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__); 384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__);
385 return -ENOMEM; 385 return -ENOMEM;
386 } 386 }
387 memset(self, 0, sizeof(struct ircomm_tty_cb));
388 387
389 self->magic = IRCOMM_TTY_MAGIC; 388 self->magic = IRCOMM_TTY_MAGIC;
390 self->flow = FLOW_STOP; 389 self->flow = FLOW_STOP;
@@ -759,8 +758,9 @@ static int ircomm_tty_write(struct tty_struct *tty,
759 } 758 }
760 } else { 759 } else {
761 /* Prepare a full sized frame */ 760 /* Prepare a full sized frame */
762 skb = dev_alloc_skb(self->max_data_size+ 761 skb = alloc_skb(self->max_data_size+
763 self->max_header_size); 762 self->max_header_size,
763 GFP_ATOMIC);
764 if (!skb) { 764 if (!skb) {
765 spin_unlock_irqrestore(&self->spinlock, flags); 765 spin_unlock_irqrestore(&self->spinlock, flags);
766 return -ENOBUFS; 766 return -ENOBUFS;
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index ba40e5495f58..7e7a31798d8d 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -401,12 +401,10 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
401 } 401 }
402 402
403 /* Allocate dongle info for this instance */ 403 /* Allocate dongle info for this instance */
404 dongle = kmalloc(sizeof(dongle_t), GFP_KERNEL); 404 dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL);
405 if (!dongle) 405 if (!dongle)
406 goto out; 406 goto out;
407 407
408 memset(dongle, 0, sizeof(dongle_t));
409
410 /* Bind the registration info to this particular instance */ 408 /* Bind the registration info to this particular instance */
411 dongle->issue = reg; 409 dongle->issue = reg;
412 dongle->dev = dev; 410 dongle->dev = dev;
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index a0472652a44e..61128aa05b40 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -345,7 +345,7 @@ static void iriap_disconnect_request(struct iriap_cb *self)
345 IRDA_ASSERT(self != NULL, return;); 345 IRDA_ASSERT(self != NULL, return;);
346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
347 347
348 tx_skb = dev_alloc_skb(64); 348 tx_skb = alloc_skb(64, GFP_ATOMIC);
349 if (tx_skb == NULL) { 349 if (tx_skb == NULL) {
350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", 350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n",
351 __FUNCTION__, 64); 351 __FUNCTION__, 64);
@@ -396,7 +396,7 @@ int iriap_getvaluebyclass_request(struct iriap_cb *self,
396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */ 396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */
397 397
398 skb_len = self->max_header_size+2+name_len+1+attr_len+4; 398 skb_len = self->max_header_size+2+name_len+1+attr_len+4;
399 tx_skb = dev_alloc_skb(skb_len); 399 tx_skb = alloc_skb(skb_len, GFP_ATOMIC);
400 if (!tx_skb) 400 if (!tx_skb)
401 return -ENOMEM; 401 return -ENOMEM;
402 402
@@ -562,7 +562,8 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
562 * value. We add 32 bytes because of the 6 bytes for the frame and 562 * value. We add 32 bytes because of the 6 bytes for the frame and
563 * max 5 bytes for the value coding. 563 * max 5 bytes for the value coding.
564 */ 564 */
565 tx_skb = dev_alloc_skb(value->len + self->max_header_size + 32); 565 tx_skb = alloc_skb(value->len + self->max_header_size + 32,
566 GFP_ATOMIC);
566 if (!tx_skb) 567 if (!tx_skb)
567 return; 568 return;
568 569
@@ -700,7 +701,7 @@ void iriap_send_ack(struct iriap_cb *self)
700 IRDA_ASSERT(self != NULL, return;); 701 IRDA_ASSERT(self != NULL, return;);
701 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 702 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
702 703
703 tx_skb = dev_alloc_skb(64); 704 tx_skb = alloc_skb(64, GFP_ATOMIC);
704 if (!tx_skb) 705 if (!tx_skb)
705 return; 706 return;
706 707
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index a73607450de1..da17395df05a 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -365,7 +365,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
365 365
366 switch (event) { 366 switch (event) {
367 case IAP_LM_CONNECT_INDICATION: 367 case IAP_LM_CONNECT_INDICATION:
368 tx_skb = dev_alloc_skb(64); 368 tx_skb = alloc_skb(64, GFP_ATOMIC);
369 if (tx_skb == NULL) { 369 if (tx_skb == NULL) {
370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); 370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__);
371 return; 371 return;
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 82e665c79991..a154b1d71c0f 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -82,13 +82,12 @@ struct ias_object *irias_new_object( char *name, int id)
82 82
83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
84 84
85 obj = kmalloc(sizeof(struct ias_object), GFP_ATOMIC); 85 obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
86 if (obj == NULL) { 86 if (obj == NULL) {
87 IRDA_WARNING("%s(), Unable to allocate object!\n", 87 IRDA_WARNING("%s(), Unable to allocate object!\n",
88 __FUNCTION__); 88 __FUNCTION__);
89 return NULL; 89 return NULL;
90 } 90 }
91 memset(obj, 0, sizeof( struct ias_object));
92 91
93 obj->magic = IAS_OBJECT_MAGIC; 92 obj->magic = IAS_OBJECT_MAGIC;
94 obj->name = strndup(name, IAS_MAX_CLASSNAME); 93 obj->name = strndup(name, IAS_MAX_CLASSNAME);
@@ -346,13 +345,12 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
346 IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); 345 IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
347 IRDA_ASSERT(name != NULL, return;); 346 IRDA_ASSERT(name != NULL, return;);
348 347
349 attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 348 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
350 if (attrib == NULL) { 349 if (attrib == NULL) {
351 IRDA_WARNING("%s: Unable to allocate attribute!\n", 350 IRDA_WARNING("%s: Unable to allocate attribute!\n",
352 __FUNCTION__); 351 __FUNCTION__);
353 return; 352 return;
354 } 353 }
355 memset(attrib, 0, sizeof( struct ias_attrib));
356 354
357 attrib->magic = IAS_ATTRIB_MAGIC; 355 attrib->magic = IAS_ATTRIB_MAGIC;
358 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 356 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -382,13 +380,12 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
382 IRDA_ASSERT(name != NULL, return;); 380 IRDA_ASSERT(name != NULL, return;);
383 IRDA_ASSERT(octets != NULL, return;); 381 IRDA_ASSERT(octets != NULL, return;);
384 382
385 attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 383 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
386 if (attrib == NULL) { 384 if (attrib == NULL) {
387 IRDA_WARNING("%s: Unable to allocate attribute!\n", 385 IRDA_WARNING("%s: Unable to allocate attribute!\n",
388 __FUNCTION__); 386 __FUNCTION__);
389 return; 387 return;
390 } 388 }
391 memset(attrib, 0, sizeof( struct ias_attrib));
392 389
393 attrib->magic = IAS_ATTRIB_MAGIC; 390 attrib->magic = IAS_ATTRIB_MAGIC;
394 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 391 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -416,13 +413,12 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
416 IRDA_ASSERT(name != NULL, return;); 413 IRDA_ASSERT(name != NULL, return;);
417 IRDA_ASSERT(value != NULL, return;); 414 IRDA_ASSERT(value != NULL, return;);
418 415
419 attrib = kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC); 416 attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
420 if (attrib == NULL) { 417 if (attrib == NULL) {
421 IRDA_WARNING("%s: Unable to allocate attribute!\n", 418 IRDA_WARNING("%s: Unable to allocate attribute!\n",
422 __FUNCTION__); 419 __FUNCTION__);
423 return; 420 return;
424 } 421 }
425 memset(attrib, 0, sizeof( struct ias_attrib));
426 422
427 attrib->magic = IAS_ATTRIB_MAGIC; 423 attrib->magic = IAS_ATTRIB_MAGIC;
428 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 424 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -443,12 +439,11 @@ struct ias_value *irias_new_integer_value(int integer)
443{ 439{
444 struct ias_value *value; 440 struct ias_value *value;
445 441
446 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 442 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
447 if (value == NULL) { 443 if (value == NULL) {
448 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 444 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
449 return NULL; 445 return NULL;
450 } 446 }
451 memset(value, 0, sizeof(struct ias_value));
452 447
453 value->type = IAS_INTEGER; 448 value->type = IAS_INTEGER;
454 value->len = 4; 449 value->len = 4;
@@ -469,12 +464,11 @@ struct ias_value *irias_new_string_value(char *string)
469{ 464{
470 struct ias_value *value; 465 struct ias_value *value;
471 466
472 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 467 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
473 if (value == NULL) { 468 if (value == NULL) {
474 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 469 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
475 return NULL; 470 return NULL;
476 } 471 }
477 memset( value, 0, sizeof( struct ias_value));
478 472
479 value->type = IAS_STRING; 473 value->type = IAS_STRING;
480 value->charset = CS_ASCII; 474 value->charset = CS_ASCII;
@@ -495,12 +489,11 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
495{ 489{
496 struct ias_value *value; 490 struct ias_value *value;
497 491
498 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 492 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
499 if (value == NULL) { 493 if (value == NULL) {
500 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 494 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
501 return NULL; 495 return NULL;
502 } 496 }
503 memset(value, 0, sizeof(struct ias_value));
504 497
505 value->type = IAS_OCT_SEQ; 498 value->type = IAS_OCT_SEQ;
506 /* Check length */ 499 /* Check length */
@@ -522,12 +515,11 @@ struct ias_value *irias_new_missing_value(void)
522{ 515{
523 struct ias_value *value; 516 struct ias_value *value;
524 517
525 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 518 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
526 if (value == NULL) { 519 if (value == NULL) {
527 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 520 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
528 return NULL; 521 return NULL;
529 } 522 }
530 memset(value, 0, sizeof(struct ias_value));
531 523
532 value->type = IAS_MISSING; 524 value->type = IAS_MISSING;
533 value->len = 0; 525 value->len = 0;
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index bd659dd545ac..7dd0a2fe1d20 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -636,7 +636,7 @@ void irlan_get_provider_info(struct irlan_cb *self)
636 IRDA_ASSERT(self != NULL, return;); 636 IRDA_ASSERT(self != NULL, return;);
637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
638 638
639 skb = dev_alloc_skb(64); 639 skb = alloc_skb(64, GFP_ATOMIC);
640 if (!skb) 640 if (!skb)
641 return; 641 return;
642 642
@@ -668,7 +668,7 @@ void irlan_open_data_channel(struct irlan_cb *self)
668 IRDA_ASSERT(self != NULL, return;); 668 IRDA_ASSERT(self != NULL, return;);
669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
670 670
671 skb = dev_alloc_skb(64); 671 skb = alloc_skb(64, GFP_ATOMIC);
672 if (!skb) 672 if (!skb)
673 return; 673 return;
674 674
@@ -704,7 +704,7 @@ void irlan_close_data_channel(struct irlan_cb *self)
704 if (self->client.tsap_ctrl == NULL) 704 if (self->client.tsap_ctrl == NULL)
705 return; 705 return;
706 706
707 skb = dev_alloc_skb(64); 707 skb = alloc_skb(64, GFP_ATOMIC);
708 if (!skb) 708 if (!skb)
709 return; 709 return;
710 710
@@ -739,7 +739,7 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
739 IRDA_ASSERT(self != NULL, return;); 739 IRDA_ASSERT(self != NULL, return;);
740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
741 741
742 skb = dev_alloc_skb(128); 742 skb = alloc_skb(128, GFP_ATOMIC);
743 if (!skb) 743 if (!skb)
744 return; 744 return;
745 745
@@ -777,7 +777,7 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
777 IRDA_ASSERT(self != NULL, return;); 777 IRDA_ASSERT(self != NULL, return;);
778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
779 779
780 skb = dev_alloc_skb(128); 780 skb = alloc_skb(128, GFP_ATOMIC);
781 if (!skb) 781 if (!skb)
782 return; 782 return;
783 783
@@ -816,7 +816,7 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
816 IRDA_ASSERT(self != NULL, return;); 816 IRDA_ASSERT(self != NULL, return;);
817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
818 818
819 skb = dev_alloc_skb(128); 819 skb = alloc_skb(128, GFP_ATOMIC);
820 if (!skb) 820 if (!skb)
821 return; 821 return;
822 822
@@ -856,7 +856,7 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
856 IRDA_ASSERT(self != NULL, return;); 856 IRDA_ASSERT(self != NULL, return;);
857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
858 858
859 skb = dev_alloc_skb(128); 859 skb = alloc_skb(128, GFP_ATOMIC);
860 if (!skb) 860 if (!skb)
861 return; 861 return;
862 862
@@ -891,7 +891,7 @@ void irlan_get_media_char(struct irlan_cb *self)
891 IRDA_ASSERT(self != NULL, return;); 891 IRDA_ASSERT(self != NULL, return;);
892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
893 893
894 skb = dev_alloc_skb(64); 894 skb = alloc_skb(64, GFP_ATOMIC);
895 if (!skb) 895 if (!skb)
896 return; 896 return;
897 897
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 39c202d1c374..9c0df86044d7 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -296,7 +296,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
296 IRDA_ASSERT(self != NULL, return;); 296 IRDA_ASSERT(self != NULL, return;);
297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
298 298
299 skb = dev_alloc_skb(128); 299 skb = alloc_skb(128, GFP_ATOMIC);
300 if (!skb) 300 if (!skb)
301 return; 301 return;
302 302
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index cade355ac8af..e7852a07495e 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -116,11 +116,10 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
117 117
118 /* Initialize the irlap structure. */ 118 /* Initialize the irlap structure. */
119 self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL); 119 self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
120 if (self == NULL) 120 if (self == NULL)
121 return NULL; 121 return NULL;
122 122
123 memset(self, 0, sizeof(struct irlap_cb));
124 self->magic = LAP_MAGIC; 123 self->magic = LAP_MAGIC;
125 124
126 /* Make a binding between the layers */ 125 /* Make a binding between the layers */
@@ -882,7 +881,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
882 /* Change speed now, or just piggyback speed on frames */ 881 /* Change speed now, or just piggyback speed on frames */
883 if (now) { 882 if (now) {
884 /* Send down empty frame to trigger speed change */ 883 /* Send down empty frame to trigger speed change */
885 skb = dev_alloc_skb(0); 884 skb = alloc_skb(0, GFP_ATOMIC);
886 if (skb) 885 if (skb)
887 irlap_queue_xmit(self, skb); 886 irlap_queue_xmit(self, skb);
888 } 887 }
@@ -1222,7 +1221,7 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
1222{ 1221{
1223 struct seq_file *seq; 1222 struct seq_file *seq;
1224 int rc = -ENOMEM; 1223 int rc = -ENOMEM;
1225 struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1224 struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1226 1225
1227 if (!s) 1226 if (!s)
1228 goto out; 1227 goto out;
@@ -1238,7 +1237,6 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
1238 1237
1239 seq = file->private_data; 1238 seq = file->private_data;
1240 seq->private = s; 1239 seq->private = s;
1241 memset(s, 0, sizeof(*s));
1242out: 1240out:
1243 return rc; 1241 return rc;
1244out_kfree: 1242out_kfree:
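
Editor's note: the kmalloc()+memset() pairs removed throughout this series collapse into kzalloc(), which returns already-zeroed memory. A minimal sketch of the pattern, using a hypothetical foo_cb control block as a stand-in for the per-protocol structures touched here:

	#include <linux/slab.h>

	/* foo_cb and FOO_MAGIC are hypothetical stand-ins. */
	#define FOO_MAGIC 0x1234

	struct foo_cb {
		int magic;
		/* remaining fields start out zeroed by kzalloc() */
	};

	static struct foo_cb *foo_cb_alloc(gfp_t gfp)
	{
		struct foo_cb *self = kzalloc(sizeof(*self), gfp);

		if (!self)
			return NULL;		/* caller maps this to -ENOMEM */

		self->magic = FOO_MAGIC;	/* only non-zero state needs setting */
		return self;
	}

The same transformation repeats in irlmp, irttp, lapb, llc, rxrpc and the sched classifiers below; only the structure name and GFP flags differ.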
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 3e9a06abbdd0..ccb983bf0f4a 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -117,7 +117,7 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
118 118
119 /* Allocate frame */ 119 /* Allocate frame */
120 tx_skb = dev_alloc_skb(64); 120 tx_skb = alloc_skb(64, GFP_ATOMIC);
121 if (!tx_skb) 121 if (!tx_skb)
122 return; 122 return;
123 123
@@ -210,7 +210,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
211 211
212 /* Allocate frame */ 212 /* Allocate frame */
213 tx_skb = dev_alloc_skb(64); 213 tx_skb = alloc_skb(64, GFP_ATOMIC);
214 if (!tx_skb) 214 if (!tx_skb)
215 return; 215 return;
216 216
@@ -250,7 +250,7 @@ void irlap_send_dm_frame( struct irlap_cb *self)
250 IRDA_ASSERT(self != NULL, return;); 250 IRDA_ASSERT(self != NULL, return;);
251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
252 252
253 tx_skb = dev_alloc_skb(32); 253 tx_skb = alloc_skb(32, GFP_ATOMIC);
254 if (!tx_skb) 254 if (!tx_skb)
255 return; 255 return;
256 256
@@ -282,7 +282,7 @@ void irlap_send_disc_frame(struct irlap_cb *self)
282 IRDA_ASSERT(self != NULL, return;); 282 IRDA_ASSERT(self != NULL, return;);
283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
284 284
285 tx_skb = dev_alloc_skb(16); 285 tx_skb = alloc_skb(16, GFP_ATOMIC);
286 if (!tx_skb) 286 if (!tx_skb)
287 return; 287 return;
288 288
@@ -315,7 +315,7 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
316 IRDA_ASSERT(discovery != NULL, return;); 316 IRDA_ASSERT(discovery != NULL, return;);
317 317
318 tx_skb = dev_alloc_skb(64); 318 tx_skb = alloc_skb(64, GFP_ATOMIC);
319 if (!tx_skb) 319 if (!tx_skb)
320 return; 320 return;
321 321
@@ -422,11 +422,10 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
422 return; 422 return;
423 } 423 }
424 424
425 if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { 425 if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
426 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__); 426 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__);
427 return; 427 return;
428 } 428 }
429 memset(discovery, 0, sizeof(discovery_t));
430 429
431 discovery->data.daddr = info->daddr; 430 discovery->data.daddr = info->daddr;
432 discovery->data.saddr = self->saddr; 431 discovery->data.saddr = self->saddr;
@@ -576,7 +575,7 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command)
576 struct sk_buff *tx_skb; 575 struct sk_buff *tx_skb;
577 __u8 *frame; 576 __u8 *frame;
578 577
579 tx_skb = dev_alloc_skb(16); 578 tx_skb = alloc_skb(16, GFP_ATOMIC);
580 if (!tx_skb) 579 if (!tx_skb)
581 return; 580 return;
582 581
@@ -601,7 +600,7 @@ void irlap_send_rd_frame(struct irlap_cb *self)
601 struct sk_buff *tx_skb; 600 struct sk_buff *tx_skb;
602 __u8 *frame; 601 __u8 *frame;
603 602
604 tx_skb = dev_alloc_skb(16); 603 tx_skb = alloc_skb(16, GFP_ATOMIC);
605 if (!tx_skb) 604 if (!tx_skb)
606 return; 605 return;
607 606
@@ -1215,7 +1214,7 @@ void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
1215 struct test_frame *frame; 1214 struct test_frame *frame;
1216 __u8 *info; 1215 __u8 *info;
1217 1216
1218 tx_skb = dev_alloc_skb(cmd->len+sizeof(struct test_frame)); 1217 tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC);
1219 if (!tx_skb) 1218 if (!tx_skb)
1220 return; 1219 return;
1221 1220
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 129ad64c15bb..c440913dee14 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -78,10 +78,9 @@ int __init irlmp_init(void)
78{ 78{
79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
80 /* Initialize the irlmp structure. */ 80 /* Initialize the irlmp structure. */
81 irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL); 81 irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
82 if (irlmp == NULL) 82 if (irlmp == NULL)
83 return -ENOMEM; 83 return -ENOMEM;
84 memset(irlmp, 0, sizeof(struct irlmp_cb));
85 84
86 irlmp->magic = LMP_MAGIC; 85 irlmp->magic = LMP_MAGIC;
87 86
@@ -160,12 +159,11 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
160 return NULL; 159 return NULL;
161 160
162 /* Allocate new instance of a LSAP connection */ 161 /* Allocate new instance of a LSAP connection */
163 self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC); 162 self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
164 if (self == NULL) { 163 if (self == NULL) {
165 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__); 164 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__);
166 return NULL; 165 return NULL;
167 } 166 }
168 memset(self, 0, sizeof(struct lsap_cb));
169 167
170 self->magic = LMP_LSAP_MAGIC; 168 self->magic = LMP_LSAP_MAGIC;
171 self->slsap_sel = slsap_sel; 169 self->slsap_sel = slsap_sel;
@@ -288,12 +286,11 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
288 /* 286 /*
289 * Allocate new instance of a LSAP connection 287 * Allocate new instance of a LSAP connection
290 */ 288 */
291 lap = kmalloc(sizeof(struct lap_cb), GFP_KERNEL); 289 lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL);
292 if (lap == NULL) { 290 if (lap == NULL) {
293 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__); 291 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__);
294 return; 292 return;
295 } 293 }
296 memset(lap, 0, sizeof(struct lap_cb));
297 294
298 lap->irlap = irlap; 295 lap->irlap = irlap;
299 lap->magic = LMP_LAP_MAGIC; 296 lap->magic = LMP_LAP_MAGIC;
@@ -395,7 +392,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
395 392
396 /* Any userdata? */ 393 /* Any userdata? */
397 if (tx_skb == NULL) { 394 if (tx_skb == NULL) {
398 tx_skb = dev_alloc_skb(64); 395 tx_skb = alloc_skb(64, GFP_ATOMIC);
399 if (!tx_skb) 396 if (!tx_skb)
400 return -ENOMEM; 397 return -ENOMEM;
401 398
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index e53bf9e0053e..a1e502ff9070 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -476,11 +476,10 @@ dev_irnet_open(struct inode * inode,
476#endif /* SECURE_DEVIRNET */ 476#endif /* SECURE_DEVIRNET */
477 477
478 /* Allocate a private structure for this IrNET instance */ 478 /* Allocate a private structure for this IrNET instance */
479 ap = kmalloc(sizeof(*ap), GFP_KERNEL); 479 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
480 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); 480 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n");
481 481
482 /* initialize the irnet structure */ 482 /* initialize the irnet structure */
483 memset(ap, 0, sizeof(*ap));
484 ap->file = file; 483 ap->file = file;
485 484
486 /* PPP channel setup */ 485 /* PPP channel setup */
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 49c51c5f1a86..42acf1cde737 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -85,10 +85,9 @@ static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
85 */ 85 */
86int __init irttp_init(void) 86int __init irttp_init(void)
87{ 87{
88 irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); 88 irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
89 if (irttp == NULL) 89 if (irttp == NULL)
90 return -ENOMEM; 90 return -ENOMEM;
91 memset(irttp, 0, sizeof(struct irttp_cb));
92 91
93 irttp->magic = TTP_MAGIC; 92 irttp->magic = TTP_MAGIC;
94 93
@@ -306,7 +305,8 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
306 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); 305 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__);
307 306
308 /* Make new segment */ 307 /* Make new segment */
309 frag = dev_alloc_skb(self->max_seg_size+self->max_header_size); 308 frag = alloc_skb(self->max_seg_size+self->max_header_size,
309 GFP_ATOMIC);
310 if (!frag) 310 if (!frag)
311 return; 311 return;
312 312
@@ -389,12 +389,11 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
389 return NULL; 389 return NULL;
390 } 390 }
391 391
392 self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 392 self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
393 if (self == NULL) { 393 if (self == NULL) {
394 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); 394 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__);
395 return NULL; 395 return NULL;
396 } 396 }
397 memset(self, 0, sizeof(struct tsap_cb));
398 spin_lock_init(&self->lock); 397 spin_lock_init(&self->lock);
399 398
400 /* Initialise todo timer */ 399 /* Initialise todo timer */
@@ -805,7 +804,7 @@ static inline void irttp_give_credit(struct tsap_cb *self)
805 self->send_credit, self->avail_credit, self->remote_credit); 804 self->send_credit, self->avail_credit, self->remote_credit);
806 805
807 /* Give credit to peer */ 806 /* Give credit to peer */
808 tx_skb = dev_alloc_skb(64); 807 tx_skb = alloc_skb(64, GFP_ATOMIC);
809 if (!tx_skb) 808 if (!tx_skb)
810 return; 809 return;
811 810
@@ -1094,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
1094 1093
1095 /* Any userdata supplied? */ 1094 /* Any userdata supplied? */
1096 if (userdata == NULL) { 1095 if (userdata == NULL) {
1097 tx_skb = dev_alloc_skb(64); 1096 tx_skb = alloc_skb(64, GFP_ATOMIC);
1098 if (!tx_skb) 1097 if (!tx_skb)
1099 return -ENOMEM; 1098 return -ENOMEM;
1100 1099
@@ -1342,7 +1341,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
1342 1341
1343 /* Any userdata supplied? */ 1342 /* Any userdata supplied? */
1344 if (userdata == NULL) { 1343 if (userdata == NULL) {
1345 tx_skb = dev_alloc_skb(64); 1344 tx_skb = alloc_skb(64, GFP_ATOMIC);
1346 if (!tx_skb) 1345 if (!tx_skb)
1347 return -ENOMEM; 1346 return -ENOMEM;
1348 1347
@@ -1541,7 +1540,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1541 1540
1542 if (!userdata) { 1541 if (!userdata) {
1543 struct sk_buff *tx_skb; 1542 struct sk_buff *tx_skb;
1544 tx_skb = dev_alloc_skb(64); 1543 tx_skb = alloc_skb(64, GFP_ATOMIC);
1545 if (!tx_skb) 1544 if (!tx_skb)
1546 return -ENOMEM; 1545 return -ENOMEM;
1547 1546
@@ -1876,7 +1875,7 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
1876 int rc = -ENOMEM; 1875 int rc = -ENOMEM;
1877 struct irttp_iter_state *s; 1876 struct irttp_iter_state *s;
1878 1877
1879 s = kmalloc(sizeof(*s), GFP_KERNEL); 1878 s = kzalloc(sizeof(*s), GFP_KERNEL);
1880 if (!s) 1879 if (!s)
1881 goto out; 1880 goto out;
1882 1881
@@ -1886,7 +1885,6 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
1886 1885
1887 seq = file->private_data; 1886 seq = file->private_data;
1888 seq->private = s; 1887 seq->private = s;
1889 memset(s, 0, sizeof(*s));
1890out: 1888out:
1891 return rc; 1889 return rc;
1892out_kfree: 1890out_kfree:
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index aea6616cea3d..d504eed416f6 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -115,14 +115,12 @@ static struct lapb_cb *lapb_devtostruct(struct net_device *dev)
115 */ 115 */
116static struct lapb_cb *lapb_create_cb(void) 116static struct lapb_cb *lapb_create_cb(void)
117{ 117{
118 struct lapb_cb *lapb = kmalloc(sizeof(*lapb), GFP_ATOMIC); 118 struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC);
119 119
120 120
121 if (!lapb) 121 if (!lapb)
122 goto out; 122 goto out;
123 123
124 memset(lapb, 0x00, sizeof(*lapb));
125
126 skb_queue_head_init(&lapb->write_queue); 124 skb_queue_head_init(&lapb->write_queue);
127 skb_queue_head_init(&lapb->ack_queue); 125 skb_queue_head_init(&lapb->ack_queue);
128 126
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index bd242a49514a..d12413cff5bd 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -33,10 +33,9 @@ unsigned char llc_station_mac_sa[ETH_ALEN];
33 */ 33 */
34static struct llc_sap *llc_sap_alloc(void) 34static struct llc_sap *llc_sap_alloc(void)
35{ 35{
36 struct llc_sap *sap = kmalloc(sizeof(*sap), GFP_ATOMIC); 36 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
37 37
38 if (sap) { 38 if (sap) {
39 memset(sap, 0, sizeof(*sap));
40 sap->state = LLC_SAP_STATE_ACTIVE; 39 sap->state = LLC_SAP_STATE_ACTIVE;
41 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN); 40 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN);
42 rwlock_init(&sap->sk_list.lock); 41 rwlock_init(&sap->sk_list.lock);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 42a178aa30f9..a9894ddfd72a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -386,8 +386,8 @@ config NETFILTER_XT_MATCH_REALM
386 <file:Documentation/modules.txt>. If unsure, say `N'. 386 <file:Documentation/modules.txt>. If unsure, say `N'.
387 387
388config NETFILTER_XT_MATCH_SCTP 388config NETFILTER_XT_MATCH_SCTP
389 tristate '"sctp" protocol match support' 389 tristate '"sctp" protocol match support (EXPERIMENTAL)'
390 depends on NETFILTER_XTABLES 390 depends on NETFILTER_XTABLES && EXPERIMENTAL
391 help 391 help
392 With this option enabled, you will be able to use the 392 With this option enabled, you will be able to use the
393 `sctp' match in order to match on SCTP source/destination ports 393 `sctp' match in order to match on SCTP source/destination ports
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5fcab2ef231f..4ef836699962 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -428,6 +428,8 @@ static struct file_operations ct_cpu_seq_fops = {
428 428
429/* Sysctl support */ 429/* Sysctl support */
430 430
431int nf_conntrack_checksum = 1;
432
431#ifdef CONFIG_SYSCTL 433#ifdef CONFIG_SYSCTL
432 434
433/* From nf_conntrack_core.c */ 435/* From nf_conntrack_core.c */
@@ -459,8 +461,6 @@ extern unsigned int nf_ct_generic_timeout;
459static int log_invalid_proto_min = 0; 461static int log_invalid_proto_min = 0;
460static int log_invalid_proto_max = 255; 462static int log_invalid_proto_max = 255;
461 463
462int nf_conntrack_checksum = 1;
463
464static struct ctl_table_header *nf_ct_sysctl_header; 464static struct ctl_table_header *nf_ct_sysctl_header;
465 465
466static ctl_table nf_ct_sysctl_table[] = { 466static ctl_table nf_ct_sysctl_table[] = {
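
Editor's note: this hunk only relocates the nf_conntrack_checksum definition above the #ifdef CONFIG_SYSCTL block, presumably so the flag still exists when sysctl support is compiled out, since other conntrack code references it unconditionally. Sketch of the resulting layout:

	int nf_conntrack_checksum = 1;	/* visible regardless of CONFIG_SYSCTL */

	#ifdef CONFIG_SYSCTL
	static struct ctl_table_header *nf_ct_sysctl_header;
	/* the ctl_table entry exposing nf_conntrack_checksum stays in here */
	#endif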
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bb6fcee452ca..662a869593bf 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -219,21 +219,20 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
219 219
220 switch (verdict & NF_VERDICT_MASK) { 220 switch (verdict & NF_VERDICT_MASK) {
221 case NF_ACCEPT: 221 case NF_ACCEPT:
222 case NF_STOP:
222 info->okfn(skb); 223 info->okfn(skb);
224 case NF_STOLEN:
223 break; 225 break;
224
225 case NF_QUEUE: 226 case NF_QUEUE:
226 if (!nf_queue(&skb, elem, info->pf, info->hook, 227 if (!nf_queue(&skb, elem, info->pf, info->hook,
227 info->indev, info->outdev, info->okfn, 228 info->indev, info->outdev, info->okfn,
228 verdict >> NF_VERDICT_BITS)) 229 verdict >> NF_VERDICT_BITS))
229 goto next_hook; 230 goto next_hook;
230 break; 231 break;
232 default:
233 kfree_skb(skb);
231 } 234 }
232 rcu_read_unlock(); 235 rcu_read_unlock();
233
234 if (verdict == NF_DROP)
235 kfree_skb(skb);
236
237 kfree(info); 236 kfree(info);
238 return; 237 return;
239} 238}
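
Editor's note: the reworked nf_reinject() switch folds the old trailing "if (verdict == NF_DROP) kfree_skb(skb)" into the switch itself. Read it with the fall-throughs in mind: NF_ACCEPT and NF_STOP hand the skb to okfn() and then fall into the NF_STOLEN break (a stolen skb is someone else's responsibility, so nothing more is done); NF_QUEUE requeues; every other verdict, NF_DROP included, lands in the default arm and frees the skb. Condensed from the hunk above:

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		info->okfn(skb);	/* deliberate fall through */
	case NF_STOLEN:
		break;			/* skb is owned elsewhere now */
	case NF_QUEUE:
		if (!nf_queue(&skb, elem, info->pf, info->hook,
			      info->indev, info->outdev, info->okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;
		break;
	default:
		kfree_skb(skb);		/* NF_DROP and anything unrecognised */
	}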
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 5fe4c9df17f5..a9f4f6f3c628 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -113,6 +113,21 @@ checkentry(const char *tablename,
113 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || 113 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
114 info->bitmask & ~XT_PHYSDEV_OP_MASK) 114 info->bitmask & ~XT_PHYSDEV_OP_MASK)
115 return 0; 115 return 0;
116 if (brnf_deferred_hooks == 0 &&
117 info->bitmask & XT_PHYSDEV_OP_OUT &&
118 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
119 info->invert & XT_PHYSDEV_OP_BRIDGED) &&
120 hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
121 (1 << NF_IP_POST_ROUTING))) {
122 printk(KERN_WARNING "physdev match: using --physdev-out in the "
123 "OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
124 "traffic is deprecated and breaks other things, it will "
125 "be removed in January 2007. See Documentation/"
126 "feature-removal-schedule.txt for details. This doesn't "
127 "affect you in case you're using it for purely bridged "
128 "traffic.\n");
129 brnf_deferred_hooks = 1;
130 }
116 return 1; 131 return 1;
117} 132}
118 133
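
Editor's note: the block added to checkentry() is a one-shot deprecation warning. In condensed form (hypothetical helper; brnf_deferred_hooks is assumed to be the bridge-netfilter flag the real code latches), the deprecated case is --physdev-out matched on possibly non-bridged traffic in the OUTPUT, FORWARD or POSTROUTING hooks:

	/* Hypothetical helper condensing the new test in checkentry(). */
	static int physdev_out_nonbridged(const struct xt_physdev_info *info,
					  unsigned int hook_mask)
	{
		return (info->bitmask & XT_PHYSDEV_OP_OUT) &&
		       (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
			(info->invert & XT_PHYSDEV_OP_BRIDGED)) &&
		       (hook_mask & ((1 << NF_IP_LOCAL_OUT) |
				     (1 << NF_IP_FORWARD) |
				     (1 << NF_IP_POST_ROUTING)));
	}

When the helper's condition holds and the warning has not fired yet, the hunk prints the message once and sets brnf_deferred_hooks so the deprecated configuration keeps working until its scheduled removal.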
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c
index 3ac703b5cb8f..d2f5320a80bf 100644
--- a/net/netfilter/xt_pkttype.c
+++ b/net/netfilter/xt_pkttype.c
@@ -9,6 +9,8 @@
9#include <linux/skbuff.h> 9#include <linux/skbuff.h>
10#include <linux/if_ether.h> 10#include <linux/if_ether.h>
11#include <linux/if_packet.h> 11#include <linux/if_packet.h>
12#include <linux/in.h>
13#include <linux/ip.h>
12 14
13#include <linux/netfilter/xt_pkttype.h> 15#include <linux/netfilter/xt_pkttype.h>
14#include <linux/netfilter/x_tables.h> 16#include <linux/netfilter/x_tables.h>
@@ -28,9 +30,17 @@ static int match(const struct sk_buff *skb,
28 unsigned int protoff, 30 unsigned int protoff,
29 int *hotdrop) 31 int *hotdrop)
30{ 32{
33 u_int8_t type;
31 const struct xt_pkttype_info *info = matchinfo; 34 const struct xt_pkttype_info *info = matchinfo;
32 35
33 return (skb->pkt_type == info->pkttype) ^ info->invert; 36 if (skb->pkt_type == PACKET_LOOPBACK)
37 type = (MULTICAST(skb->nh.iph->daddr)
38 ? PACKET_MULTICAST
39 : PACKET_BROADCAST);
40 else
41 type = skb->pkt_type;
42
43 return (type == info->pkttype) ^ info->invert;
34} 44}
35 45
36static struct xt_match pkttype_match = { 46static struct xt_match pkttype_match = {
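
Editor's note: the new match() logic handles packets delivered over the loopback path, whose skb->pkt_type is PACKET_LOOPBACK rather than the type the sender used. The original classification is recovered from the IPv4 destination address: a class D destination means multicast, anything else is treated as broadcast. Sketch, assuming an IPv4 header is present (the in-tree code of this era reaches it via skb->nh.iph):

	/* Sketch of the classification step added to match(). */
	static u_int8_t pkttype_effective_type(const struct sk_buff *skb)
	{
		if (skb->pkt_type != PACKET_LOOPBACK)
			return skb->pkt_type;

		/* Loopback delivery hides the original type; recover it
		 * from the destination address.
		 */
		return MULTICAST(skb->nh.iph->daddr) ? PACKET_MULTICAST
						     : PACKET_BROADCAST;
	}

The extra <linux/in.h> and <linux/ip.h> includes in the hunk supply MULTICAST() and the iphdr layout.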
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 55c0adc8f115..b85c1f9f1288 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -562,10 +562,9 @@ static int netlink_alloc_groups(struct sock *sk)
562 if (err) 562 if (err)
563 return err; 563 return err;
564 564
565 nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL); 565 nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
566 if (nlk->groups == NULL) 566 if (nlk->groups == NULL)
567 return -ENOMEM; 567 return -ENOMEM;
568 memset(nlk->groups, 0, NLGRPSZ(groups));
569 nlk->ngroups = groups; 568 nlk->ngroups = groups;
570 return 0; 569 return 0;
571} 570}
@@ -1393,11 +1392,10 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1393 struct sock *sk; 1392 struct sock *sk;
1394 struct netlink_sock *nlk; 1393 struct netlink_sock *nlk;
1395 1394
1396 cb = kmalloc(sizeof(*cb), GFP_KERNEL); 1395 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1397 if (cb == NULL) 1396 if (cb == NULL)
1398 return -ENOBUFS; 1397 return -ENOBUFS;
1399 1398
1400 memset(cb, 0, sizeof(*cb));
1401 cb->dump = dump; 1399 cb->dump = dump;
1402 cb->done = done; 1400 cb->done = done;
1403 cb->nlh = nlh; 1401 cb->nlh = nlh;
@@ -1668,7 +1666,7 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
1668 struct nl_seq_iter *iter; 1666 struct nl_seq_iter *iter;
1669 int err; 1667 int err;
1670 1668
1671 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 1669 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1672 if (!iter) 1670 if (!iter)
1673 return -ENOMEM; 1671 return -ENOMEM;
1674 1672
@@ -1678,7 +1676,6 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
1678 return err; 1676 return err;
1679 } 1677 }
1680 1678
1681 memset(iter, 0, sizeof(*iter));
1682 seq = file->private_data; 1679 seq = file->private_data;
1683 seq->private = iter; 1680 seq->private = iter;
1684 return 0; 1681 return 0;
@@ -1747,15 +1744,13 @@ static int __init netlink_proto_init(void)
1747 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) 1744 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
1748 netlink_skb_parms_too_large(); 1745 netlink_skb_parms_too_large();
1749 1746
1750 nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL); 1747 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
1751 if (!nl_table) { 1748 if (!nl_table) {
1752enomem: 1749enomem:
1753 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n"); 1750 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
1754 return -ENOMEM; 1751 return -ENOMEM;
1755 } 1752 }
1756 1753
1757 memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);
1758
1759 if (num_physpages >= (128 * 1024)) 1754 if (num_physpages >= (128 * 1024))
1760 max = num_physpages >> (21 - PAGE_SHIFT); 1755 max = num_physpages >> (21 - PAGE_SHIFT);
1761 else 1756 else
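
Editor's note: where the zeroed allocation is an array, the series uses kcalloc() rather than kzalloc(). kcalloc(n, size, flags) zeroes the memory and also guards the n * size multiplication against overflow, which the open-coded kmalloc() + memset() pair did not. A minimal sketch with a hypothetical element type:

	#include <linux/slab.h>

	struct bar_entry {
		unsigned long key;	/* hypothetical element type */
	};

	/* Zeroed array allocation with an overflow-checked n * size. */
	static struct bar_entry *bar_table_alloc(size_t nentries)
	{
		return kcalloc(nentries, sizeof(struct bar_entry), GFP_KERNEL);
	}

The nl_table conversion above and the cls_tcindex perfect-hash allocation below follow this form.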
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 573b572f8f91..93d2c55ad2d5 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -58,13 +58,12 @@ static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
58 _enter("%p",peer); 58 _enter("%p",peer);
59 59
60 /* allocate and initialise a connection record */ 60 /* allocate and initialise a connection record */
61 conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); 61 conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
62 if (!conn) { 62 if (!conn) {
63 _leave(" = -ENOMEM"); 63 _leave(" = -ENOMEM");
64 return -ENOMEM; 64 return -ENOMEM;
65 } 65 }
66 66
67 memset(conn, 0, sizeof(struct rxrpc_connection));
68 atomic_set(&conn->usage, 1); 67 atomic_set(&conn->usage, 1);
69 68
70 INIT_LIST_HEAD(&conn->link); 69 INIT_LIST_HEAD(&conn->link);
@@ -535,13 +534,12 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
535 return -EINVAL; 534 return -EINVAL;
536 } 535 }
537 536
538 msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags); 537 msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags);
539 if (!msg) { 538 if (!msg) {
540 _leave(" = -ENOMEM"); 539 _leave(" = -ENOMEM");
541 return -ENOMEM; 540 return -ENOMEM;
542 } 541 }
543 542
544 memset(msg, 0, sizeof(*msg));
545 atomic_set(&msg->usage, 1); 543 atomic_set(&msg->usage, 1);
546 544
547 INIT_LIST_HEAD(&msg->link); 545 INIT_LIST_HEAD(&msg->link);
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c
index ed38f5b17c1b..8a275157a3bb 100644
--- a/net/rxrpc/peer.c
+++ b/net/rxrpc/peer.c
@@ -58,13 +58,12 @@ static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
58 _enter("%p,%08x", trans, ntohl(addr)); 58 _enter("%p,%08x", trans, ntohl(addr));
59 59
60 /* allocate and initialise a peer record */ 60 /* allocate and initialise a peer record */
61 peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); 61 peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
62 if (!peer) { 62 if (!peer) {
63 _leave(" = -ENOMEM"); 63 _leave(" = -ENOMEM");
64 return -ENOMEM; 64 return -ENOMEM;
65 } 65 }
66 66
67 memset(peer, 0, sizeof(struct rxrpc_peer));
68 atomic_set(&peer->usage, 1); 67 atomic_set(&peer->usage, 1);
69 68
70 INIT_LIST_HEAD(&peer->link); 69 INIT_LIST_HEAD(&peer->link);
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index dbe6105e83a5..465efc86fccf 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -68,11 +68,10 @@ int rxrpc_create_transport(unsigned short port,
68 68
69 _enter("%hu", port); 69 _enter("%hu", port);
70 70
71 trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); 71 trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
72 if (!trans) 72 if (!trans)
73 return -ENOMEM; 73 return -ENOMEM;
74 74
75 memset(trans, 0, sizeof(struct rxrpc_transport));
76 atomic_set(&trans->usage, 1); 75 atomic_set(&trans->usage, 1);
77 INIT_LIST_HEAD(&trans->services); 76 INIT_LIST_HEAD(&trans->services);
78 INIT_LIST_HEAD(&trans->link); 77 INIT_LIST_HEAD(&trans->link);
@@ -312,13 +311,12 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
312 311
313 _enter(""); 312 _enter("");
314 313
315 msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL); 314 msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
316 if (!msg) { 315 if (!msg) {
317 _leave(" = -ENOMEM"); 316 _leave(" = -ENOMEM");
318 return -ENOMEM; 317 return -ENOMEM;
319 } 318 }
320 319
321 memset(msg, 0, sizeof(*msg));
322 atomic_set(&msg->usage, 1); 320 atomic_set(&msg->usage, 1);
323 list_add_tail(&msg->link,msgq); 321 list_add_tail(&msg->link,msgq);
324 322
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9affeeedf107..a2587b52e531 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -312,10 +312,9 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
312 } 312 }
313 313
314 *err = -ENOMEM; 314 *err = -ENOMEM;
315 a = kmalloc(sizeof(*a), GFP_KERNEL); 315 a = kzalloc(sizeof(*a), GFP_KERNEL);
316 if (a == NULL) 316 if (a == NULL)
317 goto err_mod; 317 goto err_mod;
318 memset(a, 0, sizeof(*a));
319 318
320 /* backward compatibility for policer */ 319 /* backward compatibility for policer */
321 if (name == NULL) 320 if (name == NULL)
@@ -492,10 +491,9 @@ tcf_action_get_1(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int *err)
492 index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]); 491 index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]);
493 492
494 *err = -ENOMEM; 493 *err = -ENOMEM;
495 a = kmalloc(sizeof(struct tc_action), GFP_KERNEL); 494 a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
496 if (a == NULL) 495 if (a == NULL)
497 return NULL; 496 return NULL;
498 memset(a, 0, sizeof(struct tc_action));
499 497
500 *err = -EINVAL; 498 *err = -EINVAL;
501 a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]); 499 a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]);
@@ -531,12 +529,11 @@ static struct tc_action *create_a(int i)
531{ 529{
532 struct tc_action *act; 530 struct tc_action *act;
533 531
534 act = kmalloc(sizeof(*act), GFP_KERNEL); 532 act = kzalloc(sizeof(*act), GFP_KERNEL);
535 if (act == NULL) { 533 if (act == NULL) {
536 printk("create_a: failed to alloc!\n"); 534 printk("create_a: failed to alloc!\n");
537 return NULL; 535 return NULL;
538 } 536 }
539 memset(act, 0, sizeof(*act));
540 act->order = i; 537 act->order = i;
541 return act; 538 return act;
542} 539}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 58b3a8652042..f257475e0e0c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -209,10 +209,9 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key); 209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key);
210 210
211 /* netlink spinlocks held above us - must use ATOMIC */ 211 /* netlink spinlocks held above us - must use ATOMIC */
212 opt = kmalloc(s, GFP_ATOMIC); 212 opt = kzalloc(s, GFP_ATOMIC);
213 if (opt == NULL) 213 if (opt == NULL)
214 return -ENOBUFS; 214 return -ENOBUFS;
215 memset(opt, 0, s);
216 215
217 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key)); 216 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key));
218 opt->index = p->index; 217 opt->index = p->index;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 47e00bd9625e..da905d7b4b40 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -196,10 +196,9 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
196 return ret; 196 return ret;
197 } 197 }
198 198
199 p = kmalloc(sizeof(*p), GFP_KERNEL); 199 p = kzalloc(sizeof(*p), GFP_KERNEL);
200 if (p == NULL) 200 if (p == NULL)
201 return -ENOMEM; 201 return -ENOMEM;
202 memset(p, 0, sizeof(*p));
203 202
204 ret = ACT_P_CREATED; 203 ret = ACT_P_CREATED;
205 p->refcnt = 1; 204 p->refcnt = 1;
@@ -429,11 +428,10 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
429 return p; 428 return p;
430 } 429 }
431 430
432 p = kmalloc(sizeof(*p), GFP_KERNEL); 431 p = kzalloc(sizeof(*p), GFP_KERNEL);
433 if (p == NULL) 432 if (p == NULL)
434 return NULL; 433 return NULL;
435 434
436 memset(p, 0, sizeof(*p));
437 p->refcnt = 1; 435 p->refcnt = 1;
438 spin_lock_init(&p->lock); 436 spin_lock_init(&p->lock);
439 p->stats_lock = &p->lock; 437 p->stats_lock = &p->lock;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 61507f006b11..86cac49a0531 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -178,19 +178,17 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
178 178
179 err = -ENOBUFS; 179 err = -ENOBUFS;
180 if (head == NULL) { 180 if (head == NULL) {
181 head = kmalloc(sizeof(*head), GFP_KERNEL); 181 head = kzalloc(sizeof(*head), GFP_KERNEL);
182 if (head == NULL) 182 if (head == NULL)
183 goto errout; 183 goto errout;
184 184
185 memset(head, 0, sizeof(*head));
186 INIT_LIST_HEAD(&head->flist); 185 INIT_LIST_HEAD(&head->flist);
187 tp->root = head; 186 tp->root = head;
188 } 187 }
189 188
190 f = kmalloc(sizeof(*f), GFP_KERNEL); 189 f = kzalloc(sizeof(*f), GFP_KERNEL);
191 if (f == NULL) 190 if (f == NULL)
192 goto errout; 191 goto errout;
193 memset(f, 0, sizeof(*f));
194 192
195 err = -EINVAL; 193 err = -EINVAL;
196 if (handle) 194 if (handle)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index d41de91fc4f6..e6973d9b686d 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -267,20 +267,18 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
267 return -EINVAL; 267 return -EINVAL;
268 268
269 if (head == NULL) { 269 if (head == NULL) {
270 head = kmalloc(sizeof(struct fw_head), GFP_KERNEL); 270 head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
271 if (head == NULL) 271 if (head == NULL)
272 return -ENOBUFS; 272 return -ENOBUFS;
273 memset(head, 0, sizeof(*head));
274 273
275 tcf_tree_lock(tp); 274 tcf_tree_lock(tp);
276 tp->root = head; 275 tp->root = head;
277 tcf_tree_unlock(tp); 276 tcf_tree_unlock(tp);
278 } 277 }
279 278
280 f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL); 279 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
281 if (f == NULL) 280 if (f == NULL)
282 return -ENOBUFS; 281 return -ENOBUFS;
283 memset(f, 0, sizeof(*f));
284 282
285 f->id = handle; 283 f->id = handle;
286 284
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index c2e71900f7bd..d3aea730d4c8 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -396,10 +396,9 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
396 h1 = to_hash(nhandle); 396 h1 = to_hash(nhandle);
397 if ((b = head->table[h1]) == NULL) { 397 if ((b = head->table[h1]) == NULL) {
398 err = -ENOBUFS; 398 err = -ENOBUFS;
399 b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL); 399 b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
400 if (b == NULL) 400 if (b == NULL)
401 goto errout; 401 goto errout;
402 memset(b, 0, sizeof(*b));
403 402
404 tcf_tree_lock(tp); 403 tcf_tree_lock(tp);
405 head->table[h1] = b; 404 head->table[h1] = b;
@@ -475,20 +474,18 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
475 474
476 err = -ENOBUFS; 475 err = -ENOBUFS;
477 if (head == NULL) { 476 if (head == NULL) {
478 head = kmalloc(sizeof(struct route4_head), GFP_KERNEL); 477 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
479 if (head == NULL) 478 if (head == NULL)
480 goto errout; 479 goto errout;
481 memset(head, 0, sizeof(struct route4_head));
482 480
483 tcf_tree_lock(tp); 481 tcf_tree_lock(tp);
484 tp->root = head; 482 tp->root = head;
485 tcf_tree_unlock(tp); 483 tcf_tree_unlock(tp);
486 } 484 }
487 485
488 f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL); 486 f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
489 if (f == NULL) 487 if (f == NULL)
490 goto errout; 488 goto errout;
491 memset(f, 0, sizeof(*f));
492 489
493 err = route4_set_parms(tp, base, f, handle, head, tb, 490 err = route4_set_parms(tp, base, f, handle, head, tb,
494 tca[TCA_RATE-1], 1); 491 tca[TCA_RATE-1], 1);
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index ba8741971629..6e230ecfba05 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -240,9 +240,8 @@ static int rsvp_init(struct tcf_proto *tp)
240{ 240{
241 struct rsvp_head *data; 241 struct rsvp_head *data;
242 242
243 data = kmalloc(sizeof(struct rsvp_head), GFP_KERNEL); 243 data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
244 if (data) { 244 if (data) {
245 memset(data, 0, sizeof(struct rsvp_head));
246 tp->root = data; 245 tp->root = data;
247 return 0; 246 return 0;
248 } 247 }
@@ -446,11 +445,10 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
446 goto errout2; 445 goto errout2;
447 446
448 err = -ENOBUFS; 447 err = -ENOBUFS;
449 f = kmalloc(sizeof(struct rsvp_filter), GFP_KERNEL); 448 f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
450 if (f == NULL) 449 if (f == NULL)
451 goto errout2; 450 goto errout2;
452 451
453 memset(f, 0, sizeof(*f));
454 h2 = 16; 452 h2 = 16;
455 if (tb[TCA_RSVP_SRC-1]) { 453 if (tb[TCA_RSVP_SRC-1]) {
456 err = -EINVAL; 454 err = -EINVAL;
@@ -532,10 +530,9 @@ insert:
532 /* No session found. Create new one. */ 530 /* No session found. Create new one. */
533 531
534 err = -ENOBUFS; 532 err = -ENOBUFS;
535 s = kmalloc(sizeof(struct rsvp_session), GFP_KERNEL); 533 s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
536 if (s == NULL) 534 if (s == NULL)
537 goto errout; 535 goto errout;
538 memset(s, 0, sizeof(*s));
539 memcpy(s->dst, dst, sizeof(s->dst)); 536 memcpy(s->dst, dst, sizeof(s->dst));
540 537
541 if (pinfo) { 538 if (pinfo) {
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 7870e7bb0bac..5af8a59e1503 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -148,11 +148,10 @@ static int tcindex_init(struct tcf_proto *tp)
148 struct tcindex_data *p; 148 struct tcindex_data *p;
149 149
150 DPRINTK("tcindex_init(tp %p)\n",tp); 150 DPRINTK("tcindex_init(tp %p)\n",tp);
151 p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL); 151 p = kzalloc(sizeof(struct tcindex_data),GFP_KERNEL);
152 if (!p) 152 if (!p)
153 return -ENOMEM; 153 return -ENOMEM;
154 154
155 memset(p, 0, sizeof(*p));
156 p->mask = 0xffff; 155 p->mask = 0xffff;
157 p->hash = DEFAULT_HASH_SIZE; 156 p->hash = DEFAULT_HASH_SIZE;
158 p->fall_through = 1; 157 p->fall_through = 1;
@@ -296,16 +295,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
296 err = -ENOMEM; 295 err = -ENOMEM;
297 if (!cp.perfect && !cp.h) { 296 if (!cp.perfect && !cp.h) {
298 if (valid_perfect_hash(&cp)) { 297 if (valid_perfect_hash(&cp)) {
299 cp.perfect = kmalloc(cp.hash * sizeof(*r), GFP_KERNEL); 298 cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
300 if (!cp.perfect) 299 if (!cp.perfect)
301 goto errout; 300 goto errout;
302 memset(cp.perfect, 0, cp.hash * sizeof(*r));
303 balloc = 1; 301 balloc = 1;
304 } else { 302 } else {
305 cp.h = kmalloc(cp.hash * sizeof(f), GFP_KERNEL); 303 cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
306 if (!cp.h) 304 if (!cp.h)
307 goto errout; 305 goto errout;
308 memset(cp.h, 0, cp.hash * sizeof(f));
309 balloc = 2; 306 balloc = 2;
310 } 307 }
311 } 308 }
@@ -316,10 +313,9 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
316 r = tcindex_lookup(&cp, handle) ? : &new_filter_result; 313 r = tcindex_lookup(&cp, handle) ? : &new_filter_result;
317 314
318 if (r == &new_filter_result) { 315 if (r == &new_filter_result) {
319 f = kmalloc(sizeof(*f), GFP_KERNEL); 316 f = kzalloc(sizeof(*f), GFP_KERNEL);
320 if (!f) 317 if (!f)
321 goto errout_alloc; 318 goto errout_alloc;
322 memset(f, 0, sizeof(*f));
323 } 319 }
324 320
325 if (tb[TCA_TCINDEX_CLASSID-1]) { 321 if (tb[TCA_TCINDEX_CLASSID-1]) {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d712edcd1bcf..eea366966740 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -307,23 +307,21 @@ static int u32_init(struct tcf_proto *tp)
307 if (tp_c->q == tp->q) 307 if (tp_c->q == tp->q)
308 break; 308 break;
309 309
310 root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL); 310 root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
311 if (root_ht == NULL) 311 if (root_ht == NULL)
312 return -ENOBUFS; 312 return -ENOBUFS;
313 313
314 memset(root_ht, 0, sizeof(*root_ht));
315 root_ht->divisor = 0; 314 root_ht->divisor = 0;
316 root_ht->refcnt++; 315 root_ht->refcnt++;
317 root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000; 316 root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
318 root_ht->prio = tp->prio; 317 root_ht->prio = tp->prio;
319 318
320 if (tp_c == NULL) { 319 if (tp_c == NULL) {
321 tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL); 320 tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
322 if (tp_c == NULL) { 321 if (tp_c == NULL) {
323 kfree(root_ht); 322 kfree(root_ht);
324 return -ENOBUFS; 323 return -ENOBUFS;
325 } 324 }
326 memset(tp_c, 0, sizeof(*tp_c));
327 tp_c->q = tp->q; 325 tp_c->q = tp->q;
328 tp_c->next = u32_list; 326 tp_c->next = u32_list;
329 u32_list = tp_c; 327 u32_list = tp_c;
@@ -571,10 +569,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
571 if (handle == 0) 569 if (handle == 0)
572 return -ENOMEM; 570 return -ENOMEM;
573 } 571 }
574 ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); 572 ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
575 if (ht == NULL) 573 if (ht == NULL)
576 return -ENOBUFS; 574 return -ENOBUFS;
577 memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*));
578 ht->tp_c = tp_c; 575 ht->tp_c = tp_c;
579 ht->refcnt = 0; 576 ht->refcnt = 0;
580 ht->divisor = divisor; 577 ht->divisor = divisor;
@@ -617,18 +614,16 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
617 614
618 s = RTA_DATA(tb[TCA_U32_SEL-1]); 615 s = RTA_DATA(tb[TCA_U32_SEL-1]);
619 616
620 n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); 617 n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
621 if (n == NULL) 618 if (n == NULL)
622 return -ENOBUFS; 619 return -ENOBUFS;
623 620
624 memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
625#ifdef CONFIG_CLS_U32_PERF 621#ifdef CONFIG_CLS_U32_PERF
626 n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); 622 n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
627 if (n->pf == NULL) { 623 if (n->pf == NULL) {
628 kfree(n); 624 kfree(n);
629 return -ENOBUFS; 625 return -ENOBUFS;
630 } 626 }
631 memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64));
632#endif 627#endif
633 628
634 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); 629 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 698372954f4d..61e3b740ab1a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -773,10 +773,9 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
773 TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX) 773 TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
774 goto errout; 774 goto errout;
775 775
776 meta = kmalloc(sizeof(*meta), GFP_KERNEL); 776 meta = kzalloc(sizeof(*meta), GFP_KERNEL);
777 if (meta == NULL) 777 if (meta == NULL)
778 goto errout; 778 goto errout;
779 memset(meta, 0, sizeof(*meta));
780 779
781 memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left)); 780 memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
782 memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right)); 781 memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 2405a86093a2..0fd0768a17c6 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -321,10 +321,9 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct rtattr *rta,
321 list_len = RTA_PAYLOAD(rt_list); 321 list_len = RTA_PAYLOAD(rt_list);
322 matches_len = tree_hdr->nmatches * sizeof(*em); 322 matches_len = tree_hdr->nmatches * sizeof(*em);
323 323
324 tree->matches = kmalloc(matches_len, GFP_KERNEL); 324 tree->matches = kzalloc(matches_len, GFP_KERNEL);
325 if (tree->matches == NULL) 325 if (tree->matches == NULL)
326 goto errout; 326 goto errout;
327 memset(tree->matches, 0, matches_len);
328 327
329 /* We do not use rtattr_parse_nested here because the maximum 328 /* We do not use rtattr_parse_nested here because the maximum
330 * number of attributes is unknown. This saves us the allocation 329 * number of attributes is unknown. This saves us the allocation
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
index 5d3ae03e22a7..0ebc98e9be2d 100644
--- a/net/sched/estimator.c
+++ b/net/sched/estimator.c
@@ -139,11 +139,10 @@ int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct r
139 if (parm->interval < -2 || parm->interval > 3) 139 if (parm->interval < -2 || parm->interval > 3)
140 return -EINVAL; 140 return -EINVAL;
141 141
142 est = kmalloc(sizeof(*est), GFP_KERNEL); 142 est = kzalloc(sizeof(*est), GFP_KERNEL);
143 if (est == NULL) 143 if (est == NULL)
144 return -ENOBUFS; 144 return -ENOBUFS;
145 145
146 memset(est, 0, sizeof(*est));
147 est->interval = parm->interval + 2; 146 est->interval = parm->interval + 2;
148 est->stats = stats; 147 est->stats = stats;
149 est->stats_lock = stats_lock; 148 est->stats_lock = stats_lock;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 80b7f6a8d008..bac881bfe362 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1926,10 +1926,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1926 } 1926 }
1927 1927
1928 err = -ENOBUFS; 1928 err = -ENOBUFS;
1929 cl = kmalloc(sizeof(*cl), GFP_KERNEL); 1929 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1930 if (cl == NULL) 1930 if (cl == NULL)
1931 goto failure; 1931 goto failure;
1932 memset(cl, 0, sizeof(*cl));
1933 cl->R_tab = rtab; 1932 cl->R_tab = rtab;
1934 rtab = NULL; 1933 rtab = NULL;
1935 cl->refcnt = 1; 1934 cl->refcnt = 1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d735f51686a1..0834c2ee9174 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -432,10 +432,9 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
432 size = QDISC_ALIGN(sizeof(*sch)); 432 size = QDISC_ALIGN(sizeof(*sch));
433 size += ops->priv_size + (QDISC_ALIGNTO - 1); 433 size += ops->priv_size + (QDISC_ALIGNTO - 1);
434 434
435 p = kmalloc(size, GFP_KERNEL); 435 p = kzalloc(size, GFP_KERNEL);
436 if (!p) 436 if (!p)
437 goto errout; 437 goto errout;
438 memset(p, 0, size);
439 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 438 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
440 sch->padded = (char *) sch - (char *) p; 439 sch->padded = (char *) sch - (char *) p;
441 440
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0cafdd5feb1b..18e81a8ffb01 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -406,10 +406,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
406 struct gred_sched_data *q; 406 struct gred_sched_data *q;
407 407
408 if (table->tab[dp] == NULL) { 408 if (table->tab[dp] == NULL) {
409 table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL); 409 table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
410 if (table->tab[dp] == NULL) 410 if (table->tab[dp] == NULL)
411 return -ENOMEM; 411 return -ENOMEM;
412 memset(table->tab[dp], 0, sizeof(*q));
413 } 412 }
414 413
415 q = table->tab[dp]; 414 q = table->tab[dp];
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6b1b4a981e88..6a6735a2ed35 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1123,10 +1123,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1123 if (rsc == NULL && fsc == NULL) 1123 if (rsc == NULL && fsc == NULL)
1124 return -EINVAL; 1124 return -EINVAL;
1125 1125
1126 cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL); 1126 cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
1127 if (cl == NULL) 1127 if (cl == NULL)
1128 return -ENOBUFS; 1128 return -ENOBUFS;
1129 memset(cl, 0, sizeof(struct hfsc_class));
1130 1129
1131 if (rsc != NULL) 1130 if (rsc != NULL)
1132 hfsc_change_rsc(cl, rsc, 0); 1131 hfsc_change_rsc(cl, rsc, 0);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index cc5f339e6f91..880a3394a51f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1559,10 +1559,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1559 goto failure; 1559 goto failure;
1560 } 1560 }
1561 err = -ENOBUFS; 1561 err = -ENOBUFS;
1562 if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1562 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1563 goto failure; 1563 goto failure;
1564 1564
1565 memset(cl, 0, sizeof(*cl));
1566 cl->refcnt = 1; 1565 cl->refcnt = 1;
1567 INIT_LIST_HEAD(&cl->sibling); 1566 INIT_LIST_HEAD(&cl->sibling);
1568 INIT_LIST_HEAD(&cl->hlist); 1567 INIT_LIST_HEAD(&cl->hlist);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c5bd8064e6d8..a08ec4c7c55d 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -148,7 +148,8 @@ static long tabledist(unsigned long mu, long sigma,
148static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) 148static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
149{ 149{
150 struct netem_sched_data *q = qdisc_priv(sch); 150 struct netem_sched_data *q = qdisc_priv(sch);
151 struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; 151 /* We don't fill cb now as skb_unshare() may invalidate it */
152 struct netem_skb_cb *cb;
152 struct sk_buff *skb2; 153 struct sk_buff *skb2;
153 int ret; 154 int ret;
154 int count = 1; 155 int count = 1;
@@ -200,6 +201,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
200 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 201 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
201 } 202 }
202 203
204 cb = (struct netem_skb_cb *)skb->cb;
203 if (q->gap == 0 /* not doing reordering */ 205 if (q->gap == 0 /* not doing reordering */
204 || q->counter < q->gap /* inside last reordering gap */ 206 || q->counter < q->gap /* inside last reordering gap */
205 || q->reorder < get_crandom(&q->reorder_cor)) { 207 || q->reorder < get_crandom(&q->reorder_cor)) {
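
Editor's note: the netem change is an ordering fix rather than an allocation cleanup. skb->cb lives inside the sk_buff, and the enqueue path may replace the buffer before it is queued (the added comment names skb_unshare(), used on the corruption branch), so a cb pointer taken at the top of netem_enqueue() could end up pointing into a buffer that is no longer the one being enqueued. The pointer is therefore computed only after those steps:

	struct netem_skb_cb *cb;	/* not derived from skb->cb yet */

	/* ... duplication / corruption handling that may replace skb ... */

	cb = (struct netem_skb_cb *)skb->cb;	/* skb is final here */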
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9d05e13e92f6..27329ce9c311 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -441,7 +441,8 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
441 /* If the primary path is changing, assume that the 441 /* If the primary path is changing, assume that the
442 * user wants to use this new path. 442 * user wants to use this new path.
443 */ 443 */
444 if (transport->state != SCTP_INACTIVE) 444 if ((transport->state == SCTP_ACTIVE) ||
445 (transport->state == SCTP_UNKNOWN))
445 asoc->peer.active_path = transport; 446 asoc->peer.active_path = transport;
446 447
447 /* 448 /*
@@ -532,11 +533,11 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
532 port = addr->v4.sin_port; 533 port = addr->v4.sin_port;
533 534
534 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", 535 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
535 " port: %d state:%s\n", 536 " port: %d state:%d\n",
536 asoc, 537 asoc,
537 addr, 538 addr,
538 addr->v4.sin_port, 539 addr->v4.sin_port,
539 peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE"); 540 peer_state);
540 541
541 /* Set the port if it has not been set yet. */ 542 /* Set the port if it has not been set yet. */
542 if (0 == asoc->peer.port) 543 if (0 == asoc->peer.port)
@@ -545,9 +546,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
545 /* Check to see if this is a duplicate. */ 546 /* Check to see if this is a duplicate. */
546 peer = sctp_assoc_lookup_paddr(asoc, addr); 547 peer = sctp_assoc_lookup_paddr(asoc, addr);
547 if (peer) { 548 if (peer) {
548 if (peer_state == SCTP_ACTIVE && 549 if (peer->state == SCTP_UNKNOWN) {
549 peer->state == SCTP_UNKNOWN) 550 if (peer_state == SCTP_ACTIVE)
550 peer->state = SCTP_ACTIVE; 551 peer->state = SCTP_ACTIVE;
552 if (peer_state == SCTP_UNCONFIRMED)
553 peer->state = SCTP_UNCONFIRMED;
554 }
551 return peer; 555 return peer;
552 } 556 }
553 557
@@ -739,7 +743,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
739 list_for_each(pos, &asoc->peer.transport_addr_list) { 743 list_for_each(pos, &asoc->peer.transport_addr_list) {
740 t = list_entry(pos, struct sctp_transport, transports); 744 t = list_entry(pos, struct sctp_transport, transports);
741 745
742 if (t->state == SCTP_INACTIVE) 746 if ((t->state == SCTP_INACTIVE) ||
747 (t->state == SCTP_UNCONFIRMED))
743 continue; 748 continue;
744 if (!first || t->last_time_heard > first->last_time_heard) { 749 if (!first || t->last_time_heard > first->last_time_heard) {
745 second = first; 750 second = first;
@@ -759,7 +764,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
759 * [If the primary is active but not most recent, bump the most 764 * [If the primary is active but not most recent, bump the most
760 * recently used transport.] 765 * recently used transport.]
761 */ 766 */
762 if (asoc->peer.primary_path->state != SCTP_INACTIVE && 767 if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
768 (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
763 first != asoc->peer.primary_path) { 769 first != asoc->peer.primary_path) {
764 second = first; 770 second = first;
765 first = asoc->peer.primary_path; 771 first = asoc->peer.primary_path;
@@ -1054,7 +1060,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
1054 transports); 1060 transports);
1055 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) 1061 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1056 sctp_assoc_add_peer(asoc, &trans->ipaddr, 1062 sctp_assoc_add_peer(asoc, &trans->ipaddr,
1057 GFP_ATOMIC, SCTP_ACTIVE); 1063 GFP_ATOMIC, trans->state);
1058 } 1064 }
1059 1065
1060 asoc->ctsn_ack_point = asoc->next_tsn - 1; 1066 asoc->ctsn_ack_point = asoc->next_tsn - 1;
@@ -1094,7 +1100,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1094 1100
1095 /* Try to find an active transport. */ 1101 /* Try to find an active transport. */
1096 1102
1097 if (t->state != SCTP_INACTIVE) { 1103 if ((t->state == SCTP_ACTIVE) ||
1104 (t->state == SCTP_UNKNOWN)) {
1098 break; 1105 break;
1099 } else { 1106 } else {
1100 /* Keep track of the next transport in case 1107 /* Keep track of the next transport in case
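
Editor's note: the sctp/associola.c hunks follow from the SCTP_UNCONFIRMED transport state introduced alongside this series (the motivation stated here is an assumption): a bare "state != SCTP_INACTIVE" test would treat an unconfirmed address as usable, so the checks become explicit, selecting paths only when ACTIVE or UNKNOWN and skipping them when INACTIVE or UNCONFIRMED. A hypothetical predicate capturing the pattern the hunks open-code:

	/* Hypothetical helper; the in-tree code open-codes this test. */
	static inline int sctp_transport_usable(const struct sctp_transport *t)
	{
		return t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN;
	}

The outqueue.c hunks below apply the complementary form, skipping transports that are INACTIVE or UNCONFIRMED.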
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 2b962627f631..2b9c12a170e5 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -146,7 +146,7 @@ void sctp_bind_addr_free(struct sctp_bind_addr *bp)
146 146
147/* Add an address to the bind address list in the SCTP_bind_addr structure. */ 147/* Add an address to the bind address list in the SCTP_bind_addr structure. */
148int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, 148int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
149 gfp_t gfp) 149 __u8 use_as_src, gfp_t gfp)
150{ 150{
151 struct sctp_sockaddr_entry *addr; 151 struct sctp_sockaddr_entry *addr;
152 152
@@ -163,6 +163,8 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
163 if (!addr->a.v4.sin_port) 163 if (!addr->a.v4.sin_port)
164 addr->a.v4.sin_port = bp->port; 164 addr->a.v4.sin_port = bp->port;
165 165
166 addr->use_as_src = use_as_src;
167
166 INIT_LIST_HEAD(&addr->list); 168 INIT_LIST_HEAD(&addr->list);
167 list_add_tail(&addr->list, &bp->address_list); 169 list_add_tail(&addr->list, &bp->address_list);
168 SCTP_DBG_OBJCNT_INC(addr); 170 SCTP_DBG_OBJCNT_INC(addr);
@@ -274,7 +276,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
274 } 276 }
275 277
276 af->from_addr_param(&addr, rawaddr, port, 0); 278 af->from_addr_param(&addr, rawaddr, port, 0);
277 retval = sctp_add_bind_addr(bp, &addr, gfp); 279 retval = sctp_add_bind_addr(bp, &addr, 1, gfp);
278 if (retval) { 280 if (retval) {
279 /* Can't finish building the list, clean up. */ 281 /* Can't finish building the list, clean up. */
280 sctp_bind_addr_clean(bp); 282 sctp_bind_addr_clean(bp);
@@ -367,7 +369,7 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
367 (((AF_INET6 == addr->sa.sa_family) && 369 (((AF_INET6 == addr->sa.sa_family) &&
368 (flags & SCTP_ADDR6_ALLOWED) && 370 (flags & SCTP_ADDR6_ALLOWED) &&
369 (flags & SCTP_ADDR6_PEERSUPP)))) 371 (flags & SCTP_ADDR6_PEERSUPP))))
370 error = sctp_add_bind_addr(dest, addr, gfp); 372 error = sctp_add_bind_addr(dest, addr, 1, gfp);
371 } 373 }
372 374
373 return error; 375 return error;
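
Editor's note: sctp_add_bind_addr() grows a use_as_src argument, stored on the sctp_sockaddr_entry; the later ipv6.c and protocol.c hunks then skip entries whose flag is clear during source-address selection. Every call site visible in this diff passes 1; presumably the zero case is reserved for addresses that have been added but not yet confirmed, though that policy is not visible in these hunks. The new shape of the interface:

	/* New prototype as used throughout this series; use_as_src gates
	 * whether the address may be chosen as a source for outgoing
	 * packets.
	 */
	int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
			       __u8 use_as_src, gfp_t gfp);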
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 67bd53070ee0..ffda1d680529 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -158,6 +158,12 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
158void sctp_endpoint_free(struct sctp_endpoint *ep) 158void sctp_endpoint_free(struct sctp_endpoint *ep)
159{ 159{
160 ep->base.dead = 1; 160 ep->base.dead = 1;
161
162 ep->base.sk->sk_state = SCTP_SS_CLOSED;
163
164 /* Unlink this endpoint, so we can't find it again! */
165 sctp_unhash_endpoint(ep);
166
161 sctp_endpoint_put(ep); 167 sctp_endpoint_put(ep);
162} 168}
163 169
@@ -166,11 +172,6 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
166{ 172{
167 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
168 174
169 ep->base.sk->sk_state = SCTP_SS_CLOSED;
170
171 /* Unlink this endpoint, so we can't find it again! */
172 sctp_unhash_endpoint(ep);
173
174 /* Free up the HMAC transform. */ 175 /* Free up the HMAC transform. */
175 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); 176 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
176 177
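
Editor's note: the endpoint hunk moves the "mark socket closed and unhash" steps from sctp_endpoint_destroy(), which runs when the last reference is dropped, up into sctp_endpoint_free(), which runs when teardown starts. The likely effect, stated as an assumption, is that an endpoint being freed can no longer be found by new lookups during the window before its refcount actually reaches zero. Sketch of the resulting split:

	/* Resulting division of work (sketch). */
	void sctp_endpoint_free(struct sctp_endpoint *ep)
	{
		ep->base.dead = 1;
		ep->base.sk->sk_state = SCTP_SS_CLOSED;
		sctp_unhash_endpoint(ep);	/* no new lookups from here on */
		sctp_endpoint_put(ep);		/* may or may not be the last ref */
	}

	static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
	{
		/* runs once the refcount hits zero; only resource teardown left,
		 * e.g. freeing the HMAC transform as shown above */
	}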
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 8ef08070c8b6..99c0cefc04e0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -290,7 +290,8 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
290 sctp_read_lock(addr_lock); 290 sctp_read_lock(addr_lock);
291 list_for_each(pos, &bp->address_list) { 291 list_for_each(pos, &bp->address_list) {
292 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 292 laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
293 if ((laddr->a.sa.sa_family == AF_INET6) && 293 if ((laddr->use_as_src) &&
294 (laddr->a.sa.sa_family == AF_INET6) &&
294 (scope <= sctp_scope(&laddr->a))) { 295 (scope <= sctp_scope(&laddr->a))) {
295 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 296 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
296 if (!baddr || (matchlen < bmatchlen)) { 297 if (!baddr || (matchlen < bmatchlen)) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e5faa351aaad..30b710c54e64 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -691,7 +691,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
691 691
692 if (!new_transport) { 692 if (!new_transport) {
693 new_transport = asoc->peer.active_path; 693 new_transport = asoc->peer.active_path;
694 } else if (new_transport->state == SCTP_INACTIVE) { 694 } else if ((new_transport->state == SCTP_INACTIVE) ||
695 (new_transport->state == SCTP_UNCONFIRMED)) {
695 /* If the chunk is Heartbeat or Heartbeat Ack, 696 /* If the chunk is Heartbeat or Heartbeat Ack,
696 * send it to chunk->transport, even if it's 697 * send it to chunk->transport, even if it's
697 * inactive. 698 * inactive.
@@ -848,7 +849,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 			 */
 			new_transport = chunk->transport;
 			if (!new_transport ||
-			    new_transport->state == SCTP_INACTIVE)
+			    ((new_transport->state == SCTP_INACTIVE) ||
+			     (new_transport->state == SCTP_UNCONFIRMED)))
 				new_transport = asoc->peer.active_path;
 
 			/* Change packets if necessary. */
@@ -1464,7 +1466,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 			/* Mark the destination transport address as
 			 * active if it is not so marked.
 			 */
-			if (transport->state == SCTP_INACTIVE) {
+			if ((transport->state == SCTP_INACTIVE) ||
+			    (transport->state == SCTP_UNCONFIRMED)) {
 				sctp_assoc_control_transport(
 					transport->asoc,
 					transport,
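
All three outqueue.c hunks apply the same rule: a transport in either SCTP_INACTIVE or the new SCTP_UNCONFIRMED state is not a usable data destination, so the queue falls back to the active path. A hedged sketch of how that repeated condition reads when factored into one predicate (the helper name is illustrative, not the kernel's):

    #include <stdio.h>

    /* transport states as used in this patch (values are illustrative) */
    enum transport_state {
            TRANSPORT_ACTIVE,
            TRANSPORT_INACTIVE,
            TRANSPORT_UNCONFIRMED,   /* added by this patch: peer address not yet verified */
    };

    /* a transport is only a valid destination once it is confirmed and active */
    static int transport_is_usable(enum transport_state state)
    {
            return state != TRANSPORT_INACTIVE && state != TRANSPORT_UNCONFIRMED;
    }

    int main(void)
    {
            printf("active: %d, inactive: %d, unconfirmed: %d\n",
                   transport_is_usable(TRANSPORT_ACTIVE),
                   transport_is_usable(TRANSPORT_INACTIVE),
                   transport_is_usable(TRANSPORT_UNCONFIRMED));
            return 0;
    }
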
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 816c033d7886..1ab03a27a76e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -240,7 +240,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
 		    (((AF_INET6 == addr->a.sa.sa_family) &&
 		      (copy_flags & SCTP_ADDR6_ALLOWED) &&
 		      (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
-			error = sctp_add_bind_addr(bp, &addr->a,
+			error = sctp_add_bind_addr(bp, &addr->a, 1,
 						   GFP_ATOMIC);
 			if (error)
 				goto end_copy;
@@ -486,6 +486,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
 		list_for_each(pos, &bp->address_list) {
 			laddr = list_entry(pos, struct sctp_sockaddr_entry,
 					   list);
+			if (!laddr->use_as_src)
+				continue;
 			sctp_v4_dst_saddr(&dst_saddr, dst, bp->port);
 			if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
 				goto out_unlock;
@@ -506,7 +508,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
 	list_for_each(pos, &bp->address_list) {
 		laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
 
-		if (AF_INET == laddr->a.sa.sa_family) {
+		if ((laddr->use_as_src) &&
+		    (AF_INET == laddr->a.sa.sa_family)) {
 			fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
 			if (!ip_route_output_key(&rt, &fl)) {
 				dst = &rt->u.dst;
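
Both protocol.c hunks, like the ipv6.c one above, add the same filter: when choosing a source address from the bind address list, entries whose use_as_src flag is clear are skipped, because they have been announced via ASCONF but not yet acknowledged by the peer. A small sketch of that filtering loop over a plain linked list, with hypothetical structure names:

    #include <stdio.h>

    struct local_addr {
            const char *name;        /* printable form, for the demo only   */
            int use_as_src;          /* 0 until the peer has acknowledged it */
            struct local_addr *next;
    };

    /* return the first address that may be used as a packet source */
    static const struct local_addr *pick_source(const struct local_addr *list)
    {
            for (; list; list = list->next) {
                    if (!list->use_as_src)
                            continue;        /* announced but not yet usable */
                    return list;
            }
            return NULL;
    }

    int main(void)
    {
            struct local_addr pending = { "192.0.2.20", 0, NULL };
            struct local_addr ready   = { "192.0.2.10", 1, &pending };

            const struct local_addr *src = pick_source(&ready);
            printf("source: %s\n", src ? src->name : "(none)");
            return 0;
    }
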
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2a8773691695..4f11f5858209 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1493,7 +1493,7 @@ no_hmac:
 
 	/* Also, add the destination address. */
 	if (list_empty(&retval->base.bind_addr.address_list)) {
-		sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,
+		sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
 				   GFP_ATOMIC);
 	}
 
@@ -2017,7 +2017,7 @@ static int sctp_process_param(struct sctp_association *asoc,
 		af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);
 		scope = sctp_scope(peer_addr);
 		if (sctp_in_scope(&addr, scope))
-			if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE))
+			if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
 				return 0;
 		break;
 
@@ -2418,7 +2418,7 @@ static __u16 sctp_process_asconf_param(struct sctp_association *asoc,
 	 * Due to Resource Shortage'.
 	 */
 
-	peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE);
+	peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
 	if (!peer)
 		return SCTP_ERROR_RSRC_LOW;
 
@@ -2565,6 +2565,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 	union sctp_addr_param *addr_param;
 	struct list_head *pos;
 	struct sctp_transport *transport;
+	struct sctp_sockaddr_entry *saddr;
 	int retval = 0;
 
 	addr_param = (union sctp_addr_param *)
@@ -2578,7 +2579,11 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 	case SCTP_PARAM_ADD_IP:
 		sctp_local_bh_disable();
 		sctp_write_lock(&asoc->base.addr_lock);
-		retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC);
+		list_for_each(pos, &bp->address_list) {
+			saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+			if (sctp_cmp_addr_exact(&saddr->a, &addr))
+				saddr->use_as_src = 1;
+		}
 		sctp_write_unlock(&asoc->base.addr_lock);
 		sctp_local_bh_enable();
 		break;
@@ -2591,6 +2596,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 	list_for_each(pos, &asoc->peer.transport_addr_list) {
 		transport = list_entry(pos, struct sctp_transport,
 				       transports);
+		dst_release(transport->dst);
 		sctp_transport_route(transport, NULL,
 				     sctp_sk(asoc->base.sk));
 	}
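
The ASCONF success path above no longer appends a new bind address; it walks the existing list looking for an exact match and only then flips use_as_src on. "Exact" here means family, port and raw address bytes all agree. A standalone sketch of such a comparison for IPv4/IPv6 sockaddrs, offered as an illustration only and not as the kernel's sctp_cmp_addr_exact():

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    /* exact comparison: family, port and address bytes must all match */
    static int addr_cmp_exact(const struct sockaddr_storage *a,
                              const struct sockaddr_storage *b)
    {
            if (a->ss_family != b->ss_family)
                    return 0;

            if (a->ss_family == AF_INET) {
                    const struct sockaddr_in *a4 = (const struct sockaddr_in *)a;
                    const struct sockaddr_in *b4 = (const struct sockaddr_in *)b;
                    return a4->sin_port == b4->sin_port &&
                           a4->sin_addr.s_addr == b4->sin_addr.s_addr;
            }

            if (a->ss_family == AF_INET6) {
                    const struct sockaddr_in6 *a6 = (const struct sockaddr_in6 *)a;
                    const struct sockaddr_in6 *b6 = (const struct sockaddr_in6 *)b;
                    return a6->sin6_port == b6->sin6_port &&
                           memcmp(&a6->sin6_addr, &b6->sin6_addr,
                                  sizeof(a6->sin6_addr)) == 0;
            }

            return 0;
    }

    int main(void)
    {
            struct sockaddr_storage x, y;
            struct sockaddr_in *x4 = (struct sockaddr_in *)&x;
            struct sockaddr_in *y4 = (struct sockaddr_in *)&y;

            memset(&x, 0, sizeof(x));
            memset(&y, 0, sizeof(y));
            x4->sin_family = y4->sin_family = AF_INET;
            x4->sin_port   = y4->sin_port   = htons(5000);
            inet_pton(AF_INET, "198.51.100.7", &x4->sin_addr);
            inet_pton(AF_INET, "198.51.100.7", &y4->sin_addr);

            printf("exact match: %d\n", addr_cmp_exact(&x, &y));
            return 0;
    }
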
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c5beb2ad7ef7..9c10bdec1afe 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -430,7 +430,11 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
 	/* The check for association's overall error counter exceeding the
 	 * threshold is done in the state function.
 	 */
-	asoc->overall_error_count++;
+	/* When probing UNCONFIRMED addresses, the association overall
+	 * error count is NOT incremented
+	 */
+	if (transport->state != SCTP_UNCONFIRMED)
+		asoc->overall_error_count++;
 
 	if (transport->state != SCTP_INACTIVE &&
 	    (transport->error_count++ >= transport->pathmaxrxt)) {
@@ -610,7 +614,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	/* Mark the destination transport address as active if it is not so
 	 * marked.
 	 */
-	if (t->state == SCTP_INACTIVE)
+	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
 		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
 					     SCTP_HEARTBEAT_SUCCESS);
 
@@ -620,6 +624,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	 */
 	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
 	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
+
+	/* Update the heartbeat timer. */
+	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
+		sctp_transport_hold(t);
 }
 
 /* Helper function to do a transport reset at the expiry of the hearbeat
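
The last sm_sideeffect.c hunk re-arms the heartbeat timer when a heartbeat ACK arrives, and takes an extra reference on the transport only when mod_timer() reports the timer was previously idle, so the reference the timer callback will later drop is always balanced. A hedged userspace sketch of that "hold only if you actually armed an idle timer" rule, with invented names:

    #include <stdio.h>

    struct transport {
            int refcnt;
            int timer_pending;
    };

    /* returns 1 if the timer was already pending, 0 if it was idle
     * (the same return convention the kernel's mod_timer() uses) */
    static int timer_rearm(struct transport *t)
    {
            int was_pending = t->timer_pending;
            t->timer_pending = 1;
            return was_pending;
    }

    static void transport_hold(struct transport *t) { t->refcnt++; }

    static void on_heartbeat_ack(struct transport *t)
    {
            /* only take the reference the timer callback will drop
             * if this call actually armed an idle timer */
            if (!timer_rearm(t))
                    transport_hold(t);
    }

    int main(void)
    {
            struct transport t = { .refcnt = 1, .timer_pending = 0 };
            on_heartbeat_ack(&t);   /* arms the timer, refcnt becomes 2    */
            on_heartbeat_ack(&t);   /* timer already pending, refcnt stays */
            printf("refcnt = %d\n", t.refcnt);
            return 0;
    }
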
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9e58144f4851..ead3f1b0ea3d 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -846,6 +846,7 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
 	hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
 	hbinfo.daddr = transport->ipaddr;
 	hbinfo.sent_at = jiffies;
+	hbinfo.hb_nonce = transport->hb_nonce;
 
 	/* Send a heartbeat to our peer. */
 	paylen = sizeof(sctp_sender_hb_info_t);
@@ -1048,6 +1049,10 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 		return SCTP_DISPOSITION_DISCARD;
 	}
 
+	/* Validate the 64-bit random nonce. */
+	if (hbinfo->hb_nonce != link->hb_nonce)
+		return SCTP_DISPOSITION_DISCARD;
+
 	max_interval = link->hbinterval + link->rto;
 
 	/* Check if the timestamp looks valid. */
@@ -5278,7 +5283,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	datalen -= sizeof(sctp_data_chunk_t);
 
 	deliver = SCTP_CMD_CHUNK_ULP;
-	chunk->data_accepted = 1;
 
 	/* Think about partial delivery. */
 	if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5357,6 +5361,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	if (SCTP_CMD_CHUNK_ULP == deliver)
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
+	chunk->data_accepted = 1;
+
 	/* Note: Some chunks may get overcounted (if we drop) or overcounted
 	 * if we renege and the chunk arrives again.
 	 */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0a2c71d0d8aa..54722e622e6d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -369,7 +369,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 
 	/* Use GFP_ATOMIC since BHs are disabled. */
 	addr->v4.sin_port = ntohs(addr->v4.sin_port);
-	ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC);
+	ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
 	addr->v4.sin_port = htons(addr->v4.sin_port);
 	sctp_write_unlock(&ep->base.addr_lock);
 	sctp_local_bh_enable();
@@ -491,6 +491,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
 	struct sctp_chunk *chunk;
 	struct sctp_sockaddr_entry *laddr;
 	union sctp_addr *addr;
+	union sctp_addr saveaddr;
 	void *addr_buf;
 	struct sctp_af *af;
 	struct list_head *pos;
@@ -558,14 +559,26 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
 	}
 
 	retval = sctp_send_asconf(asoc, chunk);
+	if (retval)
+		goto out;
 
-	/* FIXME: After sending the add address ASCONF chunk, we
-	 * cannot append the address to the association's binding
-	 * address list, because the new address may be used as the
-	 * source of a message sent to the peer before the ASCONF
-	 * chunk is received by the peer. So we should wait until
-	 * ASCONF_ACK is received.
+	/* Add the new addresses to the bind address list with
+	 * use_as_src set to 0.
 	 */
+	sctp_local_bh_disable();
+	sctp_write_lock(&asoc->base.addr_lock);
+	addr_buf = addrs;
+	for (i = 0; i < addrcnt; i++) {
+		addr = (union sctp_addr *)addr_buf;
+		af = sctp_get_af_specific(addr->v4.sin_family);
+		memcpy(&saveaddr, addr, af->sockaddr_len);
+		saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
+		retval = sctp_add_bind_addr(bp, &saveaddr, 0,
+					    GFP_ATOMIC);
+		addr_buf += af->sockaddr_len;
+	}
+	sctp_write_unlock(&asoc->base.addr_lock);
+	sctp_local_bh_enable();
 	}
 
 out:
@@ -676,12 +689,15 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
 	struct sctp_sock *sp;
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc;
+	struct sctp_transport *transport;
 	struct sctp_bind_addr *bp;
 	struct sctp_chunk *chunk;
 	union sctp_addr *laddr;
+	union sctp_addr saveaddr;
 	void *addr_buf;
 	struct sctp_af *af;
-	struct list_head *pos;
+	struct list_head *pos, *pos1;
+	struct sctp_sockaddr_entry *saddr;
 	int i;
 	int retval = 0;
 
@@ -748,14 +764,42 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
 		goto out;
 	}
 
-	retval = sctp_send_asconf(asoc, chunk);
+	/* Reset use_as_src flag for the addresses in the bind address
+	 * list that are to be deleted.
+	 */
+	sctp_local_bh_disable();
+	sctp_write_lock(&asoc->base.addr_lock);
+	addr_buf = addrs;
+	for (i = 0; i < addrcnt; i++) {
+		laddr = (union sctp_addr *)addr_buf;
+		af = sctp_get_af_specific(laddr->v4.sin_family);
+		memcpy(&saveaddr, laddr, af->sockaddr_len);
+		saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
+		list_for_each(pos1, &bp->address_list) {
+			saddr = list_entry(pos1,
+					   struct sctp_sockaddr_entry,
+					   list);
+			if (sctp_cmp_addr_exact(&saddr->a, &saveaddr))
+				saddr->use_as_src = 0;
+		}
+		addr_buf += af->sockaddr_len;
+	}
+	sctp_write_unlock(&asoc->base.addr_lock);
+	sctp_local_bh_enable();
 
-	/* FIXME: After sending the delete address ASCONF chunk, we
-	 * cannot remove the addresses from the association's bind
-	 * address list, because there maybe some packet send to
-	 * the delete addresses, so we should wait until ASCONF_ACK
-	 * packet is received.
+	/* Update the route and saddr entries for all the transports
+	 * as some of the addresses in the bind address list are
+	 * about to be deleted and cannot be used as source addresses.
 	 */
+	list_for_each(pos1, &asoc->peer.transport_addr_list) {
+		transport = list_entry(pos1, struct sctp_transport,
+				       transports);
+		dst_release(transport->dst);
+		sctp_transport_route(transport, NULL,
+				     sctp_sk(asoc->base.sk));
+	}
+
+	retval = sctp_send_asconf(asoc, chunk);
 	}
 out:
 	return retval;
@@ -4977,7 +5021,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 /* Caller must hold hashbucket lock for this tb with local BH disabled */
 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
 {
-	if (hlist_empty(&pp->owner)) {
+	if (pp && hlist_empty(&pp->owner)) {
 		if (pp->next)
 			pp->next->pprev = pp->pprev;
 		*(pp->pprev) = pp->next;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 160f62ad1cc5..2763aa93de1a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -49,6 +49,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/random.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
@@ -85,7 +86,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 
 	peer->init_sent_count = 0;
 
-	peer->state = SCTP_ACTIVE;
 	peer->param_flags = SPP_HB_DISABLE |
 			    SPP_PMTUD_ENABLE |
 			    SPP_SACKDELAY_ENABLE;
@@ -109,6 +109,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 	peer->hb_timer.function = sctp_generate_heartbeat_event;
 	peer->hb_timer.data = (unsigned long)peer;
 
+	/* Initialize the 64-bit random nonce sent with heartbeat. */
+	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
+
 	atomic_set(&peer->refcnt, 1);
 	peer->dead = 0;
 
@@ -517,7 +520,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
 unsigned long sctp_transport_timeout(struct sctp_transport *t)
 {
 	unsigned long timeout;
-	timeout = t->hbinterval + t->rto + sctp_jitter(t->rto);
+	timeout = t->rto + sctp_jitter(t->rto);
+	if (t->state != SCTP_UNCONFIRMED)
+		timeout += t->hbinterval;
 	timeout += jiffies;
 	return timeout;
 }
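
transport.c now seeds a per-transport 64-bit nonce with get_random_bytes() when the transport is created; the sm_statefuns.c hunks above echo it in outgoing heartbeats and discard heartbeat ACKs whose nonce does not match, which makes blind heartbeat spoofing much harder. A standalone sketch of the generate-and-verify idea, using the userspace getrandom() call rather than the kernel helper:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/random.h>

    /* generate a 64-bit nonce once per transport */
    static int nonce_init(uint64_t *nonce)
    {
            return getrandom(nonce, sizeof(*nonce), 0) == sizeof(*nonce) ? 0 : -1;
    }

    /* a heartbeat ACK is only accepted if it carries the nonce we sent */
    static int nonce_valid(uint64_t expected, uint64_t received)
    {
            return expected == received;
    }

    int main(void)
    {
            uint64_t hb_nonce;

            if (nonce_init(&hb_nonce))
                    return 1;

            printf("genuine ack accepted: %d\n", nonce_valid(hb_nonce, hb_nonce));
            printf("spoofed ack accepted: %d\n", nonce_valid(hb_nonce, hb_nonce + 1));
            return 0;
    }
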
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 519ebc17c028..4a9aa9393b97 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -225,9 +225,8 @@ gss_alloc_context(void)
 {
 	struct gss_cl_ctx *ctx;
 
-	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx != NULL) {
-		memset(ctx, 0, sizeof(*ctx));
 		ctx->gc_proc = RPC_GSS_PROC_DATA;
 		ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
 		spin_lock_init(&ctx->gc_seq_lock);
@@ -391,9 +390,8 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
 {
 	struct gss_upcall_msg *gss_msg;
 
-	gss_msg = kmalloc(sizeof(*gss_msg), GFP_KERNEL);
+	gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
 	if (gss_msg != NULL) {
-		memset(gss_msg, 0, sizeof(*gss_msg));
 		INIT_LIST_HEAD(&gss_msg->list);
 		rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
 		init_waitqueue_head(&gss_msg->waitqueue);
@@ -776,10 +774,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
 	dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
 		acred->uid, auth->au_flavor);
 
-	if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL)))
+	if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
 		goto out_err;
 
-	memset(cred, 0, sizeof(*cred));
 	atomic_set(&cred->gc_count, 1);
 	cred->gc_uid = acred->uid;
 	/*
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index b8714a87b34c..70e1e53a632b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -129,9 +129,8 @@ gss_import_sec_context_kerberos(const void *p,
 	const void *end = (const void *)((const char *)p + len);
 	struct krb5_ctx *ctx;
 
-	if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL)))
+	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
 		goto out_err;
-	memset(ctx, 0, sizeof(*ctx));
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
 	if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index d88468d21c37..3db745379d06 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -237,9 +237,8 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
 			  struct gss_api_mech *mech,
 			  struct gss_ctx **ctx_id)
 {
-	if (!(*ctx_id = kmalloc(sizeof(**ctx_id), GFP_KERNEL)))
+	if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
 		return GSS_S_FAILURE;
-	memset(*ctx_id, 0, sizeof(**ctx_id));
 	(*ctx_id)->mech_type = gss_mech_get(mech);
 
 	return mech->gm_ops
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 3d0432aa45c1..88dcb52d171b 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -152,9 +152,8 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
 	const void *end = (const void *)((const char *)p + len);
 	struct spkm3_ctx *ctx;
 
-	if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL)))
+	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
 		goto out_err;
-	memset(ctx, 0, sizeof(*ctx));
 
 	p = simple_get_netobj(p, end, &ctx->ctx_id);
 	if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index af0d7ce74686..854a983ccf26 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -90,10 +90,9 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
 int
 decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
 {
-	if (!(out->data = kmalloc(explen,GFP_KERNEL)))
+	if (!(out->data = kzalloc(explen,GFP_KERNEL)))
 		return 0;
 	out->len = explen;
-	memset(out->data, 0, explen);
 	memcpy(out->data, in, enclen);
 	return 1;
 }
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index aa8965e9d307..4ba271f892c8 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -125,10 +125,9 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
 		goto out_err;
 
 	err = -ENOMEM;
-	clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
+	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
 	if (!clnt)
 		goto out_err;
-	memset(clnt, 0, sizeof(*clnt));
 	atomic_set(&clnt->cl_users, 0);
 	atomic_set(&clnt->cl_count, 1);
 	clnt->cl_parent = clnt;
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 15c2db26767b..bd98124c3a64 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -114,13 +114,8 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
  */
 struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
 {
-	unsigned int ops = clnt->cl_maxproc;
-	size_t size = ops * sizeof(struct rpc_iostats);
 	struct rpc_iostats *new;
-
-	new = kmalloc(size, GFP_KERNEL);
-	if (new)
-		memset(new, 0 , size);
+	new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
 	return new;
 }
 EXPORT_SYMBOL(rpc_alloc_iostats);
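
The sunrpc hunks in this series are mechanical conversions: kmalloc() followed by memset(0) becomes kzalloc(), and an array-sized kmalloc()+memset(0) becomes kcalloc(), which also guards the count*size multiplication against overflow. The userspace equivalents behave the same way; a short illustration (nothing here is sunrpc-specific):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct iostats { unsigned long ops; unsigned long bytes; };

    int main(void)
    {
            size_t nproc = 8;

            /* old style: allocate, then clear by hand */
            struct iostats *a = malloc(nproc * sizeof(*a));
            if (a)
                    memset(a, 0, nproc * sizeof(*a));

            /* new style: calloc() zeroes the memory and checks that
             * nproc * sizeof(*b) does not overflow */
            struct iostats *b = calloc(nproc, sizeof(*b));

            printf("a zeroed: %d, b zeroed: %d\n",
                   a && a[3].ops == 0, b && b[3].bytes == 0);
            free(a);
            free(b);
            return 0;
    }
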
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 01ba60a49572..b76a227dd3ad 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,9 +32,8 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
 	int vers;
 	unsigned int xdrsize;
 
-	if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL)))
+	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
 		return NULL;
-	memset(serv, 0, sizeof(*serv));
 	serv->sv_name = prog->pg_name;
 	serv->sv_program = prog;
 	serv->sv_nrthreads = 1;
@@ -159,11 +158,10 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
 	struct svc_rqst *rqstp;
 	int error = -ENOMEM;
 
-	rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL);
+	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
 	if (!rqstp)
 		goto out;
 
-	memset(rqstp, 0, sizeof(*rqstp));
 	init_waitqueue_head(&rqstp->rq_wait);
 
 	if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a27905a0ad27..d9a95732df46 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1322,11 +1322,10 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
 	struct sock *inet;
 
 	dprintk("svc: svc_setup_socket %p\n", sock);
-	if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) {
+	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
 		*errp = -ENOMEM;
 		return NULL;
 	}
-	memset(svsk, 0, sizeof(*svsk));
 
 	inet = sock->sk;
 
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 02060d0e7be8..313b68d892c6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -908,9 +908,8 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
 	struct rpc_xprt *xprt;
 	struct rpc_rqst *req;
 
-	if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
+	if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
 		return ERR_PTR(-ENOMEM);
-	memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
 
 	xprt->addr = *ap;
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 21006b109101..ee678ed13b6f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1276,10 +1276,9 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 
 	xprt->max_reqs = xprt_udp_slot_table_entries;
 	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
-	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+	xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
 	if (xprt->slot == NULL)
 		return -ENOMEM;
-	memset(xprt->slot, 0, slot_table_size);
 
 	xprt->prot = IPPROTO_UDP;
 	xprt->port = xs_get_random_port();
@@ -1318,10 +1317,9 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 
 	xprt->max_reqs = xprt_tcp_slot_table_entries;
 	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
-	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+	xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
 	if (xprt->slot == NULL)
 		return -ENOMEM;
-	memset(xprt->slot, 0, slot_table_size);
 
 	xprt->prot = IPPROTO_TCP;
 	xprt->port = xs_get_random_port();
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 7ef17a449cfd..75a5968c2139 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -665,11 +665,9 @@ int tipc_bearer_init(void)
 	int res;
 
 	write_lock_bh(&tipc_net_lock);
-	tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
-	media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
+	tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
+	media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
 	if (tipc_bearers && media_list) {
-		memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer));
-		memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
 		res = TIPC_OK;
 	} else {
 		kfree(tipc_bearers);
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 1dcb6940e338..b46b5188a9fd 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -57,29 +57,25 @@ struct cluster *tipc_cltr_create(u32 addr)
 	struct _zone *z_ptr;
 	struct cluster *c_ptr;
 	int max_nodes;
-	int alloc;
 
-	c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
+	c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
 	if (c_ptr == NULL) {
 		warn("Cluster creation failure, no memory\n");
 		return NULL;
 	}
-	memset(c_ptr, 0, sizeof(*c_ptr));
 
 	c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
 	if (in_own_cluster(addr))
 		max_nodes = LOWEST_SLAVE + tipc_max_slaves;
 	else
 		max_nodes = tipc_max_nodes + 1;
-	alloc = sizeof(void *) * (max_nodes + 1);
 
-	c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
+	c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC);
 	if (c_ptr->nodes == NULL) {
 		warn("Cluster creation failure, no memory for node area\n");
 		kfree(c_ptr);
 		return NULL;
 	}
-	memset(c_ptr->nodes, 0, alloc);
 
 	if (in_own_cluster(addr))
 		tipc_local_nodes = c_ptr->nodes;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 2b8441203120..ee94de92ae99 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -295,7 +295,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
 {
 	struct link_req *req;
 
-	req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC);
+	req = kmalloc(sizeof(*req), GFP_ATOMIC);
 	if (!req)
 		return NULL;
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c10e18a49b96..693f02eca6d6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -417,12 +417,11 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
 	struct tipc_msg *msg;
 	char *if_name;
 
-	l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
+	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
 	if (!l_ptr) {
 		warn("Link creation failed, no memory\n");
 		return NULL;
 	}
-	memset(l_ptr, 0, sizeof(*l_ptr));
 
 	l_ptr->addr = peer;
 	if_name = strchr(b_ptr->publ.name, ':') + 1;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index a6926ff07bcc..049242ea5c38 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -117,14 +117,12 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
 				       u32 scope, u32 node, u32 port_ref,
 				       u32 key)
 {
-	struct publication *publ =
-		(struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
+	struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
 	if (publ == NULL) {
 		warn("Publication creation failure, no memory\n");
 		return NULL;
 	}
 
-	memset(publ, 0, sizeof(*publ));
 	publ->type = type;
 	publ->lower = lower;
 	publ->upper = upper;
@@ -144,11 +142,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
 
 static struct sub_seq *tipc_subseq_alloc(u32 cnt)
 {
-	u32 sz = cnt * sizeof(struct sub_seq);
-	struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
-
-	if (sseq)
-		memset(sseq, 0, sz);
+	struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
 	return sseq;
 }
 
@@ -160,8 +154,7 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
 
 static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
 {
-	struct name_seq *nseq =
-		(struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
+	struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
 	struct sub_seq *sseq = tipc_subseq_alloc(1);
 
 	if (!nseq || !sseq) {
@@ -171,7 +164,6 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
 		return NULL;
 	}
 
-	memset(nseq, 0, sizeof(*nseq));
 	spin_lock_init(&nseq->lock);
 	nseq->type = type;
 	nseq->sseqs = sseq;
@@ -1060,7 +1052,7 @@ int tipc_nametbl_init(void)
 {
 	int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
 
-	table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC);
+	table.types = kmalloc(array_size, GFP_ATOMIC);
 	if (!table.types)
 		return -ENOMEM;
 
diff --git a/net/tipc/net.c b/net/tipc/net.c
index e5a359ab4930..a991bf8a7f74 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -160,14 +160,11 @@ void tipc_net_send_external_routes(u32 dest)
 
 static int net_init(void)
 {
-	u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
-
 	memset(&tipc_net, 0, sizeof(tipc_net));
-	tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
+	tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
 	if (!tipc_net.zones) {
 		return -ENOMEM;
 	}
-	memset(tipc_net.zones, 0, sz);
 	return TIPC_OK;
 }
 
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 3251c8d8e53c..b9c8c6b9e94f 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -226,12 +226,11 @@ u32 tipc_createport_raw(void *usr_handle,
 	struct tipc_msg *msg;
 	u32 ref;
 
-	p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
+	p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
 	if (!p_ptr) {
 		warn("Port creation failed, no memory\n");
 		return 0;
 	}
-	memset(p_ptr, 0, sizeof(*p_ptr));
 	ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
 	if (!ref) {
 		warn("Port creation failed, reference table exhausted\n");
@@ -1058,7 +1057,7 @@ int tipc_createport(u32 user_ref,
 	struct port *p_ptr;
 	u32 ref;
 
-	up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
+	up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
 	if (!up_ptr) {
 		warn("Port creation failed, no memory\n");
 		return -ENOMEM;
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 596d3c8ff750..e6d6ae22ea49 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -79,7 +79,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
 	while (sz < requested_size) {
 		sz <<= 1;
 	}
-	table = (struct reference *)vmalloc(sz * sizeof(struct reference));
+	table = vmalloc(sz * sizeof(*table));
 	if (table == NULL)
 		return -ENOMEM;
 
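
The ref.c change also swaps sizeof(struct reference) for sizeof(*table): sizing the allocation from the pointer it is assigned to keeps the size correct even if the pointed-to type is later renamed or changed. A short illustration of the idiom:

    #include <stdio.h>
    #include <stdlib.h>

    struct reference { void *object; unsigned int lock; unsigned int data; };

    int main(void)
    {
            size_t n = 16;
            /* sizeof(*table) always tracks the type 'table' points at */
            struct reference *table = malloc(n * sizeof(*table));

            printf("element size: %zu bytes\n", sizeof(*table));
            free(table);
            return 0;
    }
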
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index e19b4bcd67ec..c51600ba5f4a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -393,12 +393,11 @@ static void subscr_named_msg_event(void *usr_handle,
 
 	/* Create subscriber object */
 
-	subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
+	subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
 	if (subscriber == NULL) {
 		warn("Subscriber rejected, no memory\n");
 		return;
 	}
-	memset(subscriber, 0, sizeof(struct subscriber));
 	INIT_LIST_HEAD(&subscriber->subscription_list);
 	INIT_LIST_HEAD(&subscriber->subscriber_list);
 	subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 1e3ae57c7228..04d1b9be9c51 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -82,9 +82,8 @@ static int reg_init(void)
 
 	spin_lock_bh(&reg_lock);
 	if (!users) {
-		users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);
+		users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
 		if (users) {
-			memset(users, 0, USER_LIST_SIZE);
 			for (i = 1; i <= MAX_USERID; i++) {
 				users[i].next = i - 1;
 			}
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 316c4872ff5b..f5b00ea2d5ac 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -52,13 +52,12 @@ struct _zone *tipc_zone_create(u32 addr)
 		return NULL;
 	}
 
-	z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
+	z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC);
 	if (!z_ptr) {
 		warn("Zone creation failed, insufficient memory\n");
 		return NULL;
 	}
 
-	memset(z_ptr, 0, sizeof(*z_ptr));
 	z_num = tipc_zone(addr);
 	z_ptr->addr = tipc_addr(z_num, 0, 0);
 	tipc_net.zones[z_num] = z_ptr;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f70475bfb62a..6f2909279268 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -663,11 +663,10 @@ static int unix_autobind(struct socket *sock)
 		goto out;
 
 	err = -ENOMEM;
-	addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
+	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
 	if (!addr)
 		goto out;
 
-	memset(addr, 0, sizeof(*addr) + sizeof(short) + 16);
 	addr->name->sun_family = AF_UNIX;
 	atomic_set(&addr->refcnt, 1);
 
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index a690cf773b6a..6f39faa15832 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -370,12 +370,11 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
 	 * used by the ioctl call to read call information
 	 * and to execute commands.
 	 */
-	if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) {
+	if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) {
 		wanpipe_kill_sock_irq (newsk);
 		release_device(dev);
 		return -ENOMEM;
 	}
-	memset(mbox_ptr, 0, sizeof(mbox_cmd_t));
 	memcpy(mbox_ptr,skb->data,skb->len);
 
 	/* Register the lcn on which incoming call came
@@ -507,11 +506,10 @@ static struct sock *wanpipe_alloc_socket(void)
 	if ((sk = sk_alloc(PF_WANPIPE, GFP_ATOMIC, &wanpipe_proto, 1)) == NULL)
 		return NULL;
 
-	if ((wan_opt = kmalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) {
+	if ((wan_opt = kzalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) {
 		sk_free(sk);
 		return NULL;
 	}
-	memset(wan_opt, 0x00, sizeof(struct wanpipe_opt));
 
 	wp_sk(sk) = wan_opt;
 
@@ -2011,10 +2009,9 @@ static int set_ioctl_cmd (struct sock *sk, void *arg)
 
 	dev_put(dev);
 
-	if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL)
+	if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
-	memset(mbox_ptr, 0, sizeof(mbox_cmd_t));
 	wp_sk(sk)->mbox = mbox_ptr;
 
 	wanpipe_link_driver(dev,sk);
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index ad8e8a797790..9479659277ae 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -642,18 +642,16 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
 
 	if (cnf->config_id == WANCONFIG_MPPP) {
 #ifdef CONFIG_WANPIPE_MULTPPP
-		pppdev = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
+		pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL);
 		err = -ENOBUFS;
 		if (pppdev == NULL)
 			goto out;
-		memset(pppdev, 0, sizeof(struct ppp_device));
-		pppdev->dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+		pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
 		if (pppdev->dev == NULL) {
 			kfree(pppdev);
 			err = -ENOBUFS;
 			goto out;
 		}
-		memset(pppdev->dev, 0, sizeof(struct net_device));
 		err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf);
 		dev = pppdev->dev;
 #else
@@ -663,11 +661,10 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
 		goto out;
 #endif
 	} else {
-		dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+		dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
 		err = -ENOBUFS;
 		if (dev == NULL)
 			goto out;
-		memset(dev, 0, sizeof(struct net_device));
 		err = wandev->new_if(wandev, dev, cnf);
 	}
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 405b741dff43..f35bc676128c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -307,10 +307,9 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
 {
 	struct xfrm_policy *policy;
 
-	policy = kmalloc(sizeof(struct xfrm_policy), gfp);
+	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 
 	if (policy) {
-		memset(policy, 0, sizeof(struct xfrm_policy));
 		atomic_set(&policy->refcnt, 1);
 		rwlock_init(&policy->lock);
 		init_timer(&policy->timer);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 43f00fc28a3d..0021aad5db43 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -194,10 +194,9 @@ struct xfrm_state *xfrm_state_alloc(void)
 {
 	struct xfrm_state *x;
 
-	x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
+	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
 
 	if (x) {
-		memset(x, 0, sizeof(struct xfrm_state));
 		atomic_set(&x->refcnt, 1);
 		atomic_set(&x->tunnel_users, 0);
 		INIT_LIST_HEAD(&x->bydst);