aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/drivers/edac/edac.txt673
-rw-r--r--Documentation/sysctl/vm.txt18
-rw-r--r--MAINTAINERS26
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/arm/boot/compressed/head.S2
-rw-r--r--arch/arm/configs/ep80219_defconfig1
-rw-r--r--arch/arm/configs/iq31244_defconfig1
-rw-r--r--arch/arm/configs/iq80321_defconfig1
-rw-r--r--arch/arm/configs/iq80331_defconfig1
-rw-r--r--arch/arm/configs/iq80332_defconfig1
-rw-r--r--arch/arm/kernel/entry-armv.S5
-rw-r--r--arch/arm/kernel/signal.c29
-rw-r--r--arch/arm/kernel/signal.h2
-rw-r--r--arch/arm/mach-ixp4xx/common.c3
-rw-r--r--arch/arm/mach-omap1/clock.c44
-rw-r--r--arch/arm/mach-omap1/clock.h168
-rw-r--r--arch/arm/mach-omap1/serial.c6
-rw-r--r--arch/arm/mach-omap2/clock.c32
-rw-r--r--arch/arm/mach-omap2/clock.h12
-rw-r--r--arch/arm/mach-omap2/serial.c12
-rw-r--r--arch/arm/mach-omap2/timer-gp.c2
-rw-r--r--arch/arm/plat-omap/clock.c41
-rw-r--r--arch/arm/plat-omap/gpio.c6
-rw-r--r--arch/arm/plat-omap/mcbsp.c12
-rw-r--r--arch/arm/plat-omap/ocpi.c4
-rw-r--r--arch/frv/kernel/signal.c120
-rw-r--r--arch/i386/defconfig2
-rw-r--r--arch/i386/kernel/quirks.c9
-rw-r--r--arch/i386/kernel/signal.c109
-rw-r--r--arch/i386/kernel/syscall_table.S15
-rw-r--r--arch/ia64/configs/gensparse_defconfig1
-rw-r--r--arch/ia64/configs/tiger_defconfig1
-rw-r--r--arch/ia64/configs/zx1_defconfig1
-rw-r--r--arch/ia64/defconfig1
-rw-r--r--arch/ia64/ia32/sys_ia32.c28
-rw-r--r--arch/ia64/kernel/perfmon.c11
-rw-r--r--arch/ia64/kernel/uncached.c1
-rw-r--r--arch/ia64/sn/include/xtalk/hubdev.h9
-rw-r--r--arch/ia64/sn/kernel/io_init.c54
-rw-r--r--arch/ia64/sn/kernel/mca.c7
-rw-r--r--arch/ia64/sn/kernel/xp_main.c17
-rw-r--r--arch/ia64/sn/kernel/xpc_channel.c34
-rw-r--r--arch/ia64/sn/kernel/xpc_main.c17
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_provider.c12
-rw-r--r--arch/parisc/configs/a500_defconfig1
-rw-r--r--arch/parisc/configs/c3000_defconfig1
-rw-r--r--arch/powerpc/configs/cell_defconfig1
-rw-r--r--arch/powerpc/configs/g5_defconfig1
-rw-r--r--arch/powerpc/configs/iseries_defconfig1
-rw-r--r--arch/powerpc/configs/maple_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/kernel/entry_32.S8
-rw-r--r--arch/powerpc/kernel/entry_64.S2
-rw-r--r--arch/powerpc/kernel/signal_32.c76
-rw-r--r--arch/powerpc/kernel/signal_64.c56
-rw-r--r--arch/powerpc/kernel/systbl.S2
-rw-r--r--arch/ppc/configs/bamboo_defconfig1
-rw-r--r--arch/ppc/configs/katana_defconfig1
-rw-r--r--arch/ppc/configs/mpc834x_sys_defconfig1
-rw-r--r--arch/ppc/configs/power3_defconfig1
-rw-r--r--arch/sparc/kernel/entry.S56
-rw-r--r--arch/sparc/kernel/rtrap.S9
-rw-r--r--arch/sparc/kernel/signal.c117
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c2
-rw-r--r--arch/sparc/kernel/systbls.S10
-rw-r--r--arch/sparc64/defconfig25
-rw-r--r--arch/sparc64/kernel/entry.S23
-rw-r--r--arch/sparc64/kernel/process.c4
-rw-r--r--arch/sparc64/kernel/rtrap.S33
-rw-r--r--arch/sparc64/kernel/setup.c2
-rw-r--r--arch/sparc64/kernel/signal.c151
-rw-r--r--arch/sparc64/kernel/signal32.c122
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c4
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c4
-rw-r--r--arch/sparc64/kernel/systbls.S21
-rw-r--r--arch/sparc64/prom/console.c8
-rw-r--r--arch/sparc64/solaris/entry64.S2
-rw-r--r--arch/um/Kconfig27
-rw-r--r--arch/um/Kconfig.i3866
-rw-r--r--arch/um/Makefile2
-rw-r--r--arch/um/drivers/daemon_kern.c4
-rw-r--r--arch/um/drivers/daemon_user.c6
-rw-r--r--arch/um/drivers/fd.c1
-rw-r--r--arch/um/drivers/net_kern.c21
-rw-r--r--arch/um/drivers/ubd_kern.c59
-rw-r--r--arch/um/include/kern_util.h4
-rw-r--r--arch/um/include/longjmp.h19
-rw-r--r--arch/um/include/mode_kern.h15
-rw-r--r--arch/um/include/os.h58
-rw-r--r--arch/um/include/skas/mm_id.h (renamed from arch/um/kernel/skas/include/mm_id.h)0
-rw-r--r--arch/um/include/skas/mmu-skas.h24
-rw-r--r--arch/um/include/skas/mode-skas.h19
-rw-r--r--arch/um/include/skas/mode_kern_skas.h (renamed from arch/um/kernel/skas/include/mode_kern-skas.h)12
-rw-r--r--arch/um/include/skas/proc_mm.h (renamed from arch/um/kernel/skas/include/proc_mm.h)17
-rw-r--r--arch/um/include/skas/skas.h26
-rw-r--r--arch/um/include/skas/stub-data.h (renamed from arch/um/kernel/skas/include/stub-data.h)0
-rw-r--r--arch/um/include/skas/uaccess-skas.h (renamed from arch/um/kernel/skas/include/uaccess-skas.h)11
-rw-r--r--arch/um/include/time_user.h19
-rw-r--r--arch/um/include/tt/debug.h (renamed from arch/um/kernel/tt/include/debug.h)2
-rw-r--r--arch/um/include/tt/mmu-tt.h12
-rw-r--r--arch/um/include/tt/mode-tt.h23
-rw-r--r--arch/um/include/tt/mode_kern_tt.h41
-rw-r--r--arch/um/include/tt/tt.h (renamed from arch/um/kernel/tt/include/tt.h)12
-rw-r--r--arch/um/include/tt/uaccess-tt.h (renamed from arch/um/kernel/tt/include/uaccess-tt.h)11
-rw-r--r--arch/um/include/user.h1
-rw-r--r--arch/um/include/user_util.h18
-rw-r--r--arch/um/kernel/Makefile7
-rw-r--r--arch/um/kernel/exec_kern.c1
-rw-r--r--arch/um/kernel/process_kern.c22
-rw-r--r--arch/um/kernel/sigio_user.c83
-rw-r--r--arch/um/kernel/signal_kern.c91
-rw-r--r--arch/um/kernel/skas/Makefile6
-rw-r--r--arch/um/kernel/skas/include/mmu-skas.h35
-rw-r--r--arch/um/kernel/skas/include/mode-skas.h33
-rw-r--r--arch/um/kernel/skas/include/skas.h49
-rw-r--r--arch/um/kernel/skas/mmu.c8
-rw-r--r--arch/um/kernel/skas/process.c1
-rw-r--r--arch/um/kernel/skas/process_kern.c35
-rw-r--r--arch/um/kernel/skas/uaccess.c2
-rw-r--r--arch/um/kernel/syscall.c4
-rw-r--r--arch/um/kernel/time_kern.c132
-rw-r--r--arch/um/kernel/tt/exec_kern.c1
-rw-r--r--arch/um/kernel/tt/gdb.c1
-rw-r--r--arch/um/kernel/tt/include/mmu-tt.h23
-rw-r--r--arch/um/kernel/tt/process_kern.c1
-rw-r--r--arch/um/kernel/tt/ptproxy/ptrace.c1
-rw-r--r--arch/um/kernel/tt/ptproxy/sysdep.c1
-rw-r--r--arch/um/kernel/tt/trap_user.c5
-rw-r--r--arch/um/os-Linux/Makefile4
-rw-r--r--arch/um/os-Linux/helper.c4
-rw-r--r--arch/um/os-Linux/main.c13
-rw-r--r--arch/um/os-Linux/process.c30
-rw-r--r--arch/um/os-Linux/signal.c218
-rw-r--r--arch/um/os-Linux/skas/Makefile4
-rw-r--r--arch/um/os-Linux/skas/mem.c (renamed from arch/um/kernel/skas/mem_user.c)202
-rw-r--r--arch/um/os-Linux/skas/process.c566
-rw-r--r--arch/um/os-Linux/start_up.c1
-rw-r--r--arch/um/os-Linux/time.c131
-rw-r--r--arch/um/os-Linux/trap.c3
-rw-r--r--arch/um/os-Linux/tt.c49
-rw-r--r--arch/um/os-Linux/uaccess.c4
-rw-r--r--arch/um/os-Linux/util.c (renamed from arch/um/kernel/user_util.c)69
-rw-r--r--arch/um/sys-i386/ldt.c26
-rw-r--r--arch/x86_64/defconfig1
-rw-r--r--arch/x86_64/ia32/ia32entry.S13
-rw-r--r--arch/x86_64/kernel/setup64.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/char/synclink_gt.c2
-rw-r--r--drivers/char/tlclk.c93
-rw-r--r--drivers/edac/Kconfig102
-rw-r--r--drivers/edac/Makefile18
-rw-r--r--drivers/edac/amd76x_edac.c356
-rw-r--r--drivers/edac/e752x_edac.c1071
-rw-r--r--drivers/edac/e7xxx_edac.c558
-rw-r--r--drivers/edac/edac_mc.c2209
-rw-r--r--drivers/edac/edac_mc.h448
-rw-r--r--drivers/edac/i82860_edac.c299
-rw-r--r--drivers/edac/i82875p_edac.c532
-rw-r--r--drivers/edac/r82600_edac.c407
-rw-r--r--drivers/md/kcopyd.c1
-rw-r--r--drivers/net/Kconfig10
-rw-r--r--drivers/net/cassini.c36
-rw-r--r--drivers/net/e1000/e1000_ethtool.c307
-rw-r--r--drivers/net/e1000/e1000_hw.c18
-rw-r--r--drivers/net/e1000/e1000_hw.h20
-rw-r--r--drivers/net/e1000/e1000_main.c802
-rw-r--r--drivers/net/e1000/e1000_osdep.h2
-rw-r--r--drivers/net/e1000/e1000_param.c44
-rw-r--r--drivers/serial/8250.c13
-rw-r--r--drivers/serial/8250_pci.c10
-rw-r--r--drivers/serial/Kconfig2
-rw-r--r--drivers/serial/at91_serial.c2
-rw-r--r--drivers/serial/sn_console.c129
-rw-r--r--drivers/serial/suncore.c34
-rw-r--r--drivers/serial/sunsab.c7
-rw-r--r--fs/9p/Makefile1
-rw-r--r--fs/9p/v9fs_vfs.h1
-rw-r--r--fs/9p/vfs_addr.c109
-rw-r--r--fs/9p/vfs_file.c4
-rw-r--r--fs/9p/vfs_inode.c1
-rw-r--r--fs/cifs/CHANGES10
-rw-r--r--fs/cifs/README12
-rw-r--r--fs/cifs/cifs_debug.c51
-rw-r--r--fs/cifs/cifs_fs_sb.h5
-rw-r--r--fs/cifs/cifsacl.h38
-rw-r--r--fs/cifs/cifsencrypt.c55
-rw-r--r--fs/cifs/cifsfs.c28
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h18
-rw-r--r--fs/cifs/cifspdu.h97
-rw-r--r--fs/cifs/cifsproto.h23
-rw-r--r--fs/cifs/cifssmb.c294
-rw-r--r--fs/cifs/connect.c89
-rw-r--r--fs/cifs/dir.c8
-rw-r--r--fs/cifs/file.c73
-rw-r--r--fs/cifs/inode.c9
-rw-r--r--fs/cifs/misc.c17
-rw-r--r--fs/cifs/readdir.c17
-rw-r--r--fs/cifs/rfc1002pdu.h4
-rw-r--r--fs/cifs/transport.c40
-rw-r--r--fs/cifs/xattr.c22
-rw-r--r--fs/compat.c308
-rw-r--r--fs/exec.c2
-rw-r--r--fs/exportfs/expfs.c79
-rw-r--r--fs/hfs/bfind.c3
-rw-r--r--fs/hfs/bnode.c6
-rw-r--r--fs/hfs/brec.c2
-rw-r--r--fs/hfs/btree.c10
-rw-r--r--fs/hfs/catalog.c2
-rw-r--r--fs/hfs/dir.c12
-rw-r--r--fs/hfs/hfs_fs.h3
-rw-r--r--fs/hfs/inode.c1
-rw-r--r--fs/hfs/mdb.c22
-rw-r--r--fs/hfs/super.c40
-rw-r--r--fs/hfsplus/bfind.c3
-rw-r--r--fs/hfsplus/bnode.c11
-rw-r--r--fs/hfsplus/brec.c2
-rw-r--r--fs/hfsplus/btree.c37
-rw-r--r--fs/hfsplus/catalog.c44
-rw-r--r--fs/hfsplus/dir.c47
-rw-r--r--fs/hfsplus/extents.c6
-rw-r--r--fs/hfsplus/hfsplus_fs.h12
-rw-r--r--fs/hfsplus/hfsplus_raw.h13
-rw-r--r--fs/hfsplus/inode.c11
-rw-r--r--fs/hfsplus/options.c18
-rw-r--r--fs/hfsplus/super.c54
-rw-r--r--fs/hfsplus/unicode.c30
-rw-r--r--fs/hfsplus/wrapper.c17
-rw-r--r--fs/inotify.c1
-rw-r--r--fs/jbd/checkpoint.c2
-rw-r--r--fs/jbd/commit.c3
-rw-r--r--fs/namei.c172
-rw-r--r--fs/nfsctl.c1
-rw-r--r--fs/nfsd/nfs4proc.c55
-rw-r--r--fs/nfsd/nfs4recover.c10
-rw-r--r--fs/nfsd/nfs4state.c220
-rw-r--r--fs/nfsd/nfs4xdr.c10
-rw-r--r--fs/nfsd/nfsproc.c37
-rw-r--r--fs/nfsd/vfs.c89
-rw-r--r--fs/open.c83
-rw-r--r--fs/select.c348
-rw-r--r--fs/stat.c66
-rw-r--r--include/asm-arm/arch-omap/clock.h2
-rw-r--r--include/asm-arm/arch-pxa/pxa-regs.h14
-rw-r--r--include/asm-frv/thread_info.h2
-rw-r--r--include/asm-frv/unistd.h1
-rw-r--r--include/asm-i386/edac.h18
-rw-r--r--include/asm-i386/futex.h2
-rw-r--r--include/asm-i386/signal.h1
-rw-r--r--include/asm-i386/thread_info.h2
-rw-r--r--include/asm-i386/unistd.h18
-rw-r--r--include/asm-ia64/semaphore.h8
-rw-r--r--include/asm-ia64/sn/xp.h3
-rw-r--r--include/asm-ia64/sn/xpc.h9
-rw-r--r--include/asm-ia64/topology.h4
-rw-r--r--include/asm-powerpc/thread_info.h5
-rw-r--r--include/asm-powerpc/unistd.h6
-rw-r--r--include/asm-sparc/oplib.h2
-rw-r--r--include/asm-sparc/thread_info.h5
-rw-r--r--include/asm-sparc/unistd.h22
-rw-r--r--include/asm-sparc64/oplib.h2
-rw-r--r--include/asm-sparc64/thread_info.h6
-rw-r--r--include/asm-sparc64/unistd.h23
-rw-r--r--include/asm-um/io.h16
-rw-r--r--include/asm-um/thread_info.h13
-rw-r--r--include/asm-um/unistd.h1
-rw-r--r--include/asm-x86_64/edac.h18
-rw-r--r--include/asm-x86_64/ia32_unistd.h15
-rw-r--r--include/asm-x86_64/unistd.h29
-rw-r--r--include/linux/fcntl.h7
-rw-r--r--include/linux/fs.h7
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/mempolicy.h1
-rw-r--r--include/linux/mm_inline.h21
-rw-r--r--include/linux/mmzone.h12
-rw-r--r--include/linux/namei.h7
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/nfsd/nfsd.h2
-rw-r--r--include/linux/nfsd/xdr4.h5
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/poll.h6
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/sunrpc/svc.h1
-rw-r--r--include/linux/swap.h12
-rw-r--r--include/linux/syscalls.h19
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/time.h2
-rw-r--r--include/linux/tipc_config.h7
-rw-r--r--include/linux/topology.h8
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/structs.h89
-rw-r--r--include/scsi/scsi_transport_spi.h1
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/compat.c28
-rw-r--r--kernel/sched.c4
-rw-r--r--kernel/signal.c26
-rw-r--r--kernel/sysctl.c11
-rw-r--r--mm/filemap.c1
-rw-r--r--mm/mempolicy.c99
-rw-r--r--mm/page-writeback.c7
-rw-r--r--mm/page_alloc.c17
-rw-r--r--mm/rmap.c2
-rw-r--r--mm/slab.c58
-rw-r--r--mm/swap.c26
-rw-r--r--mm/swapfile.c17
-rw-r--r--mm/vmscan.c146
-rw-r--r--net/Kconfig2
-rw-r--r--net/core/pktgen.c5
-rw-r--r--net/ipv4/igmp.c152
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c3
-rw-r--r--net/sctp/input.c75
-rw-r--r--net/sctp/inqueue.c4
-rw-r--r--net/sctp/proc.c32
-rw-r--r--net/sctp/sm_make_chunk.c16
-rw-r--r--net/sctp/sm_sideeffect.c4
-rw-r--r--net/sctp/sm_statefuns.c2
-rw-r--r--net/sctp/socket.c6
-rw-r--r--net/sctp/sysctl.c7
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c38
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/tipc/Kconfig7
-rw-r--r--net/tipc/addr.c10
-rw-r--r--net/tipc/addr.h4
-rw-r--r--net/tipc/bcast.c166
-rw-r--r--net/tipc/bcast.h44
-rw-r--r--net/tipc/bearer.c189
-rw-r--r--net/tipc/bearer.h40
-rw-r--r--net/tipc/cluster.c154
-rw-r--r--net/tipc/cluster.h40
-rw-r--r--net/tipc/config.c218
-rw-r--r--net/tipc/config.h41
-rw-r--r--net/tipc/core.c87
-rw-r--r--net/tipc/core.h35
-rw-r--r--net/tipc/dbg.c112
-rw-r--r--net/tipc/dbg.h18
-rw-r--r--net/tipc/discover.c52
-rw-r--r--net/tipc/discover.h18
-rw-r--r--net/tipc/eth_media.c20
-rw-r--r--net/tipc/handler.c6
-rw-r--r--net/tipc/link.c605
-rw-r--r--net/tipc/link.h73
-rw-r--r--net/tipc/msg.c19
-rw-r--r--net/tipc/msg.h18
-rw-r--r--net/tipc/name_distr.c92
-rw-r--r--net/tipc/name_distr.h10
-rw-r--r--net/tipc/name_table.c206
-rw-r--r--net/tipc/name_table.h26
-rw-r--r--net/tipc/net.c126
-rw-r--r--net/tipc/net.h20
-rw-r--r--net/tipc/netlink.c16
-rw-r--r--net/tipc/node.c215
-rw-r--r--net/tipc/node.h50
-rw-r--r--net/tipc/node_subscr.c20
-rw-r--r--net/tipc/node_subscr.h6
-rw-r--r--net/tipc/port.c274
-rw-r--r--net/tipc/port.h60
-rw-r--r--net/tipc/ref.c72
-rw-r--r--net/tipc/ref.h38
-rw-r--r--net/tipc/socket.c12
-rw-r--r--net/tipc/subscr.c58
-rw-r--r--net/tipc/subscr.h24
-rw-r--r--net/tipc/user_reg.c22
-rw-r--r--net/tipc/user_reg.h8
-rw-r--r--net/tipc/zone.c40
-rw-r--r--net/tipc/zone.h20
-rw-r--r--sound/oss/trident.c18
371 files changed, 14587 insertions, 5191 deletions
diff --git a/Documentation/drivers/edac/edac.txt b/Documentation/drivers/edac/edac.txt
new file mode 100644
index 00000000000..d37191fe568
--- /dev/null
+++ b/Documentation/drivers/edac/edac.txt
@@ -0,0 +1,673 @@
1
2
3EDAC - Error Detection And Correction
4
5Written by Doug Thompson <norsk5@xmission.com>
67 Dec 2005
7
8
9EDAC was written by:
10 Thayne Harbaugh,
11 modified by Dave Peterson, Doug Thompson, et al,
12 from the bluesmoke.sourceforge.net project.
13
14
15============================================================================
16EDAC PURPOSE
17
18The 'edac' kernel module goal is to detect and report errors that occur
19within the computer system. In the initial release, memory Correctable Errors
20(CE) and Uncorrectable Errors (UE) are the primary errors being harvested.
21
22Detecting CE events, then harvesting those events and reporting them,
23CAN be a predictor of future UE events. With CE events, the system can
24continue to operate, but with less safety. Preventive maintenance and
25proactive part replacement of memory DIMMs exhibiting CEs can reduce
26the likelihood of the dreaded UE events and system 'panics'.
27
28
29In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices
30in order to determine if errors are occurring on data transfers.
31The presence of PCI Parity errors must be examined with a grain of salt.
32There are several add-in adapters that do NOT follow the PCI specification
33with regards to Parity generation and reporting. The specification says
34the vendor should tie the parity status bits to 0 if they do not intend
35to generate parity. Some vendors do not do this, and thus the parity bit
36can "float" giving false positives.
37
38The PCI Parity EDAC device has the ability to "skip" known flaky
39cards during the parity scan. These are set by the parity "blacklist"
40interface in the sysfs for PCI Parity. (See the PCI section in the sysfs
41section below.) There is also a parity "whitelist" which is used as
42an explicit list of devices to scan, while the blacklist is a list
43of devices to skip.
44
45EDAC will have future error detectors that will be added or integrated
46into EDAC in the following list:
47
48 MCE Machine Check Exception
49 MCA Machine Check Architecture
50 NMI NMI notification of ECC errors
51 MSRs Machine Specific Register error cases
52 and other mechanisms.
53
54These errors are usually bus errors, ECC errors, thermal throttling
55and the like.
56
57
58============================================================================
59EDAC VERSIONING
60
61EDAC is composed of a "core" module (edac_mc.ko) and several Memory
62Controller (MC) driver modules. On a given system, the CORE
63is loaded and one MC driver will be loaded. Both the CORE and
64the MC driver have individual versions that reflect current release
65level of their respective modules. Thus, to "report" on what version
66a system is running, one must report both the CORE's and the
67MC driver's versions.
68
69
70LOADING
71
72If 'edac' was statically linked with the kernel then no loading is
73necessary. If 'edac' was built as modules then simply modprobe the
74'edac' pieces that you need. You should be able to modprobe
75hardware-specific modules and have the dependencies load the necessary core
76modules.
77
78Example:
79
80$> modprobe amd76x_edac
81
82loads both the amd76x_edac.ko memory controller module and the edac_mc.ko
83core module.
84
85
86============================================================================
87EDAC sysfs INTERFACE
88
89EDAC presents a 'sysfs' interface for control, reporting and attribute
90reporting purposes.
91
92EDAC lives in the /sys/devices/system/edac directory. Within this directory
93there currently reside 2 'edac' components:
94
95 mc memory controller(s) system
96 pci PCI status system
97
98
99============================================================================
100Memory Controller (mc) Model
101
102First a background on the memory controller's model abstracted in EDAC.
103Each mc device controls a set of DIMM memory modules. These modules are
104laid out in a Chip-Select Row (csrowX) and Channel table (chX). There can
105be multiple csrows and two channels.
106
107Memory controllers allow for several csrows, with 8 csrows being a typical value.
108Yet, the actual number of csrows depends on the electrical "loading"
109of a given motherboard, memory controller and DIMM characteristics.
110
111Dual channels allows for 128 bit data transfers to the CPU from memory.
112
113
114 Channel 0 Channel 1
115 ===================================
116 csrow0 | DIMM_A0 | DIMM_B0 |
117 csrow1 | DIMM_A0 | DIMM_B0 |
118 ===================================
119
120 ===================================
121 csrow2 | DIMM_A1 | DIMM_B1 |
122 csrow3 | DIMM_A1 | DIMM_B1 |
123 ===================================
124
125In the above example table there are 4 physical slots on the motherboard
126for memory DIMMs:
127
128 DIMM_A0
129 DIMM_B0
130 DIMM_A1
131 DIMM_B1
132
133Labels for these slots are usually silk screened on the motherboard. Slots
134labeled 'A' are channel 0 in this example. Slots labeled 'B'
135are channel 1. Notice that there are two csrows possible on a
136physical DIMM. These csrows are allocated their csrow assignment
137based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM
138is placed in each Channel, the csrows cross both DIMMs.
139
140Memory DIMMs come single or dual "ranked". A rank is a populated csrow.
141Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above
142will have 1 csrow, csrow0. csrow1 will be empty. On the other hand,
143when 2 dual ranked DIMMs are similarly placed, then both csrow0 and
144csrow1 will be populated. The pattern repeats itself for csrow2 and
145csrow3.
146
147The representation of the above is reflected in the directory tree
148in EDAC's sysfs interface. Starting in directory
149/sys/devices/system/edac/mc each memory controller will be represented
150by its own 'mcX' directory, where 'X' is the index of the MC.
151
152
153 ..../edac/mc/
154 |
155 |->mc0
156 |->mc1
157 |->mc2
158 ....
159
160Under each 'mcX' directory each 'csrowX' is again represented by a
161'csrowX', where 'X' is the csrow index:
162
163
164 .../mc/mc0/
165 |
166 |->csrow0
167 |->csrow2
168 |->csrow3
169 ....
170
171Notice that there is no csrow1, which indicates that csrow0 is
172composed of a single ranked DIMMs. This should also apply in both
173Channels, in order to have dual-channel mode be operational. Since
174both csrow2 and csrow3 are populated, this indicates a dual ranked
175set of DIMMs for channels 0 and 1.
176
177
178Within each of the 'mc','mcX' and 'csrowX' directories are several
179EDAC control and attribute files.
180
181
182============================================================================
183DIRECTORY 'mc'
184
185In directory 'mc' are EDAC system overall control and attribute files:
186
187
188Panic on UE control file:
189
190 'panic_on_ue'
191
192 An uncorrectable error will cause a machine panic. This is usually
193 desirable. It is a bad idea to continue when an uncorrectable error
194 occurs - it is indeterminate what was uncorrected and the operating
195 system context might be so mangled that continuing will lead to further
196 corruption. If the kernel has MCE configured, then EDAC will never
197 notice the UE.
198
199 LOAD TIME: module/kernel parameter: panic_on_ue=[0|1]
200
201 RUN TIME: echo "1" >/sys/devices/system/edac/mc/panic_on_ue
202
203
204Log UE control file:
205
206 'log_ue'
207
208 Generate kernel messages describing uncorrectable errors. These errors
209 are reported through the system message log system. UE statistics
210 will be accumulated even when UE logging is disabled.
211
212 LOAD TIME: module/kernel parameter: log_ue=[0|1]
213
214 RUN TIME: echo "1" >/sys/devices/system/edac/mc/log_ue
215
216
217Log CE control file:
218
219 'log_ce'
220
221 Generate kernel messages describing correctable errors. These
222 errors are reported through the system message log system.
223 CE statistics will be accumulated even when CE logging is disabled.
224
225 LOAD TIME: module/kernel parameter: log_ce=[0|1]
226
227 RUN TIME: echo "1" >/sys/devices/system/edac/mc/log_ce
228
229
230Polling period control file:
231
232 'poll_msec'
233
234 The time period, in milliseconds, for polling for error information.
235 Too small a value wastes resources. Too large a value might delay
236 necessary handling of errors and might lose valuable information for
237 locating the error. 1000 milliseconds (once each second) is about
238 right for most uses.
239
240 LOAD TIME: module/kernel parameter: poll_msec=[0|1]
241
242 RUN TIME: echo "1000" >/sys/devices/system/edac/mc/poll_msec
243
244
245Module Version read-only attribute file:
246
247 'mc_version'
248
249 The EDAC CORE module's version and compile date are shown here to
250 indicate what EDAC is running.
251
252
253
254============================================================================
255'mcX' DIRECTORIES
256
257
258In 'mcX' directories are EDAC control and attribute files for
259this 'X" instance of the memory controllers:
260
261
262Counter reset control file:
263
264 'reset_counters'
265
266 This write-only control file will zero all the statistical counters
267 for UE and CE errors. Zeroing the counters will also reset the timer
268 indicating how long since the last counter zero. This is useful
269 for computing errors/time. Since the counters are always reset at
270 driver initialization time, no module/kernel parameter is available.
271
272 RUN TIME: echo "anything" >/sys/devices/system/edac/mc/mc0/reset_counters
273
274 This resets the counters on memory controller 0
275
276
277Seconds since last counter reset control file:
278
279 'seconds_since_reset'
280
281 This attribute file displays how many seconds have elapsed since the
282 last counter reset. This can be used with the error counters to
283 measure error rates.
284
285
286
287DIMM capability attribute file:
288
289 'edac_capability'
290
291 The EDAC (Error Detection and Correction) capabilities/modes of
292 the memory controller hardware.
293
294
295DIMM Current Capability attribute file:
296
297 'edac_current_capability'
298
299 The EDAC capabilities available with the hardware
300 configuration. This may not be the same as "EDAC capability"
301 if the correct memory is not used. If a memory controller is
302 capable of EDAC, but DIMMs without check bits are in use, then
303 Parity, SECDED, S4ECD4ED capabilities will not be available
304 even though the memory controller might be capable of those
305 modes with the proper memory loaded.
306
307
308Memory Type supported on this controller attribute file:
309
310 'supported_mem_type'
311
312 This attribute file displays the memory type, usually
313 buffered and unbuffered DIMMs.
314
315
316Memory Controller name attribute file:
317
318 'mc_name'
319
320 This attribute file displays the type of memory controller
321 that is being utilized.
322
323
324Memory Controller Module name attribute file:
325
326 'module_name'
327
328 This attribute file displays the memory controller module name,
329 version and date built. The name of the memory controller
330 hardware - some drivers work with multiple controllers and
331 this field shows which hardware is present.
332
333
334Total memory managed by this memory controller attribute file:
335
336 'size_mb'
337
338 This attribute file displays, in megabytes, the amount of memory
339 that this instance of the memory controller manages.
340
341
342Total Uncorrectable Errors count attribute file:
343
344 'ue_count'
345
346 This attribute file displays the total count of uncorrectable
347 errors that have occurred on this memory controller. If panic_on_ue
348 is set this counter will not have a chance to increment,
349 since EDAC will panic the system.
350
351
352Total UE count that had no information attribute file:
353
354 'ue_noinfo_count'
355
356 This attribute file displays the number of UEs that
357 have occurred with no information as to which DIMM
358 slot is having errors.
359
360
361Total Correctable Errors count attribute file:
362
363 'ce_count'
364
365 This attribute file displays the total count of correctable
366 errors that have occurred on this memory controller. This
367 count is very important to examine. CEs provide early
368 indications that a DIMM is beginning to fail. This count
369 field should be monitored for non-zero values and report
370 such information to the system administrator.
371
372
373Total CE count that had no information attribute file:
374
375 'ce_noinfo_count'
376
377 This attribute file displays the number of CEs that
378 have occurred with no information as to which DIMM slot
379 is having errors. Memory is handicapped, but operational,
380 yet no information is available to indicate which slot
381 the failing memory is in. This count field should be also
382 be monitored for non-zero values.
383
384Device Symlink:
385
386 'device'
387
388 Symlink to the memory controller device
389
390
391
392============================================================================
393'csrowX' DIRECTORIES
394
395In the 'csrowX' directories are EDAC control and attribute files for
396this 'X' instance of csrow:
397
398
399Total Uncorrectable Errors count attribute file:
400
401 'ue_count'
402
403 This attribute file displays the total count of uncorrectable
404 errors that have occurred on this csrow. If panic_on_ue is set
405 this counter will not have a chance to increment, since EDAC
406 will panic the system.
407
408
409Total Correctable Errors count attribute file:
410
411 'ce_count'
412
413 This attribute file displays the total count of correctable
414 errors that have occurred on this csrow. This
415 count is very important to examine. CEs provide early
416 indications that a DIMM is beginning to fail. This count
417 field should be monitored for non-zero values and report
418 such information to the system administrator.
419
420
421Total memory managed by this csrow attribute file:
422
423 'size_mb'
424
425 This attribute file displays, in megabytes, the amount of memory
426 that this csrow contains.
427
428
429Memory Type attribute file:
430
431 'mem_type'
432
433 This attribute file will display what type of memory is currently
434 on this csrow. Normally, either buffered or unbuffered memory.
435
436
437EDAC Mode of operation attribute file:
438
439 'edac_mode'
440
441 This attribute file will display what type of Error detection
442 and correction is being utilized.
443
444
445Device type attribute file:
446
447 'dev_type'
448
449 This attribute file will display what type of DIMM device is
450 being utilized. Example: x4
451
452
453Channel 0 CE Count attribute file:
454
455 'ch0_ce_count'
456
457 This attribute file will display the count of CEs on this
458 DIMM located in channel 0.
459
460
461Channel 0 UE Count attribute file:
462
463 'ch0_ue_count'
464
465 This attribute file will display the count of UEs on this
466 DIMM located in channel 0.
467
468
469Channel 0 DIMM Label control file:
470
471 'ch0_dimm_label'
472
473 This control file allows this DIMM to have a label assigned
474 to it. With this label in the module, when errors occur
475 the output can provide the DIMM label in the system log.
476 This becomes vital for panic events to isolate the
477 cause of the UE event.
478
479 DIMM Labels must be assigned after booting, with information
480 that correctly identifies the physical slot with its
481 silk screen label. This information is currently very
482 motherboard specific and determination of this information
483 must occur in userland at this time.
484
485
486Channel 1 CE Count attribute file:
487
488 'ch1_ce_count'
489
490 This attribute file will display the count of CEs on this
491 DIMM located in channel 1.
492
493
494Channel 1 UE Count attribute file:
495
496 'ch1_ue_count'
497
498 This attribute file will display the count of UEs on this
499 DIMM located in channel 1.
500
501
502Channel 1 DIMM Label control file:
503
504 'ch1_dimm_label'
505
506 This control file allows this DIMM to have a label assigned
507 to it. With this label in the module, when errors occur
508 the output can provide the DIMM label in the system log.
509 This becomes vital for panic events to isolate the
510 cause of the UE event.
511
512 DIMM Labels must be assigned after booting, with information
513 that correctly identifies the physical slot with its
514 silk screen label. This information is currently very
515 motherboard specific and determination of this information
516 must occur in userland at this time.
517
518
519============================================================================
520SYSTEM LOGGING
521
522If logging for UEs and CEs are enabled then system logs will have
523error notices indicating errors that have been detected:
524
525MC0: CE page 0x283, offset 0xce0, grain 8, syndrome 0x6ec3, row 0,
526channel 1 "DIMM_B1": amd76x_edac
527
528MC0: CE page 0x1e5, offset 0xfb0, grain 8, syndrome 0xb741, row 0,
529channel 1 "DIMM_B1": amd76x_edac
530
531
532The structure of the message is:
533 the memory controller (MC0)
534 Error type (CE)
535 memory page (0x283)
536 offset in the page (0xce0)
537 the byte granularity (grain 8)
538 or resolution of the error
539 the error syndrome (0x6ec3)
540 memory row (row 0)
541 memory channel (channel 1)
542 DIMM label, if set prior (DIMM_B1)
543 and then an optional, driver-specific message that may
544 have additional information.
545
546Both UEs and CEs with no info will lack all but memory controller,
547error type, a notice of "no info" and then an optional,
548driver-specific error message.
549
550
551
552============================================================================
553PCI Bus Parity Detection
554
555
556On Header Type 00 devices the primary status is looked at
557for any parity error regardless of whether Parity is enabled on the
558device. (The spec indicates parity is generated in some cases).
559On Header Type 01 bridges, the secondary status register is also
560looked at to see if parity occurred on the bus on the other side of
561the bridge.
562
563
564SYSFS CONFIGURATION
565
566Under /sys/devices/system/edac/pci are control and attribute files as follows:
567
568
569Enable/Disable PCI Parity checking control file:
570
571 'check_pci_parity'
572
573
574 This control file enables or disables the PCI Bus Parity scanning
575 operation. Writing a 1 to this file enables the scanning. Writing
576 a 0 to this file disables the scanning.
577
578 Enable:
579 echo "1" >/sys/devices/system/edac/pci/check_pci_parity
580
581 Disable:
582 echo "0" >/sys/devices/system/edac/pci/check_pci_parity
583
584
585
586Panic on PCI PARITY Error:
587
588 'panic_on_pci_parity'
589
590
591 This control files enables or disables panic'ing when a parity
592 error has been detected.
593
594
595 module/kernel parameter: panic_on_pci_parity=[0|1]
596
597 Enable:
598 echo "1" >/sys/devices/system/edac/pci/panic_on_pci_parity
599
600 Disable:
601 echo "0" >/sys/devices/system/edac/pci/panic_on_pci_parity
602
603
604Parity Count:
605
606 'pci_parity_count'
607
608 This attribute file will display the number of parity errors that
609 have been detected.
610
611
612
613PCI Device Whitelist:
614
615 'pci_parity_whitelist'
616
617 This control file allows for an explicit list of PCI devices to be
618 scanned for parity errors. Only devices found on this list will
619 be examined. The list is a line of hexadecimal VENDOR and DEVICE
620 ID tuples:
621
622 1022:7450,1434:16a6
623
624 One or more can be inserted, separated by a comma.
625
626 To write the above list, do the following as one command line:
627
628 echo "1022:7450,1434:16a6"
629 > /sys/devices/system/edac/pci/pci_parity_whitelist
630
631
632
633 To display what the whitelist is, simply 'cat' the same file.
634
635
636PCI Device Blacklist:
637
638 'pci_parity_blacklist'
639
640 This control file allows for a list of PCI devices to be
641 skipped for scanning.
642 The list is a line of hexadecimal VENDOR and DEVICE ID tuples:
643
644 1022:7450,1434:16a6
645
646 One or more can be inserted, separated by a comma.
647
648 To write the above list, do the following as one command line:
649
650 echo "1022:7450,1434:16a6"
651 > /sys/devices/system/edac/pci/pci_parity_blacklist
652
653
654 To display what the blacklist currently contains,
655 simply 'cat' the same file.
656
657=======================================================================
658
659PCI Vendor and Devices IDs can be obtained with the lspci command. Using
660the -n option lspci will display the vendor and device IDs. The system
661administrator will have to determine which devices should be scanned or
662skipped.
663
664
665
666The two lists (white and black) are prioritized. blacklist is the lower
667priority and will NOT be utilized when a whitelist has been set.
668Turn OFF a whitelist by an empty echo command:
669
670 echo > /sys/devices/system/edac/pci/pci_parity_whitelist
671
672and any previous blacklist will be utilized.
673
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 6910c0136f8..391dd64363e 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -27,6 +27,7 @@ Currently, these files are in /proc/sys/vm:
27- laptop_mode 27- laptop_mode
28- block_dump 28- block_dump
29- drop-caches 29- drop-caches
30- zone_reclaim_mode
30 31
31============================================================== 32==============================================================
32 33
@@ -120,3 +121,20 @@ set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8)
120 121
121The initial value is zero. Kernel does not use this value at boot time to set 122The initial value is zero. Kernel does not use this value at boot time to set
122the high water marks for each per cpu page list. 123the high water marks for each per cpu page list.
124
125===============================================================
126
127zone_reclaim_mode:
128
129This is set during bootup to 1 if it is determined that pages from
130remote zones will cause a significant performance reduction. The
131page allocator will then reclaim easily reusable pages (those page
132cache pages that are currently not used) before going off node.
133
134The user can override this setting. It may be beneficial to switch
135off zone reclaim if the system is used for a file server and all
136of memory should be used for caching files from disk.
137
138It may be beneficial to switch this on if one wants to do zone
139reclaim regardless of the numa distances in the system.
140
diff --git a/MAINTAINERS b/MAINTAINERS
index ff16eac8cf5..3f8a90ac47d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -867,6 +867,15 @@ L: ebtables-devel@lists.sourceforge.net
867W: http://ebtables.sourceforge.net/ 867W: http://ebtables.sourceforge.net/
868S: Maintained 868S: Maintained
869 869
870EDAC-CORE
871P: Doug Thompson
872M: norsk5@xmission.com, dthompson@linuxnetworx.com
873P: Dave Peterson
874M: dsp@llnl.gov, dave_peterson@pobox.com
875L: bluesmoke-devel@lists.sourceforge.net
876W: bluesmoke.sourceforge.net
877S: Maintained
878
870EEPRO100 NETWORK DRIVER 879EEPRO100 NETWORK DRIVER
871P: Andrey V. Savochkin 880P: Andrey V. Savochkin
872M: saw@saw.sw.com.sg 881M: saw@saw.sw.com.sg
@@ -1398,7 +1407,7 @@ IRDA SUBSYSTEM
1398P: Jean Tourrilhes 1407P: Jean Tourrilhes
1399L: irda-users@lists.sourceforge.net (subscribers-only) 1408L: irda-users@lists.sourceforge.net (subscribers-only)
1400W: http://irda.sourceforge.net/ 1409W: http://irda.sourceforge.net/
1401S: Maintained 1410S: Odd Fixes
1402 1411
1403ISAPNP 1412ISAPNP
1404P: Jaroslav Kysela 1413P: Jaroslav Kysela
@@ -1843,7 +1852,14 @@ M: yoshfuji@linux-ipv6.org
1843P: Patrick McHardy 1852P: Patrick McHardy
1844M: kaber@coreworks.de 1853M: kaber@coreworks.de
1845L: netdev@vger.kernel.org 1854L: netdev@vger.kernel.org
1846T: git kernel.org:/pub/scm/linux/kernel/davem/net-2.6.git 1855T: git kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6.git
1856S: Maintained
1857
1858NETWORKING [WIRELESS]
1859P: John W. Linville
1860M: linville@tuxdriver.com
1861L: netdev@vger.kernel.org
1862T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
1847S: Maintained 1863S: Maintained
1848 1864
1849IPVS 1865IPVS
@@ -2536,11 +2552,11 @@ S: Maintained
2536 2552
2537TIPC NETWORK LAYER 2553TIPC NETWORK LAYER
2538P: Per Liden 2554P: Per Liden
2539M: per.liden@nospam.ericsson.com 2555M: per.liden@ericsson.com
2540P: Jon Maloy 2556P: Jon Maloy
2541M: jon.maloy@nospam.ericsson.com 2557M: jon.maloy@ericsson.com
2542P: Allan Stephens 2558P: Allan Stephens
2543M: allan.stephens@nospam.windriver.com 2559M: allan.stephens@windriver.com
2544L: tipc-discussion@lists.sourceforge.net 2560L: tipc-discussion@lists.sourceforge.net
2545W: http://tipc.sourceforge.net/ 2561W: http://tipc.sourceforge.net/
2546W: http://tipc.cslab.ericsson.net/ 2562W: http://tipc.cslab.ericsson.net/
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 01fe990d3e5..7fb14f42a12 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -960,7 +960,7 @@ osf_utimes(char __user *filename, struct timeval32 __user *tvs)
960 return -EFAULT; 960 return -EFAULT;
961 } 961 }
962 962
963 return do_utimes(filename, tvs ? ktvs : NULL); 963 return do_utimes(AT_FDCWD, filename, tvs ? ktvs : NULL);
964} 964}
965 965
966#define MAX_SELECT_SECONDS \ 966#define MAX_SELECT_SECONDS \
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index aaa47400eb9..db3389d8e02 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -334,7 +334,7 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
334 mov r1, #0x12 334 mov r1, #0x12
335 orr r1, r1, #3 << 10 335 orr r1, r1, #3 << 10
336 add r2, r3, #16384 336 add r2, r3, #16384
3371: cmp r1, r8 @ if virt > start of RAM 3371: cmp r1, r9 @ if virt > start of RAM
338 orrhs r1, r1, #0x0c @ set cacheable, bufferable 338 orrhs r1, r1, #0x0c @ set cacheable, bufferable
339 cmp r1, r10 @ if virt > end of RAM 339 cmp r1, r10 @ if virt > end of RAM
340 bichs r1, r1, #0x0c @ clear cacheable, bufferable 340 bichs r1, r1, #0x0c @ clear cacheable, bufferable
diff --git a/arch/arm/configs/ep80219_defconfig b/arch/arm/configs/ep80219_defconfig
index fbe312e757c..3c73b707c2f 100644
--- a/arch/arm/configs/ep80219_defconfig
+++ b/arch/arm/configs/ep80219_defconfig
@@ -522,6 +522,7 @@ CONFIG_E100=y
522# CONFIG_DL2K is not set 522# CONFIG_DL2K is not set
523CONFIG_E1000=y 523CONFIG_E1000=y
524CONFIG_E1000_NAPI=y 524CONFIG_E1000_NAPI=y
525# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
525# CONFIG_NS83820 is not set 526# CONFIG_NS83820 is not set
526# CONFIG_HAMACHI is not set 527# CONFIG_HAMACHI is not set
527# CONFIG_YELLOWFIN is not set 528# CONFIG_YELLOWFIN is not set
diff --git a/arch/arm/configs/iq31244_defconfig b/arch/arm/configs/iq31244_defconfig
index c07628ceaf0..32467160a6d 100644
--- a/arch/arm/configs/iq31244_defconfig
+++ b/arch/arm/configs/iq31244_defconfig
@@ -493,6 +493,7 @@ CONFIG_NETDEVICES=y
493# CONFIG_DL2K is not set 493# CONFIG_DL2K is not set
494CONFIG_E1000=y 494CONFIG_E1000=y
495CONFIG_E1000_NAPI=y 495CONFIG_E1000_NAPI=y
496# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
496# CONFIG_NS83820 is not set 497# CONFIG_NS83820 is not set
497# CONFIG_HAMACHI is not set 498# CONFIG_HAMACHI is not set
498# CONFIG_YELLOWFIN is not set 499# CONFIG_YELLOWFIN is not set
diff --git a/arch/arm/configs/iq80321_defconfig b/arch/arm/configs/iq80321_defconfig
index 18fa1615fdf..b000da753c4 100644
--- a/arch/arm/configs/iq80321_defconfig
+++ b/arch/arm/configs/iq80321_defconfig
@@ -415,6 +415,7 @@ CONFIG_NETDEVICES=y
415# CONFIG_DL2K is not set 415# CONFIG_DL2K is not set
416CONFIG_E1000=y 416CONFIG_E1000=y
417CONFIG_E1000_NAPI=y 417CONFIG_E1000_NAPI=y
418# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
418# CONFIG_NS83820 is not set 419# CONFIG_NS83820 is not set
419# CONFIG_HAMACHI is not set 420# CONFIG_HAMACHI is not set
420# CONFIG_YELLOWFIN is not set 421# CONFIG_YELLOWFIN is not set
diff --git a/arch/arm/configs/iq80331_defconfig b/arch/arm/configs/iq80331_defconfig
index f50035de1ff..46c79e1efe0 100644
--- a/arch/arm/configs/iq80331_defconfig
+++ b/arch/arm/configs/iq80331_defconfig
@@ -496,6 +496,7 @@ CONFIG_NETDEVICES=y
496# CONFIG_DL2K is not set 496# CONFIG_DL2K is not set
497CONFIG_E1000=y 497CONFIG_E1000=y
498CONFIG_E1000_NAPI=y 498CONFIG_E1000_NAPI=y
499# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
499# CONFIG_NS83820 is not set 500# CONFIG_NS83820 is not set
500# CONFIG_HAMACHI is not set 501# CONFIG_HAMACHI is not set
501# CONFIG_YELLOWFIN is not set 502# CONFIG_YELLOWFIN is not set
diff --git a/arch/arm/configs/iq80332_defconfig b/arch/arm/configs/iq80332_defconfig
index 18b3f372ed6..11959b705d8 100644
--- a/arch/arm/configs/iq80332_defconfig
+++ b/arch/arm/configs/iq80332_defconfig
@@ -496,6 +496,7 @@ CONFIG_NETDEVICES=y
496# CONFIG_DL2K is not set 496# CONFIG_DL2K is not set
497CONFIG_E1000=y 497CONFIG_E1000=y
498CONFIG_E1000_NAPI=y 498CONFIG_E1000_NAPI=y
499# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
499# CONFIG_NS83820 is not set 500# CONFIG_NS83820 is not set
500# CONFIG_HAMACHI is not set 501# CONFIG_HAMACHI is not set
501# CONFIG_YELLOWFIN is not set 502# CONFIG_YELLOWFIN is not set
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 874e6bb7940..d401d908c46 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -735,8 +735,11 @@ __kuser_cmpxchg: @ 0xffff0fc0
735 * The kernel itself must perform the operation. 735 * The kernel itself must perform the operation.
736 * A special ghost syscall is used for that (see traps.c). 736 * A special ghost syscall is used for that (see traps.c).
737 */ 737 */
738 stmfd sp!, {r7, lr}
739 mov r7, #0xff00 @ 0xfff0 into r7 for EABI
740 orr r7, r7, #0xf0
738 swi #0x9ffff0 741 swi #0x9ffff0
739 mov pc, lr 742 ldmfd sp!, {r7, pc}
740 743
741#elif __LINUX_ARM_ARCH__ < 6 744#elif __LINUX_ARM_ARCH__ < 6
742 745
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 765922bcf9e..a0cd0a90a10 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -30,15 +30,21 @@
30#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)) 30#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn))
31 31
32/* 32/*
33 * With EABI, the syscall number has to be loaded into r7.
34 */
35#define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
36#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
37
38/*
33 * For Thumb syscalls, we pass the syscall number via r7. We therefore 39 * For Thumb syscalls, we pass the syscall number via r7. We therefore
34 * need two 16-bit instructions. 40 * need two 16-bit instructions.
35 */ 41 */
36#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) 42#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
37#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) 43#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
38 44
39const unsigned long sigreturn_codes[4] = { 45const unsigned long sigreturn_codes[7] = {
40 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 46 MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
41 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 47 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
42}; 48};
43 49
44static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall); 50static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
@@ -189,7 +195,7 @@ struct aux_sigframe {
189struct sigframe { 195struct sigframe {
190 struct sigcontext sc; 196 struct sigcontext sc;
191 unsigned long extramask[_NSIG_WORDS-1]; 197 unsigned long extramask[_NSIG_WORDS-1];
192 unsigned long retcode; 198 unsigned long retcode[2];
193 struct aux_sigframe aux __attribute__((aligned(8))); 199 struct aux_sigframe aux __attribute__((aligned(8)));
194}; 200};
195 201
@@ -198,7 +204,7 @@ struct rt_sigframe {
198 void __user *puc; 204 void __user *puc;
199 struct siginfo info; 205 struct siginfo info;
200 struct ucontext uc; 206 struct ucontext uc;
201 unsigned long retcode; 207 unsigned long retcode[2];
202 struct aux_sigframe aux __attribute__((aligned(8))); 208 struct aux_sigframe aux __attribute__((aligned(8)));
203}; 209};
204 210
@@ -436,12 +442,13 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
436 if (ka->sa.sa_flags & SA_RESTORER) { 442 if (ka->sa.sa_flags & SA_RESTORER) {
437 retcode = (unsigned long)ka->sa.sa_restorer; 443 retcode = (unsigned long)ka->sa.sa_restorer;
438 } else { 444 } else {
439 unsigned int idx = thumb; 445 unsigned int idx = thumb << 1;
440 446
441 if (ka->sa.sa_flags & SA_SIGINFO) 447 if (ka->sa.sa_flags & SA_SIGINFO)
442 idx += 2; 448 idx += 3;
443 449
444 if (__put_user(sigreturn_codes[idx], rc)) 450 if (__put_user(sigreturn_codes[idx], rc) ||
451 __put_user(sigreturn_codes[idx+1], rc+1))
445 return 1; 452 return 1;
446 453
447 if (cpsr & MODE32_BIT) { 454 if (cpsr & MODE32_BIT) {
@@ -456,7 +463,7 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
456 * the return code written onto the stack. 463 * the return code written onto the stack.
457 */ 464 */
458 flush_icache_range((unsigned long)rc, 465 flush_icache_range((unsigned long)rc,
459 (unsigned long)(rc + 1)); 466 (unsigned long)(rc + 2));
460 467
461 retcode = ((unsigned long)rc) + thumb; 468 retcode = ((unsigned long)rc) + thumb;
462 } 469 }
@@ -488,7 +495,7 @@ setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *reg
488 } 495 }
489 496
490 if (err == 0) 497 if (err == 0)
491 err = setup_return(regs, ka, &frame->retcode, frame, usig); 498 err = setup_return(regs, ka, frame->retcode, frame, usig);
492 499
493 return err; 500 return err;
494} 501}
@@ -522,7 +529,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
522 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 529 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
523 530
524 if (err == 0) 531 if (err == 0)
525 err = setup_return(regs, ka, &frame->retcode, frame, usig); 532 err = setup_return(regs, ka, frame->retcode, frame, usig);
526 533
527 if (err == 0) { 534 if (err == 0) {
528 /* 535 /*
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 91d26faca62..9991049c522 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -9,4 +9,4 @@
9 */ 9 */
10#define KERN_SIGRETURN_CODE 0xffff0500 10#define KERN_SIGRETURN_CODE 0xffff0500
11 11
12extern const unsigned long sigreturn_codes[4]; 12extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 6b393691d0e..4bdc9d4526c 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -333,6 +333,7 @@ static struct platform_device *ixp46x_devices[] __initdata = {
333}; 333};
334 334
335unsigned long ixp4xx_exp_bus_size; 335unsigned long ixp4xx_exp_bus_size;
336EXPORT_SYMBOL(ixp4xx_exp_bus_size);
336 337
337void __init ixp4xx_sys_init(void) 338void __init ixp4xx_sys_init(void)
338{ 339{
@@ -352,7 +353,7 @@ void __init ixp4xx_sys_init(void)
352 } 353 }
353 } 354 }
354 355
355 printk("IXP4xx: Using %uMiB expansion bus window size\n", 356 printk("IXP4xx: Using %luMiB expansion bus window size\n",
356 ixp4xx_exp_bus_size >> 20); 357 ixp4xx_exp_bus_size >> 20);
357} 358}
358 359
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 9d862f86bba..75110ba1042 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -50,10 +50,10 @@ static int omap1_clk_enable_dsp_domain(struct clk *clk)
50{ 50{
51 int retval; 51 int retval;
52 52
53 retval = omap1_clk_use(&api_ck.clk); 53 retval = omap1_clk_enable(&api_ck.clk);
54 if (!retval) { 54 if (!retval) {
55 retval = omap1_clk_enable(clk); 55 retval = omap1_clk_enable_generic(clk);
56 omap1_clk_unuse(&api_ck.clk); 56 omap1_clk_disable(&api_ck.clk);
57 } 57 }
58 58
59 return retval; 59 return retval;
@@ -61,9 +61,9 @@ static int omap1_clk_enable_dsp_domain(struct clk *clk)
61 61
62static void omap1_clk_disable_dsp_domain(struct clk *clk) 62static void omap1_clk_disable_dsp_domain(struct clk *clk)
63{ 63{
64 if (omap1_clk_use(&api_ck.clk) == 0) { 64 if (omap1_clk_enable(&api_ck.clk) == 0) {
65 omap1_clk_disable(clk); 65 omap1_clk_disable_generic(clk);
66 omap1_clk_unuse(&api_ck.clk); 66 omap1_clk_disable(&api_ck.clk);
67 } 67 }
68} 68}
69 69
@@ -72,7 +72,7 @@ static int omap1_clk_enable_uart_functional(struct clk *clk)
72 int ret; 72 int ret;
73 struct uart_clk *uclk; 73 struct uart_clk *uclk;
74 74
75 ret = omap1_clk_enable(clk); 75 ret = omap1_clk_enable_generic(clk);
76 if (ret == 0) { 76 if (ret == 0) {
77 /* Set smart idle acknowledgement mode */ 77 /* Set smart idle acknowledgement mode */
78 uclk = (struct uart_clk *)clk; 78 uclk = (struct uart_clk *)clk;
@@ -91,7 +91,7 @@ static void omap1_clk_disable_uart_functional(struct clk *clk)
91 uclk = (struct uart_clk *)clk; 91 uclk = (struct uart_clk *)clk;
92 omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr); 92 omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
93 93
94 omap1_clk_disable(clk); 94 omap1_clk_disable_generic(clk);
95} 95}
96 96
97static void omap1_clk_allow_idle(struct clk *clk) 97static void omap1_clk_allow_idle(struct clk *clk)
@@ -230,9 +230,9 @@ static void omap1_ckctl_recalc_dsp_domain(struct clk * clk)
230 * Note that DSP_CKCTL virt addr = phys addr, so 230 * Note that DSP_CKCTL virt addr = phys addr, so
231 * we must use __raw_readw() instead of omap_readw(). 231 * we must use __raw_readw() instead of omap_readw().
232 */ 232 */
233 omap1_clk_use(&api_ck.clk); 233 omap1_clk_enable(&api_ck.clk);
234 dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset)); 234 dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
235 omap1_clk_unuse(&api_ck.clk); 235 omap1_clk_disable(&api_ck.clk);
236 236
237 if (unlikely(clk->rate == clk->parent->rate / dsor)) 237 if (unlikely(clk->rate == clk->parent->rate / dsor))
238 return; /* No change, quick exit */ 238 return; /* No change, quick exit */
@@ -412,12 +412,12 @@ static void omap1_init_ext_clk(struct clk * clk)
412 clk-> rate = 96000000 / dsor; 412 clk-> rate = 96000000 / dsor;
413} 413}
414 414
415static int omap1_clk_use(struct clk *clk) 415static int omap1_clk_enable(struct clk *clk)
416{ 416{
417 int ret = 0; 417 int ret = 0;
418 if (clk->usecount++ == 0) { 418 if (clk->usecount++ == 0) {
419 if (likely(clk->parent)) { 419 if (likely(clk->parent)) {
420 ret = omap1_clk_use(clk->parent); 420 ret = omap1_clk_enable(clk->parent);
421 421
422 if (unlikely(ret != 0)) { 422 if (unlikely(ret != 0)) {
423 clk->usecount--; 423 clk->usecount--;
@@ -432,7 +432,7 @@ static int omap1_clk_use(struct clk *clk)
432 ret = clk->enable(clk); 432 ret = clk->enable(clk);
433 433
434 if (unlikely(ret != 0) && clk->parent) { 434 if (unlikely(ret != 0) && clk->parent) {
435 omap1_clk_unuse(clk->parent); 435 omap1_clk_disable(clk->parent);
436 clk->usecount--; 436 clk->usecount--;
437 } 437 }
438 } 438 }
@@ -440,12 +440,12 @@ static int omap1_clk_use(struct clk *clk)
440 return ret; 440 return ret;
441} 441}
442 442
443static void omap1_clk_unuse(struct clk *clk) 443static void omap1_clk_disable(struct clk *clk)
444{ 444{
445 if (clk->usecount > 0 && !(--clk->usecount)) { 445 if (clk->usecount > 0 && !(--clk->usecount)) {
446 clk->disable(clk); 446 clk->disable(clk);
447 if (likely(clk->parent)) { 447 if (likely(clk->parent)) {
448 omap1_clk_unuse(clk->parent); 448 omap1_clk_disable(clk->parent);
449 if (clk->flags & CLOCK_NO_IDLE_PARENT) 449 if (clk->flags & CLOCK_NO_IDLE_PARENT)
450 if (!cpu_is_omap24xx()) 450 if (!cpu_is_omap24xx())
451 omap1_clk_allow_idle(clk->parent); 451 omap1_clk_allow_idle(clk->parent);
@@ -453,7 +453,7 @@ static void omap1_clk_unuse(struct clk *clk)
453 } 453 }
454} 454}
455 455
456static int omap1_clk_enable(struct clk *clk) 456static int omap1_clk_enable_generic(struct clk *clk)
457{ 457{
458 __u16 regval16; 458 __u16 regval16;
459 __u32 regval32; 459 __u32 regval32;
@@ -492,7 +492,7 @@ static int omap1_clk_enable(struct clk *clk)
492 return 0; 492 return 0;
493} 493}
494 494
495static void omap1_clk_disable(struct clk *clk) 495static void omap1_clk_disable_generic(struct clk *clk)
496{ 496{
497 __u16 regval16; 497 __u16 regval16;
498 __u32 regval32; 498 __u32 regval32;
@@ -654,8 +654,8 @@ late_initcall(omap1_late_clk_reset);
654#endif 654#endif
655 655
656static struct clk_functions omap1_clk_functions = { 656static struct clk_functions omap1_clk_functions = {
657 .clk_use = omap1_clk_use, 657 .clk_enable = omap1_clk_enable,
658 .clk_unuse = omap1_clk_unuse, 658 .clk_disable = omap1_clk_disable,
659 .clk_round_rate = omap1_clk_round_rate, 659 .clk_round_rate = omap1_clk_round_rate,
660 .clk_set_rate = omap1_clk_set_rate, 660 .clk_set_rate = omap1_clk_set_rate,
661}; 661};
@@ -780,9 +780,9 @@ int __init omap1_clk_init(void)
780 * Only enable those clocks we will need, let the drivers 780 * Only enable those clocks we will need, let the drivers
781 * enable other clocks as necessary 781 * enable other clocks as necessary
782 */ 782 */
783 clk_use(&armper_ck.clk); 783 clk_enable(&armper_ck.clk);
784 clk_use(&armxor_ck.clk); 784 clk_enable(&armxor_ck.clk);
785 clk_use(&armtim_ck.clk); /* This should be done by timer code */ 785 clk_enable(&armtim_ck.clk); /* This should be done by timer code */
786 786
787 if (cpu_is_omap1510()) 787 if (cpu_is_omap1510())
788 clk_enable(&arm_gpio_ck); 788 clk_enable(&arm_gpio_ck);
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index f3bdfb50e01..4f18d1b9444 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -13,8 +13,8 @@
13#ifndef __ARCH_ARM_MACH_OMAP1_CLOCK_H 13#ifndef __ARCH_ARM_MACH_OMAP1_CLOCK_H
14#define __ARCH_ARM_MACH_OMAP1_CLOCK_H 14#define __ARCH_ARM_MACH_OMAP1_CLOCK_H
15 15
16static int omap1_clk_enable(struct clk * clk); 16static int omap1_clk_enable_generic(struct clk * clk);
17static void omap1_clk_disable(struct clk * clk); 17static void omap1_clk_disable_generic(struct clk * clk);
18static void omap1_ckctl_recalc(struct clk * clk); 18static void omap1_ckctl_recalc(struct clk * clk);
19static void omap1_watchdog_recalc(struct clk * clk); 19static void omap1_watchdog_recalc(struct clk * clk);
20static void omap1_ckctl_recalc_dsp_domain(struct clk * clk); 20static void omap1_ckctl_recalc_dsp_domain(struct clk * clk);
@@ -30,8 +30,8 @@ static long omap1_round_ext_clk_rate(struct clk * clk, unsigned long rate);
30static void omap1_init_ext_clk(struct clk * clk); 30static void omap1_init_ext_clk(struct clk * clk);
31static int omap1_select_table_rate(struct clk * clk, unsigned long rate); 31static int omap1_select_table_rate(struct clk * clk, unsigned long rate);
32static long omap1_round_to_table_rate(struct clk * clk, unsigned long rate); 32static long omap1_round_to_table_rate(struct clk * clk, unsigned long rate);
33static int omap1_clk_use(struct clk *clk); 33static int omap1_clk_enable(struct clk *clk);
34static void omap1_clk_unuse(struct clk *clk); 34static void omap1_clk_disable(struct clk *clk);
35 35
36struct mpu_rate { 36struct mpu_rate {
37 unsigned long rate; 37 unsigned long rate;
@@ -152,8 +152,8 @@ static struct clk ck_ref = {
152 .rate = 12000000, 152 .rate = 12000000,
153 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 153 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
154 ALWAYS_ENABLED, 154 ALWAYS_ENABLED,
155 .enable = &omap1_clk_enable, 155 .enable = &omap1_clk_enable_generic,
156 .disable = &omap1_clk_disable, 156 .disable = &omap1_clk_disable_generic,
157}; 157};
158 158
159static struct clk ck_dpll1 = { 159static struct clk ck_dpll1 = {
@@ -161,8 +161,8 @@ static struct clk ck_dpll1 = {
161 .parent = &ck_ref, 161 .parent = &ck_ref,
162 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 162 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
163 RATE_PROPAGATES | ALWAYS_ENABLED, 163 RATE_PROPAGATES | ALWAYS_ENABLED,
164 .enable = &omap1_clk_enable, 164 .enable = &omap1_clk_enable_generic,
165 .disable = &omap1_clk_disable, 165 .disable = &omap1_clk_disable_generic,
166}; 166};
167 167
168static struct arm_idlect1_clk ck_dpll1out = { 168static struct arm_idlect1_clk ck_dpll1out = {
@@ -173,8 +173,8 @@ static struct arm_idlect1_clk ck_dpll1out = {
173 .enable_reg = (void __iomem *)ARM_IDLECT2, 173 .enable_reg = (void __iomem *)ARM_IDLECT2,
174 .enable_bit = EN_CKOUT_ARM, 174 .enable_bit = EN_CKOUT_ARM,
175 .recalc = &followparent_recalc, 175 .recalc = &followparent_recalc,
176 .enable = &omap1_clk_enable, 176 .enable = &omap1_clk_enable_generic,
177 .disable = &omap1_clk_disable, 177 .disable = &omap1_clk_disable_generic,
178 }, 178 },
179 .idlect_shift = 12, 179 .idlect_shift = 12,
180}; 180};
@@ -186,8 +186,8 @@ static struct clk arm_ck = {
186 RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED, 186 RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
187 .rate_offset = CKCTL_ARMDIV_OFFSET, 187 .rate_offset = CKCTL_ARMDIV_OFFSET,
188 .recalc = &omap1_ckctl_recalc, 188 .recalc = &omap1_ckctl_recalc,
189 .enable = &omap1_clk_enable, 189 .enable = &omap1_clk_enable_generic,
190 .disable = &omap1_clk_disable, 190 .disable = &omap1_clk_disable_generic,
191}; 191};
192 192
193static struct arm_idlect1_clk armper_ck = { 193static struct arm_idlect1_clk armper_ck = {
@@ -200,8 +200,8 @@ static struct arm_idlect1_clk armper_ck = {
200 .enable_bit = EN_PERCK, 200 .enable_bit = EN_PERCK,
201 .rate_offset = CKCTL_PERDIV_OFFSET, 201 .rate_offset = CKCTL_PERDIV_OFFSET,
202 .recalc = &omap1_ckctl_recalc, 202 .recalc = &omap1_ckctl_recalc,
203 .enable = &omap1_clk_enable, 203 .enable = &omap1_clk_enable_generic,
204 .disable = &omap1_clk_disable, 204 .disable = &omap1_clk_disable_generic,
205 }, 205 },
206 .idlect_shift = 2, 206 .idlect_shift = 2,
207}; 207};
@@ -213,8 +213,8 @@ static struct clk arm_gpio_ck = {
213 .enable_reg = (void __iomem *)ARM_IDLECT2, 213 .enable_reg = (void __iomem *)ARM_IDLECT2,
214 .enable_bit = EN_GPIOCK, 214 .enable_bit = EN_GPIOCK,
215 .recalc = &followparent_recalc, 215 .recalc = &followparent_recalc,
216 .enable = &omap1_clk_enable, 216 .enable = &omap1_clk_enable_generic,
217 .disable = &omap1_clk_disable, 217 .disable = &omap1_clk_disable_generic,
218}; 218};
219 219
220static struct arm_idlect1_clk armxor_ck = { 220static struct arm_idlect1_clk armxor_ck = {
@@ -226,8 +226,8 @@ static struct arm_idlect1_clk armxor_ck = {
226 .enable_reg = (void __iomem *)ARM_IDLECT2, 226 .enable_reg = (void __iomem *)ARM_IDLECT2,
227 .enable_bit = EN_XORPCK, 227 .enable_bit = EN_XORPCK,
228 .recalc = &followparent_recalc, 228 .recalc = &followparent_recalc,
229 .enable = &omap1_clk_enable, 229 .enable = &omap1_clk_enable_generic,
230 .disable = &omap1_clk_disable, 230 .disable = &omap1_clk_disable_generic,
231 }, 231 },
232 .idlect_shift = 1, 232 .idlect_shift = 1,
233}; 233};
@@ -241,8 +241,8 @@ static struct arm_idlect1_clk armtim_ck = {
241 .enable_reg = (void __iomem *)ARM_IDLECT2, 241 .enable_reg = (void __iomem *)ARM_IDLECT2,
242 .enable_bit = EN_TIMCK, 242 .enable_bit = EN_TIMCK,
243 .recalc = &followparent_recalc, 243 .recalc = &followparent_recalc,
244 .enable = &omap1_clk_enable, 244 .enable = &omap1_clk_enable_generic,
245 .disable = &omap1_clk_disable, 245 .disable = &omap1_clk_disable_generic,
246 }, 246 },
247 .idlect_shift = 9, 247 .idlect_shift = 9,
248}; 248};
@@ -256,8 +256,8 @@ static struct arm_idlect1_clk armwdt_ck = {
256 .enable_reg = (void __iomem *)ARM_IDLECT2, 256 .enable_reg = (void __iomem *)ARM_IDLECT2,
257 .enable_bit = EN_WDTCK, 257 .enable_bit = EN_WDTCK,
258 .recalc = &omap1_watchdog_recalc, 258 .recalc = &omap1_watchdog_recalc,
259 .enable = &omap1_clk_enable, 259 .enable = &omap1_clk_enable_generic,
260 .disable = &omap1_clk_disable, 260 .disable = &omap1_clk_disable_generic,
261 }, 261 },
262 .idlect_shift = 0, 262 .idlect_shift = 0,
263}; 263};
@@ -272,8 +272,8 @@ static struct clk arminth_ck16xx = {
272 * 272 *
273 * 1510 version is in TC clocks. 273 * 1510 version is in TC clocks.
274 */ 274 */
275 .enable = &omap1_clk_enable, 275 .enable = &omap1_clk_enable_generic,
276 .disable = &omap1_clk_disable, 276 .disable = &omap1_clk_disable_generic,
277}; 277};
278 278
279static struct clk dsp_ck = { 279static struct clk dsp_ck = {
@@ -285,8 +285,8 @@ static struct clk dsp_ck = {
285 .enable_bit = EN_DSPCK, 285 .enable_bit = EN_DSPCK,
286 .rate_offset = CKCTL_DSPDIV_OFFSET, 286 .rate_offset = CKCTL_DSPDIV_OFFSET,
287 .recalc = &omap1_ckctl_recalc, 287 .recalc = &omap1_ckctl_recalc,
288 .enable = &omap1_clk_enable, 288 .enable = &omap1_clk_enable_generic,
289 .disable = &omap1_clk_disable, 289 .disable = &omap1_clk_disable_generic,
290}; 290};
291 291
292static struct clk dspmmu_ck = { 292static struct clk dspmmu_ck = {
@@ -296,8 +296,8 @@ static struct clk dspmmu_ck = {
296 RATE_CKCTL | ALWAYS_ENABLED, 296 RATE_CKCTL | ALWAYS_ENABLED,
297 .rate_offset = CKCTL_DSPMMUDIV_OFFSET, 297 .rate_offset = CKCTL_DSPMMUDIV_OFFSET,
298 .recalc = &omap1_ckctl_recalc, 298 .recalc = &omap1_ckctl_recalc,
299 .enable = &omap1_clk_enable, 299 .enable = &omap1_clk_enable_generic,
300 .disable = &omap1_clk_disable, 300 .disable = &omap1_clk_disable_generic,
301}; 301};
302 302
303static struct clk dspper_ck = { 303static struct clk dspper_ck = {
@@ -349,8 +349,8 @@ static struct arm_idlect1_clk tc_ck = {
349 CLOCK_IDLE_CONTROL, 349 CLOCK_IDLE_CONTROL,
350 .rate_offset = CKCTL_TCDIV_OFFSET, 350 .rate_offset = CKCTL_TCDIV_OFFSET,
351 .recalc = &omap1_ckctl_recalc, 351 .recalc = &omap1_ckctl_recalc,
352 .enable = &omap1_clk_enable, 352 .enable = &omap1_clk_enable_generic,
353 .disable = &omap1_clk_disable, 353 .disable = &omap1_clk_disable_generic,
354 }, 354 },
355 .idlect_shift = 6, 355 .idlect_shift = 6,
356}; 356};
@@ -364,8 +364,8 @@ static struct clk arminth_ck1510 = {
364 * 364 *
365 * 16xx version is in MPU clocks. 365 * 16xx version is in MPU clocks.
366 */ 366 */
367 .enable = &omap1_clk_enable, 367 .enable = &omap1_clk_enable_generic,
368 .disable = &omap1_clk_disable, 368 .disable = &omap1_clk_disable_generic,
369}; 369};
370 370
371static struct clk tipb_ck = { 371static struct clk tipb_ck = {
@@ -374,8 +374,8 @@ static struct clk tipb_ck = {
374 .parent = &tc_ck.clk, 374 .parent = &tc_ck.clk,
375 .flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED, 375 .flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
376 .recalc = &followparent_recalc, 376 .recalc = &followparent_recalc,
377 .enable = &omap1_clk_enable, 377 .enable = &omap1_clk_enable_generic,
378 .disable = &omap1_clk_disable, 378 .disable = &omap1_clk_disable_generic,
379}; 379};
380 380
381static struct clk l3_ocpi_ck = { 381static struct clk l3_ocpi_ck = {
@@ -386,8 +386,8 @@ static struct clk l3_ocpi_ck = {
386 .enable_reg = (void __iomem *)ARM_IDLECT3, 386 .enable_reg = (void __iomem *)ARM_IDLECT3,
387 .enable_bit = EN_OCPI_CK, 387 .enable_bit = EN_OCPI_CK,
388 .recalc = &followparent_recalc, 388 .recalc = &followparent_recalc,
389 .enable = &omap1_clk_enable, 389 .enable = &omap1_clk_enable_generic,
390 .disable = &omap1_clk_disable, 390 .disable = &omap1_clk_disable_generic,
391}; 391};
392 392
393static struct clk tc1_ck = { 393static struct clk tc1_ck = {
@@ -397,8 +397,8 @@ static struct clk tc1_ck = {
397 .enable_reg = (void __iomem *)ARM_IDLECT3, 397 .enable_reg = (void __iomem *)ARM_IDLECT3,
398 .enable_bit = EN_TC1_CK, 398 .enable_bit = EN_TC1_CK,
399 .recalc = &followparent_recalc, 399 .recalc = &followparent_recalc,
400 .enable = &omap1_clk_enable, 400 .enable = &omap1_clk_enable_generic,
401 .disable = &omap1_clk_disable, 401 .disable = &omap1_clk_disable_generic,
402}; 402};
403 403
404static struct clk tc2_ck = { 404static struct clk tc2_ck = {
@@ -408,8 +408,8 @@ static struct clk tc2_ck = {
408 .enable_reg = (void __iomem *)ARM_IDLECT3, 408 .enable_reg = (void __iomem *)ARM_IDLECT3,
409 .enable_bit = EN_TC2_CK, 409 .enable_bit = EN_TC2_CK,
410 .recalc = &followparent_recalc, 410 .recalc = &followparent_recalc,
411 .enable = &omap1_clk_enable, 411 .enable = &omap1_clk_enable_generic,
412 .disable = &omap1_clk_disable, 412 .disable = &omap1_clk_disable_generic,
413}; 413};
414 414
415static struct clk dma_ck = { 415static struct clk dma_ck = {
@@ -419,8 +419,8 @@ static struct clk dma_ck = {
419 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 419 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
420 ALWAYS_ENABLED, 420 ALWAYS_ENABLED,
421 .recalc = &followparent_recalc, 421 .recalc = &followparent_recalc,
422 .enable = &omap1_clk_enable, 422 .enable = &omap1_clk_enable_generic,
423 .disable = &omap1_clk_disable, 423 .disable = &omap1_clk_disable_generic,
424}; 424};
425 425
426static struct clk dma_lcdfree_ck = { 426static struct clk dma_lcdfree_ck = {
@@ -428,8 +428,8 @@ static struct clk dma_lcdfree_ck = {
428 .parent = &tc_ck.clk, 428 .parent = &tc_ck.clk,
429 .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, 429 .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
430 .recalc = &followparent_recalc, 430 .recalc = &followparent_recalc,
431 .enable = &omap1_clk_enable, 431 .enable = &omap1_clk_enable_generic,
432 .disable = &omap1_clk_disable, 432 .disable = &omap1_clk_disable_generic,
433}; 433};
434 434
435static struct arm_idlect1_clk api_ck = { 435static struct arm_idlect1_clk api_ck = {
@@ -441,8 +441,8 @@ static struct arm_idlect1_clk api_ck = {
441 .enable_reg = (void __iomem *)ARM_IDLECT2, 441 .enable_reg = (void __iomem *)ARM_IDLECT2,
442 .enable_bit = EN_APICK, 442 .enable_bit = EN_APICK,
443 .recalc = &followparent_recalc, 443 .recalc = &followparent_recalc,
444 .enable = &omap1_clk_enable, 444 .enable = &omap1_clk_enable_generic,
445 .disable = &omap1_clk_disable, 445 .disable = &omap1_clk_disable_generic,
446 }, 446 },
447 .idlect_shift = 8, 447 .idlect_shift = 8,
448}; 448};
@@ -455,8 +455,8 @@ static struct arm_idlect1_clk lb_ck = {
455 .enable_reg = (void __iomem *)ARM_IDLECT2, 455 .enable_reg = (void __iomem *)ARM_IDLECT2,
456 .enable_bit = EN_LBCK, 456 .enable_bit = EN_LBCK,
457 .recalc = &followparent_recalc, 457 .recalc = &followparent_recalc,
458 .enable = &omap1_clk_enable, 458 .enable = &omap1_clk_enable_generic,
459 .disable = &omap1_clk_disable, 459 .disable = &omap1_clk_disable_generic,
460 }, 460 },
461 .idlect_shift = 4, 461 .idlect_shift = 4,
462}; 462};
@@ -466,8 +466,8 @@ static struct clk rhea1_ck = {
466 .parent = &tc_ck.clk, 466 .parent = &tc_ck.clk,
467 .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, 467 .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
468 .recalc = &followparent_recalc, 468 .recalc = &followparent_recalc,
469 .enable = &omap1_clk_enable, 469 .enable = &omap1_clk_enable_generic,
470 .disable = &omap1_clk_disable, 470 .disable = &omap1_clk_disable_generic,
471}; 471};
472 472
473static struct clk rhea2_ck = { 473static struct clk rhea2_ck = {
@@ -475,8 +475,8 @@ static struct clk rhea2_ck = {
475 .parent = &tc_ck.clk, 475 .parent = &tc_ck.clk,
476 .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, 476 .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
477 .recalc = &followparent_recalc, 477 .recalc = &followparent_recalc,
478 .enable = &omap1_clk_enable, 478 .enable = &omap1_clk_enable_generic,
479 .disable = &omap1_clk_disable, 479 .disable = &omap1_clk_disable_generic,
480}; 480};
481 481
482static struct clk lcd_ck_16xx = { 482static struct clk lcd_ck_16xx = {
@@ -487,8 +487,8 @@ static struct clk lcd_ck_16xx = {
487 .enable_bit = EN_LCDCK, 487 .enable_bit = EN_LCDCK,
488 .rate_offset = CKCTL_LCDDIV_OFFSET, 488 .rate_offset = CKCTL_LCDDIV_OFFSET,
489 .recalc = &omap1_ckctl_recalc, 489 .recalc = &omap1_ckctl_recalc,
490 .enable = &omap1_clk_enable, 490 .enable = &omap1_clk_enable_generic,
491 .disable = &omap1_clk_disable, 491 .disable = &omap1_clk_disable_generic,
492}; 492};
493 493
494static struct arm_idlect1_clk lcd_ck_1510 = { 494static struct arm_idlect1_clk lcd_ck_1510 = {
@@ -501,8 +501,8 @@ static struct arm_idlect1_clk lcd_ck_1510 = {
501 .enable_bit = EN_LCDCK, 501 .enable_bit = EN_LCDCK,
502 .rate_offset = CKCTL_LCDDIV_OFFSET, 502 .rate_offset = CKCTL_LCDDIV_OFFSET,
503 .recalc = &omap1_ckctl_recalc, 503 .recalc = &omap1_ckctl_recalc,
504 .enable = &omap1_clk_enable, 504 .enable = &omap1_clk_enable_generic,
505 .disable = &omap1_clk_disable, 505 .disable = &omap1_clk_disable_generic,
506 }, 506 },
507 .idlect_shift = 3, 507 .idlect_shift = 3,
508}; 508};
@@ -518,8 +518,8 @@ static struct clk uart1_1510 = {
518 .enable_bit = 29, /* Chooses between 12MHz and 48MHz */ 518 .enable_bit = 29, /* Chooses between 12MHz and 48MHz */
519 .set_rate = &omap1_set_uart_rate, 519 .set_rate = &omap1_set_uart_rate,
520 .recalc = &omap1_uart_recalc, 520 .recalc = &omap1_uart_recalc,
521 .enable = &omap1_clk_enable, 521 .enable = &omap1_clk_enable_generic,
522 .disable = &omap1_clk_disable, 522 .disable = &omap1_clk_disable_generic,
523}; 523};
524 524
525static struct uart_clk uart1_16xx = { 525static struct uart_clk uart1_16xx = {
@@ -550,8 +550,8 @@ static struct clk uart2_ck = {
550 .enable_bit = 30, /* Chooses between 12MHz and 48MHz */ 550 .enable_bit = 30, /* Chooses between 12MHz and 48MHz */
551 .set_rate = &omap1_set_uart_rate, 551 .set_rate = &omap1_set_uart_rate,
552 .recalc = &omap1_uart_recalc, 552 .recalc = &omap1_uart_recalc,
553 .enable = &omap1_clk_enable, 553 .enable = &omap1_clk_enable_generic,
554 .disable = &omap1_clk_disable, 554 .disable = &omap1_clk_disable_generic,
555}; 555};
556 556
557static struct clk uart3_1510 = { 557static struct clk uart3_1510 = {
@@ -565,8 +565,8 @@ static struct clk uart3_1510 = {
565 .enable_bit = 31, /* Chooses between 12MHz and 48MHz */ 565 .enable_bit = 31, /* Chooses between 12MHz and 48MHz */
566 .set_rate = &omap1_set_uart_rate, 566 .set_rate = &omap1_set_uart_rate,
567 .recalc = &omap1_uart_recalc, 567 .recalc = &omap1_uart_recalc,
568 .enable = &omap1_clk_enable, 568 .enable = &omap1_clk_enable_generic,
569 .disable = &omap1_clk_disable, 569 .disable = &omap1_clk_disable_generic,
570}; 570};
571 571
572static struct uart_clk uart3_16xx = { 572static struct uart_clk uart3_16xx = {
@@ -593,8 +593,8 @@ static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
593 RATE_FIXED | ENABLE_REG_32BIT, 593 RATE_FIXED | ENABLE_REG_32BIT,
594 .enable_reg = (void __iomem *)ULPD_CLOCK_CTRL, 594 .enable_reg = (void __iomem *)ULPD_CLOCK_CTRL,
595 .enable_bit = USB_MCLK_EN_BIT, 595 .enable_bit = USB_MCLK_EN_BIT,
596 .enable = &omap1_clk_enable, 596 .enable = &omap1_clk_enable_generic,
597 .disable = &omap1_clk_disable, 597 .disable = &omap1_clk_disable_generic,
598}; 598};
599 599
600static struct clk usb_hhc_ck1510 = { 600static struct clk usb_hhc_ck1510 = {
@@ -605,8 +605,8 @@ static struct clk usb_hhc_ck1510 = {
605 RATE_FIXED | ENABLE_REG_32BIT, 605 RATE_FIXED | ENABLE_REG_32BIT,
606 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0, 606 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
607 .enable_bit = USB_HOST_HHC_UHOST_EN, 607 .enable_bit = USB_HOST_HHC_UHOST_EN,
608 .enable = &omap1_clk_enable, 608 .enable = &omap1_clk_enable_generic,
609 .disable = &omap1_clk_disable, 609 .disable = &omap1_clk_disable_generic,
610}; 610};
611 611
612static struct clk usb_hhc_ck16xx = { 612static struct clk usb_hhc_ck16xx = {
@@ -618,8 +618,8 @@ static struct clk usb_hhc_ck16xx = {
618 RATE_FIXED | ENABLE_REG_32BIT, 618 RATE_FIXED | ENABLE_REG_32BIT,
619 .enable_reg = (void __iomem *)OTG_BASE + 0x08 /* OTG_SYSCON_2 */, 619 .enable_reg = (void __iomem *)OTG_BASE + 0x08 /* OTG_SYSCON_2 */,
620 .enable_bit = 8 /* UHOST_EN */, 620 .enable_bit = 8 /* UHOST_EN */,
621 .enable = &omap1_clk_enable, 621 .enable = &omap1_clk_enable_generic,
622 .disable = &omap1_clk_disable, 622 .disable = &omap1_clk_disable_generic,
623}; 623};
624 624
625static struct clk usb_dc_ck = { 625static struct clk usb_dc_ck = {
@@ -629,8 +629,8 @@ static struct clk usb_dc_ck = {
629 .flags = CLOCK_IN_OMAP16XX | RATE_FIXED, 629 .flags = CLOCK_IN_OMAP16XX | RATE_FIXED,
630 .enable_reg = (void __iomem *)SOFT_REQ_REG, 630 .enable_reg = (void __iomem *)SOFT_REQ_REG,
631 .enable_bit = 4, 631 .enable_bit = 4,
632 .enable = &omap1_clk_enable, 632 .enable = &omap1_clk_enable_generic,
633 .disable = &omap1_clk_disable, 633 .disable = &omap1_clk_disable_generic,
634}; 634};
635 635
636static struct clk mclk_1510 = { 636static struct clk mclk_1510 = {
@@ -638,8 +638,8 @@ static struct clk mclk_1510 = {
638 /* Direct from ULPD, no parent. May be enabled by ext hardware. */ 638 /* Direct from ULPD, no parent. May be enabled by ext hardware. */
639 .rate = 12000000, 639 .rate = 12000000,
640 .flags = CLOCK_IN_OMAP1510 | RATE_FIXED, 640 .flags = CLOCK_IN_OMAP1510 | RATE_FIXED,
641 .enable = &omap1_clk_enable, 641 .enable = &omap1_clk_enable_generic,
642 .disable = &omap1_clk_disable, 642 .disable = &omap1_clk_disable_generic,
643}; 643};
644 644
645static struct clk mclk_16xx = { 645static struct clk mclk_16xx = {
@@ -651,8 +651,8 @@ static struct clk mclk_16xx = {
651 .set_rate = &omap1_set_ext_clk_rate, 651 .set_rate = &omap1_set_ext_clk_rate,
652 .round_rate = &omap1_round_ext_clk_rate, 652 .round_rate = &omap1_round_ext_clk_rate,
653 .init = &omap1_init_ext_clk, 653 .init = &omap1_init_ext_clk,
654 .enable = &omap1_clk_enable, 654 .enable = &omap1_clk_enable_generic,
655 .disable = &omap1_clk_disable, 655 .disable = &omap1_clk_disable_generic,
656}; 656};
657 657
658static struct clk bclk_1510 = { 658static struct clk bclk_1510 = {
@@ -660,8 +660,8 @@ static struct clk bclk_1510 = {
660 /* Direct from ULPD, no parent. May be enabled by ext hardware. */ 660 /* Direct from ULPD, no parent. May be enabled by ext hardware. */
661 .rate = 12000000, 661 .rate = 12000000,
662 .flags = CLOCK_IN_OMAP1510 | RATE_FIXED, 662 .flags = CLOCK_IN_OMAP1510 | RATE_FIXED,
663 .enable = &omap1_clk_enable, 663 .enable = &omap1_clk_enable_generic,
664 .disable = &omap1_clk_disable, 664 .disable = &omap1_clk_disable_generic,
665}; 665};
666 666
667static struct clk bclk_16xx = { 667static struct clk bclk_16xx = {
@@ -673,8 +673,8 @@ static struct clk bclk_16xx = {
673 .set_rate = &omap1_set_ext_clk_rate, 673 .set_rate = &omap1_set_ext_clk_rate,
674 .round_rate = &omap1_round_ext_clk_rate, 674 .round_rate = &omap1_round_ext_clk_rate,
675 .init = &omap1_init_ext_clk, 675 .init = &omap1_init_ext_clk,
676 .enable = &omap1_clk_enable, 676 .enable = &omap1_clk_enable_generic,
677 .disable = &omap1_clk_disable, 677 .disable = &omap1_clk_disable_generic,
678}; 678};
679 679
680static struct clk mmc1_ck = { 680static struct clk mmc1_ck = {
@@ -686,8 +686,8 @@ static struct clk mmc1_ck = {
686 RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT, 686 RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
687 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0, 687 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
688 .enable_bit = 23, 688 .enable_bit = 23,
689 .enable = &omap1_clk_enable, 689 .enable = &omap1_clk_enable_generic,
690 .disable = &omap1_clk_disable, 690 .disable = &omap1_clk_disable_generic,
691}; 691};
692 692
693static struct clk mmc2_ck = { 693static struct clk mmc2_ck = {
@@ -699,8 +699,8 @@ static struct clk mmc2_ck = {
699 RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT, 699 RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
700 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0, 700 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
701 .enable_bit = 20, 701 .enable_bit = 20,
702 .enable = &omap1_clk_enable, 702 .enable = &omap1_clk_enable_generic,
703 .disable = &omap1_clk_disable, 703 .disable = &omap1_clk_disable_generic,
704}; 704};
705 705
706static struct clk virtual_ck_mpu = { 706static struct clk virtual_ck_mpu = {
@@ -711,8 +711,8 @@ static struct clk virtual_ck_mpu = {
711 .recalc = &followparent_recalc, 711 .recalc = &followparent_recalc,
712 .set_rate = &omap1_select_table_rate, 712 .set_rate = &omap1_select_table_rate,
713 .round_rate = &omap1_round_to_table_rate, 713 .round_rate = &omap1_round_to_table_rate,
714 .enable = &omap1_clk_enable, 714 .enable = &omap1_clk_enable_generic,
715 .disable = &omap1_clk_disable, 715 .disable = &omap1_clk_disable_generic,
716}; 716};
717 717
718static struct clk * onchip_clks[] = { 718static struct clk * onchip_clks[] = {
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 7a68f098a02..e924e0c6a4c 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -146,7 +146,7 @@ void __init omap_serial_init(void)
146 if (IS_ERR(uart1_ck)) 146 if (IS_ERR(uart1_ck))
147 printk("Could not get uart1_ck\n"); 147 printk("Could not get uart1_ck\n");
148 else { 148 else {
149 clk_use(uart1_ck); 149 clk_enable(uart1_ck);
150 if (cpu_is_omap1510()) 150 if (cpu_is_omap1510())
151 clk_set_rate(uart1_ck, 12000000); 151 clk_set_rate(uart1_ck, 12000000);
152 } 152 }
@@ -166,7 +166,7 @@ void __init omap_serial_init(void)
166 if (IS_ERR(uart2_ck)) 166 if (IS_ERR(uart2_ck))
167 printk("Could not get uart2_ck\n"); 167 printk("Could not get uart2_ck\n");
168 else { 168 else {
169 clk_use(uart2_ck); 169 clk_enable(uart2_ck);
170 if (cpu_is_omap1510()) 170 if (cpu_is_omap1510())
171 clk_set_rate(uart2_ck, 12000000); 171 clk_set_rate(uart2_ck, 12000000);
172 else 172 else
@@ -188,7 +188,7 @@ void __init omap_serial_init(void)
188 if (IS_ERR(uart3_ck)) 188 if (IS_ERR(uart3_ck))
189 printk("Could not get uart3_ck\n"); 189 printk("Could not get uart3_ck\n");
190 else { 190 else {
191 clk_use(uart3_ck); 191 clk_enable(uart3_ck);
192 if (cpu_is_omap1510()) 192 if (cpu_is_omap1510())
193 clk_set_rate(uart3_ck, 12000000); 193 clk_set_rate(uart3_ck, 12000000);
194 } 194 }
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 5407b954915..180f675c906 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -111,7 +111,7 @@ static void omap2_clk_fixed_enable(struct clk *clk)
111/* Enables clock without considering parent dependencies or use count 111/* Enables clock without considering parent dependencies or use count
112 * REVISIT: Maybe change this to use clk->enable like on omap1? 112 * REVISIT: Maybe change this to use clk->enable like on omap1?
113 */ 113 */
114static int omap2_clk_enable(struct clk * clk) 114static int _omap2_clk_enable(struct clk * clk)
115{ 115{
116 u32 regval32; 116 u32 regval32;
117 117
@@ -150,7 +150,7 @@ static void omap2_clk_fixed_disable(struct clk *clk)
150} 150}
151 151
152/* Disables clock without considering parent dependencies or use count */ 152/* Disables clock without considering parent dependencies or use count */
153static void omap2_clk_disable(struct clk *clk) 153static void _omap2_clk_disable(struct clk *clk)
154{ 154{
155 u32 regval32; 155 u32 regval32;
156 156
@@ -167,23 +167,23 @@ static void omap2_clk_disable(struct clk *clk)
167 __raw_writel(regval32, clk->enable_reg); 167 __raw_writel(regval32, clk->enable_reg);
168} 168}
169 169
170static int omap2_clk_use(struct clk *clk) 170static int omap2_clk_enable(struct clk *clk)
171{ 171{
172 int ret = 0; 172 int ret = 0;
173 173
174 if (clk->usecount++ == 0) { 174 if (clk->usecount++ == 0) {
175 if (likely((u32)clk->parent)) 175 if (likely((u32)clk->parent))
176 ret = omap2_clk_use(clk->parent); 176 ret = omap2_clk_enable(clk->parent);
177 177
178 if (unlikely(ret != 0)) { 178 if (unlikely(ret != 0)) {
179 clk->usecount--; 179 clk->usecount--;
180 return ret; 180 return ret;
181 } 181 }
182 182
183 ret = omap2_clk_enable(clk); 183 ret = _omap2_clk_enable(clk);
184 184
185 if (unlikely(ret != 0) && clk->parent) { 185 if (unlikely(ret != 0) && clk->parent) {
186 omap2_clk_unuse(clk->parent); 186 omap2_clk_disable(clk->parent);
187 clk->usecount--; 187 clk->usecount--;
188 } 188 }
189 } 189 }
@@ -191,12 +191,12 @@ static int omap2_clk_use(struct clk *clk)
191 return ret; 191 return ret;
192} 192}
193 193
194static void omap2_clk_unuse(struct clk *clk) 194static void omap2_clk_disable(struct clk *clk)
195{ 195{
196 if (clk->usecount > 0 && !(--clk->usecount)) { 196 if (clk->usecount > 0 && !(--clk->usecount)) {
197 omap2_clk_disable(clk); 197 _omap2_clk_disable(clk);
198 if (likely((u32)clk->parent)) 198 if (likely((u32)clk->parent))
199 omap2_clk_unuse(clk->parent); 199 omap2_clk_disable(clk->parent);
200 } 200 }
201} 201}
202 202
@@ -873,7 +873,7 @@ static int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
873 reg = (void __iomem *)src_sel; 873 reg = (void __iomem *)src_sel;
874 874
875 if (clk->usecount > 0) 875 if (clk->usecount > 0)
876 omap2_clk_disable(clk); 876 _omap2_clk_disable(clk);
877 877
878 /* Set new source value (previous dividers if any in effect) */ 878 /* Set new source value (previous dividers if any in effect) */
879 reg_val = __raw_readl(reg) & ~(field_mask << src_off); 879 reg_val = __raw_readl(reg) & ~(field_mask << src_off);
@@ -884,7 +884,7 @@ static int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
884 __raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL); 884 __raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL);
885 885
886 if (clk->usecount > 0) 886 if (clk->usecount > 0)
887 omap2_clk_enable(clk); 887 _omap2_clk_enable(clk);
888 888
889 clk->parent = new_parent; 889 clk->parent = new_parent;
890 890
@@ -999,8 +999,6 @@ static int omap2_select_table_rate(struct clk * clk, unsigned long rate)
999static struct clk_functions omap2_clk_functions = { 999static struct clk_functions omap2_clk_functions = {
1000 .clk_enable = omap2_clk_enable, 1000 .clk_enable = omap2_clk_enable,
1001 .clk_disable = omap2_clk_disable, 1001 .clk_disable = omap2_clk_disable,
1002 .clk_use = omap2_clk_use,
1003 .clk_unuse = omap2_clk_unuse,
1004 .clk_round_rate = omap2_clk_round_rate, 1002 .clk_round_rate = omap2_clk_round_rate,
1005 .clk_set_rate = omap2_clk_set_rate, 1003 .clk_set_rate = omap2_clk_set_rate,
1006 .clk_set_parent = omap2_clk_set_parent, 1004 .clk_set_parent = omap2_clk_set_parent,
@@ -1045,7 +1043,7 @@ static void __init omap2_disable_unused_clocks(void)
1045 continue; 1043 continue;
1046 1044
1047 printk(KERN_INFO "Disabling unused clock \"%s\"\n", ck->name); 1045 printk(KERN_INFO "Disabling unused clock \"%s\"\n", ck->name);
1048 omap2_clk_disable(ck); 1046 _omap2_clk_disable(ck);
1049 } 1047 }
1050} 1048}
1051late_initcall(omap2_disable_unused_clocks); 1049late_initcall(omap2_disable_unused_clocks);
@@ -1120,10 +1118,10 @@ int __init omap2_clk_init(void)
1120 * Only enable those clocks we will need, let the drivers 1118 * Only enable those clocks we will need, let the drivers
1121 * enable other clocks as necessary 1119 * enable other clocks as necessary
1122 */ 1120 */
1123 clk_use(&sync_32k_ick); 1121 clk_enable(&sync_32k_ick);
1124 clk_use(&omapctrl_ick); 1122 clk_enable(&omapctrl_ick);
1125 if (cpu_is_omap2430()) 1123 if (cpu_is_omap2430())
1126 clk_use(&sdrc_ick); 1124 clk_enable(&sdrc_ick);
1127 1125
1128 return 0; 1126 return 0;
1129} 1127}
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 4aeab5591bd..6cab20b1d3c 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -24,7 +24,7 @@ static void omap2_propagate_rate(struct clk * clk);
24static void omap2_mpu_recalc(struct clk * clk); 24static void omap2_mpu_recalc(struct clk * clk);
25static int omap2_select_table_rate(struct clk * clk, unsigned long rate); 25static int omap2_select_table_rate(struct clk * clk, unsigned long rate);
26static long omap2_round_to_table_rate(struct clk * clk, unsigned long rate); 26static long omap2_round_to_table_rate(struct clk * clk, unsigned long rate);
27static void omap2_clk_unuse(struct clk *clk); 27static void omap2_clk_disable(struct clk *clk);
28static void omap2_sys_clk_recalc(struct clk * clk); 28static void omap2_sys_clk_recalc(struct clk * clk);
29static u32 omap2_clksel_to_divisor(u32 div_sel, u32 field_val); 29static u32 omap2_clksel_to_divisor(u32 div_sel, u32 field_val);
30static u32 omap2_clksel_get_divisor(struct clk *clk); 30static u32 omap2_clksel_get_divisor(struct clk *clk);
@@ -859,7 +859,7 @@ static struct clk core_l3_ck = { /* Used for ick and fck, interconnect */
859 859
860static struct clk usb_l4_ick = { /* FS-USB interface clock */ 860static struct clk usb_l4_ick = { /* FS-USB interface clock */
861 .name = "usb_l4_ick", 861 .name = "usb_l4_ick",
862 .parent = &core_ck, 862 .parent = &core_l3_ck,
863 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | 863 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
864 RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP | 864 RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP |
865 CONFIG_PARTICIPANT, 865 CONFIG_PARTICIPANT,
@@ -1045,7 +1045,7 @@ static struct clk gpt1_ick = {
1045 .name = "gpt1_ick", 1045 .name = "gpt1_ick",
1046 .parent = &l4_ck, 1046 .parent = &l4_ck,
1047 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, 1047 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
1048 .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP, /* Bit4 */ 1048 .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP, /* Bit0 */
1049 .enable_bit = 0, 1049 .enable_bit = 0,
1050 .recalc = &omap2_followparent_recalc, 1050 .recalc = &omap2_followparent_recalc,
1051}; 1051};
@@ -1055,7 +1055,7 @@ static struct clk gpt1_fck = {
1055 .parent = &func_32k_ck, 1055 .parent = &func_32k_ck,
1056 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | 1056 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
1057 CM_WKUP_SEL1, 1057 CM_WKUP_SEL1,
1058 .enable_reg = (void __iomem *)&CM_FCLKEN_WKUP, 1058 .enable_reg = (void __iomem *)&CM_FCLKEN_WKUP, /* Bit0 */
1059 .enable_bit = 0, 1059 .enable_bit = 0,
1060 .src_offset = 0, 1060 .src_offset = 0,
1061 .recalc = &omap2_followparent_recalc, 1061 .recalc = &omap2_followparent_recalc,
@@ -1065,7 +1065,7 @@ static struct clk gpt2_ick = {
1065 .name = "gpt2_ick", 1065 .name = "gpt2_ick",
1066 .parent = &l4_ck, 1066 .parent = &l4_ck,
1067 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, 1067 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
1068 .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* bit4 */ 1068 .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* Bit4 */
1069 .enable_bit = 0, 1069 .enable_bit = 0,
1070 .recalc = &omap2_followparent_recalc, 1070 .recalc = &omap2_followparent_recalc,
1071}; 1071};
@@ -1839,7 +1839,7 @@ static struct clk usb_fck = {
1839 1839
1840static struct clk usbhs_ick = { 1840static struct clk usbhs_ick = {
1841 .name = "usbhs_ick", 1841 .name = "usbhs_ick",
1842 .parent = &l4_ck, 1842 .parent = &core_l3_ck,
1843 .flags = CLOCK_IN_OMAP243X, 1843 .flags = CLOCK_IN_OMAP243X,
1844 .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE, 1844 .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
1845 .enable_bit = 6, 1845 .enable_bit = 6,
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index e1bd46a96e1..24dd374224a 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -119,14 +119,14 @@ void __init omap_serial_init()
119 if (IS_ERR(uart1_ick)) 119 if (IS_ERR(uart1_ick))
120 printk("Could not get uart1_ick\n"); 120 printk("Could not get uart1_ick\n");
121 else { 121 else {
122 clk_use(uart1_ick); 122 clk_enable(uart1_ick);
123 } 123 }
124 124
125 uart1_fck = clk_get(NULL, "uart1_fck"); 125 uart1_fck = clk_get(NULL, "uart1_fck");
126 if (IS_ERR(uart1_fck)) 126 if (IS_ERR(uart1_fck))
127 printk("Could not get uart1_fck\n"); 127 printk("Could not get uart1_fck\n");
128 else { 128 else {
129 clk_use(uart1_fck); 129 clk_enable(uart1_fck);
130 } 130 }
131 break; 131 break;
132 case 1: 132 case 1:
@@ -134,14 +134,14 @@ void __init omap_serial_init()
134 if (IS_ERR(uart2_ick)) 134 if (IS_ERR(uart2_ick))
135 printk("Could not get uart2_ick\n"); 135 printk("Could not get uart2_ick\n");
136 else { 136 else {
137 clk_use(uart2_ick); 137 clk_enable(uart2_ick);
138 } 138 }
139 139
140 uart2_fck = clk_get(NULL, "uart2_fck"); 140 uart2_fck = clk_get(NULL, "uart2_fck");
141 if (IS_ERR(uart2_fck)) 141 if (IS_ERR(uart2_fck))
142 printk("Could not get uart2_fck\n"); 142 printk("Could not get uart2_fck\n");
143 else { 143 else {
144 clk_use(uart2_fck); 144 clk_enable(uart2_fck);
145 } 145 }
146 break; 146 break;
147 case 2: 147 case 2:
@@ -149,14 +149,14 @@ void __init omap_serial_init()
149 if (IS_ERR(uart3_ick)) 149 if (IS_ERR(uart3_ick))
150 printk("Could not get uart3_ick\n"); 150 printk("Could not get uart3_ick\n");
151 else { 151 else {
152 clk_use(uart3_ick); 152 clk_enable(uart3_ick);
153 } 153 }
154 154
155 uart3_fck = clk_get(NULL, "uart3_fck"); 155 uart3_fck = clk_get(NULL, "uart3_fck");
156 if (IS_ERR(uart3_fck)) 156 if (IS_ERR(uart3_fck))
157 printk("Could not get uart3_fck\n"); 157 printk("Could not get uart3_fck\n");
158 else { 158 else {
159 clk_use(uart3_fck); 159 clk_enable(uart3_fck);
160 } 160 }
161 break; 161 break;
162 } 162 }
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 23d36b1c40f..1d2f5ac2f69 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -104,7 +104,7 @@ static void __init omap2_gp_timer_init(void)
104 if (IS_ERR(sys_ck)) 104 if (IS_ERR(sys_ck))
105 printk(KERN_ERR "Could not get sys_ck\n"); 105 printk(KERN_ERR "Could not get sys_ck\n");
106 else { 106 else {
107 clk_use(sys_ck); 107 clk_enable(sys_ck);
108 tick_period = clk_get_rate(sys_ck) / 100; 108 tick_period = clk_get_rate(sys_ck) / 100;
109 clk_put(sys_ck); 109 clk_put(sys_ck);
110 } 110 }
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 7ebc5a29db8..3c2bfc0efda 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -34,7 +34,7 @@ DEFINE_SPINLOCK(clockfw_lock);
34static struct clk_functions *arch_clock; 34static struct clk_functions *arch_clock;
35 35
36/*------------------------------------------------------------------------- 36/*-------------------------------------------------------------------------
37 * Standard clock functions defined in asm/hardware/clock.h 37 * Standard clock functions defined in include/linux/clk.h
38 *-------------------------------------------------------------------------*/ 38 *-------------------------------------------------------------------------*/
39 39
40struct clk * clk_get(struct device *dev, const char *id) 40struct clk * clk_get(struct device *dev, const char *id)
@@ -60,12 +60,8 @@ int clk_enable(struct clk *clk)
60 int ret = 0; 60 int ret = 0;
61 61
62 spin_lock_irqsave(&clockfw_lock, flags); 62 spin_lock_irqsave(&clockfw_lock, flags);
63 if (clk->enable) 63 if (arch_clock->clk_enable)
64 ret = clk->enable(clk);
65 else if (arch_clock->clk_enable)
66 ret = arch_clock->clk_enable(clk); 64 ret = arch_clock->clk_enable(clk);
67 else
68 printk(KERN_ERR "Could not enable clock %s\n", clk->name);
69 spin_unlock_irqrestore(&clockfw_lock, flags); 65 spin_unlock_irqrestore(&clockfw_lock, flags);
70 66
71 return ret; 67 return ret;
@@ -77,41 +73,12 @@ void clk_disable(struct clk *clk)
77 unsigned long flags; 73 unsigned long flags;
78 74
79 spin_lock_irqsave(&clockfw_lock, flags); 75 spin_lock_irqsave(&clockfw_lock, flags);
80 if (clk->disable) 76 if (arch_clock->clk_disable)
81 clk->disable(clk);
82 else if (arch_clock->clk_disable)
83 arch_clock->clk_disable(clk); 77 arch_clock->clk_disable(clk);
84 else
85 printk(KERN_ERR "Could not disable clock %s\n", clk->name);
86 spin_unlock_irqrestore(&clockfw_lock, flags); 78 spin_unlock_irqrestore(&clockfw_lock, flags);
87} 79}
88EXPORT_SYMBOL(clk_disable); 80EXPORT_SYMBOL(clk_disable);
89 81
90int clk_use(struct clk *clk)
91{
92 unsigned long flags;
93 int ret = 0;
94
95 spin_lock_irqsave(&clockfw_lock, flags);
96 if (arch_clock->clk_use)
97 ret = arch_clock->clk_use(clk);
98 spin_unlock_irqrestore(&clockfw_lock, flags);
99
100 return ret;
101}
102EXPORT_SYMBOL(clk_use);
103
104void clk_unuse(struct clk *clk)
105{
106 unsigned long flags;
107
108 spin_lock_irqsave(&clockfw_lock, flags);
109 if (arch_clock->clk_unuse)
110 arch_clock->clk_unuse(clk);
111 spin_unlock_irqrestore(&clockfw_lock, flags);
112}
113EXPORT_SYMBOL(clk_unuse);
114
115int clk_get_usecount(struct clk *clk) 82int clk_get_usecount(struct clk *clk)
116{ 83{
117 unsigned long flags; 84 unsigned long flags;
@@ -146,7 +113,7 @@ void clk_put(struct clk *clk)
146EXPORT_SYMBOL(clk_put); 113EXPORT_SYMBOL(clk_put);
147 114
148/*------------------------------------------------------------------------- 115/*-------------------------------------------------------------------------
149 * Optional clock functions defined in asm/hardware/clock.h 116 * Optional clock functions defined in include/linux/clk.h
150 *-------------------------------------------------------------------------*/ 117 *-------------------------------------------------------------------------*/
151 118
152long clk_round_rate(struct clk *clk, unsigned long rate) 119long clk_round_rate(struct clk *clk, unsigned long rate)
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index ca3681a824a..b4d5b9e4bfc 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -853,19 +853,19 @@ static int __init _omap_gpio_init(void)
853 if (IS_ERR(gpio_ick)) 853 if (IS_ERR(gpio_ick))
854 printk("Could not get arm_gpio_ck\n"); 854 printk("Could not get arm_gpio_ck\n");
855 else 855 else
856 clk_use(gpio_ick); 856 clk_enable(gpio_ick);
857 } 857 }
858 if (cpu_is_omap24xx()) { 858 if (cpu_is_omap24xx()) {
859 gpio_ick = clk_get(NULL, "gpios_ick"); 859 gpio_ick = clk_get(NULL, "gpios_ick");
860 if (IS_ERR(gpio_ick)) 860 if (IS_ERR(gpio_ick))
861 printk("Could not get gpios_ick\n"); 861 printk("Could not get gpios_ick\n");
862 else 862 else
863 clk_use(gpio_ick); 863 clk_enable(gpio_ick);
864 gpio_fck = clk_get(NULL, "gpios_fck"); 864 gpio_fck = clk_get(NULL, "gpios_fck");
865 if (IS_ERR(gpio_ick)) 865 if (IS_ERR(gpio_ick))
866 printk("Could not get gpios_fck\n"); 866 printk("Could not get gpios_fck\n");
867 else 867 else
868 clk_use(gpio_fck); 868 clk_enable(gpio_fck);
869 } 869 }
870 870
871#ifdef CONFIG_ARCH_OMAP15XX 871#ifdef CONFIG_ARCH_OMAP15XX
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index be0e0f32a59..1cd2cace7e1 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -190,11 +190,11 @@ static int omap_mcbsp_check(unsigned int id)
190static void omap_mcbsp_dsp_request(void) 190static void omap_mcbsp_dsp_request(void)
191{ 191{
192 if (cpu_is_omap1510() || cpu_is_omap16xx()) { 192 if (cpu_is_omap1510() || cpu_is_omap16xx()) {
193 clk_use(mcbsp_dsp_ck); 193 clk_enable(mcbsp_dsp_ck);
194 clk_use(mcbsp_api_ck); 194 clk_enable(mcbsp_api_ck);
195 195
196 /* enable 12MHz clock to mcbsp 1 & 3 */ 196 /* enable 12MHz clock to mcbsp 1 & 3 */
197 clk_use(mcbsp_dspxor_ck); 197 clk_enable(mcbsp_dspxor_ck);
198 198
199 /* 199 /*
200 * DSP external peripheral reset 200 * DSP external peripheral reset
@@ -208,9 +208,9 @@ static void omap_mcbsp_dsp_request(void)
208static void omap_mcbsp_dsp_free(void) 208static void omap_mcbsp_dsp_free(void)
209{ 209{
210 if (cpu_is_omap1510() || cpu_is_omap16xx()) { 210 if (cpu_is_omap1510() || cpu_is_omap16xx()) {
211 clk_unuse(mcbsp_dspxor_ck); 211 clk_disable(mcbsp_dspxor_ck);
212 clk_unuse(mcbsp_dsp_ck); 212 clk_disable(mcbsp_dsp_ck);
213 clk_unuse(mcbsp_api_ck); 213 clk_disable(mcbsp_api_ck);
214 } 214 }
215} 215}
216 216
diff --git a/arch/arm/plat-omap/ocpi.c b/arch/arm/plat-omap/ocpi.c
index e40fcc8b43d..5cc6775c789 100644
--- a/arch/arm/plat-omap/ocpi.c
+++ b/arch/arm/plat-omap/ocpi.c
@@ -88,7 +88,7 @@ static int __init omap_ocpi_init(void)
88 if (IS_ERR(ocpi_ck)) 88 if (IS_ERR(ocpi_ck))
89 return PTR_ERR(ocpi_ck); 89 return PTR_ERR(ocpi_ck);
90 90
91 clk_use(ocpi_ck); 91 clk_enable(ocpi_ck);
92 ocpi_enable(); 92 ocpi_enable();
93 printk("OMAP OCPI interconnect driver loaded\n"); 93 printk("OMAP OCPI interconnect driver loaded\n");
94 94
@@ -102,7 +102,7 @@ static void __exit omap_ocpi_exit(void)
102 if (!cpu_is_omap16xx()) 102 if (!cpu_is_omap16xx())
103 return; 103 return;
104 104
105 clk_unuse(ocpi_ck); 105 clk_disable(ocpi_ck);
106 clk_put(ocpi_ck); 106 clk_put(ocpi_ck);
107} 107}
108 108
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 5b7146f54fd..679c1d5cc95 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -35,74 +35,22 @@ struct fdpic_func_descriptor {
35 unsigned long GOT; 35 unsigned long GOT;
36}; 36};
37 37
38static int do_signal(sigset_t *oldset);
39
40/* 38/*
41 * Atomically swap in the new signal mask, and wait for a signal. 39 * Atomically swap in the new signal mask, and wait for a signal.
42 */ 40 */
43asmlinkage int sys_sigsuspend(int history0, int history1, old_sigset_t mask) 41asmlinkage int sys_sigsuspend(int history0, int history1, old_sigset_t mask)
44{ 42{
45 sigset_t saveset;
46
47 mask &= _BLOCKABLE; 43 mask &= _BLOCKABLE;
48 spin_lock_irq(&current->sighand->siglock); 44 spin_lock_irq(&current->sighand->siglock);
49 saveset = current->blocked; 45 current->saved_sigmask = current->blocked;
50 siginitset(&current->blocked, mask); 46 siginitset(&current->blocked, mask);
51 recalc_sigpending(); 47 recalc_sigpending();
52 spin_unlock_irq(&current->sighand->siglock); 48 spin_unlock_irq(&current->sighand->siglock);
53 49
54 __frame->gr8 = -EINTR; 50 current->state = TASK_INTERRUPTIBLE;
55 while (1) { 51 schedule();
56 current->state = TASK_INTERRUPTIBLE; 52 set_thread_flag(TIF_RESTORE_SIGMASK);
57 schedule(); 53 return -ERESTARTNOHAND;
58 if (do_signal(&saveset))
59 /* return the signal number as the return value of this function
60 * - this is an utterly evil hack. syscalls should not invoke do_signal()
61 * as entry.S sets regs->gr8 to the return value of the system call
62 * - we can't just use sigpending() as we'd have to discard SIG_IGN signals
63 * and call waitpid() if SIGCHLD needed discarding
64 * - this only works on the i386 because it passes arguments to the signal
65 * handler on the stack, and the return value in EAX is effectively
66 * discarded
67 */
68 return __frame->gr8;
69 }
70}
71
72asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
73{
74 sigset_t saveset, newset;
75
76 /* XXX: Don't preclude handling different sized sigset_t's. */
77 if (sigsetsize != sizeof(sigset_t))
78 return -EINVAL;
79
80 if (copy_from_user(&newset, unewset, sizeof(newset)))
81 return -EFAULT;
82 sigdelsetmask(&newset, ~_BLOCKABLE);
83
84 spin_lock_irq(&current->sighand->siglock);
85 saveset = current->blocked;
86 current->blocked = newset;
87 recalc_sigpending();
88 spin_unlock_irq(&current->sighand->siglock);
89
90 __frame->gr8 = -EINTR;
91 while (1) {
92 current->state = TASK_INTERRUPTIBLE;
93 schedule();
94 if (do_signal(&saveset))
95 /* return the signal number as the return value of this function
96 * - this is an utterly evil hack. syscalls should not invoke do_signal()
97 * as entry.S sets regs->gr8 to the return value of the system call
98 * - we can't just use sigpending() as we'd have to discard SIG_IGN signals
99 * and call waitpid() if SIGCHLD needed discarding
100 * - this only works on the i386 because it passes arguments to the signal
101 * handler on the stack, and the return value in EAX is effectively
102 * discarded
103 */
104 return __frame->gr8;
105 }
106} 54}
107 55
108asmlinkage int sys_sigaction(int sig, 56asmlinkage int sys_sigaction(int sig,
@@ -372,11 +320,11 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
372 frame->pretcode); 320 frame->pretcode);
373#endif 321#endif
374 322
375 return 1; 323 return 0;
376 324
377give_sigsegv: 325give_sigsegv:
378 force_sig(SIGSEGV, current); 326 force_sig(SIGSEGV, current);
379 return 0; 327 return -EFAULT;
380 328
381} /* end setup_frame() */ 329} /* end setup_frame() */
382 330
@@ -471,11 +419,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
471 frame->pretcode); 419 frame->pretcode);
472#endif 420#endif
473 421
474 return 1; 422 return 0;
475 423
476give_sigsegv: 424give_sigsegv:
477 force_sig(SIGSEGV, current); 425 force_sig(SIGSEGV, current);
478 return 0; 426 return -EFAULT;
479 427
480} /* end setup_rt_frame() */ 428} /* end setup_rt_frame() */
481 429
@@ -516,7 +464,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
516 else 464 else
517 ret = setup_frame(sig, ka, oldset); 465 ret = setup_frame(sig, ka, oldset);
518 466
519 if (ret) { 467 if (ret == 0) {
520 spin_lock_irq(&current->sighand->siglock); 468 spin_lock_irq(&current->sighand->siglock);
521 sigorsets(&current->blocked, &current->blocked, 469 sigorsets(&current->blocked, &current->blocked,
522 &ka->sa.sa_mask); 470 &ka->sa.sa_mask);
@@ -536,10 +484,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
536 * want to handle. Thus you cannot kill init even with a SIGKILL even by 484 * want to handle. Thus you cannot kill init even with a SIGKILL even by
537 * mistake. 485 * mistake.
538 */ 486 */
539static int do_signal(sigset_t *oldset) 487static void do_signal(void)
540{ 488{
541 struct k_sigaction ka; 489 struct k_sigaction ka;
542 siginfo_t info; 490 siginfo_t info;
491 sigset_t *oldset;
543 int signr; 492 int signr;
544 493
545 /* 494 /*
@@ -549,43 +498,62 @@ static int do_signal(sigset_t *oldset)
549 * if so. 498 * if so.
550 */ 499 */
551 if (!user_mode(__frame)) 500 if (!user_mode(__frame))
552 return 1; 501 return;
553 502
554 if (try_to_freeze()) 503 if (try_to_freeze())
555 goto no_signal; 504 goto no_signal;
556 505
557 if (!oldset) 506 if (test_thread_flag(TIF_RESTORE_SIGMASK))
507 oldset = &current->saved_sigmask;
508 else
558 oldset = &current->blocked; 509 oldset = &current->blocked;
559 510
560 signr = get_signal_to_deliver(&info, &ka, __frame, NULL); 511 signr = get_signal_to_deliver(&info, &ka, __frame, NULL);
561 if (signr > 0) 512 if (signr > 0) {
562 return handle_signal(signr, &info, &ka, oldset); 513 if (handle_signal(signr, &info, &ka, oldset) == 0) {
514 /* a signal was successfully delivered; the saved
515 * sigmask will have been stored in the signal frame,
516 * and will be restored by sigreturn, so we can simply
517 * clear the TIF_RESTORE_SIGMASK flag */
518 if (test_thread_flag(TIF_RESTORE_SIGMASK))
519 clear_thread_flag(TIF_RESTORE_SIGMASK);
520 }
521
522 return;
523 }
563 524
564no_signal: 525no_signal:
565 /* Did we come from a system call? */ 526 /* Did we come from a system call? */
566 if (__frame->syscallno >= 0) { 527 if (__frame->syscallno >= 0) {
567 /* Restart the system call - no handlers present */ 528 /* Restart the system call - no handlers present */
568 if (__frame->gr8 == -ERESTARTNOHAND || 529 switch (__frame->gr8) {
569 __frame->gr8 == -ERESTARTSYS || 530 case -ERESTARTNOHAND:
570 __frame->gr8 == -ERESTARTNOINTR) { 531 case -ERESTARTSYS:
532 case -ERESTARTNOINTR:
571 __frame->gr8 = __frame->orig_gr8; 533 __frame->gr8 = __frame->orig_gr8;
572 __frame->pc -= 4; 534 __frame->pc -= 4;
573 } 535 break;
574 536
575 if (__frame->gr8 == -ERESTART_RESTARTBLOCK){ 537 case -ERESTART_RESTARTBLOCK:
576 __frame->gr8 = __NR_restart_syscall; 538 __frame->gr8 = __NR_restart_syscall;
577 __frame->pc -= 4; 539 __frame->pc -= 4;
540 break;
578 } 541 }
579 } 542 }
580 543
581 return 0; 544 /* if there's no signal to deliver, we just put the saved sigmask
545 * back */
546 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
547 clear_thread_flag(TIF_RESTORE_SIGMASK);
548 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
549 }
582 550
583} /* end do_signal() */ 551} /* end do_signal() */
584 552
585/*****************************************************************************/ 553/*****************************************************************************/
586/* 554/*
587 * notification of userspace execution resumption 555 * notification of userspace execution resumption
588 * - triggered by current->work.notify_resume 556 * - triggered by the TIF_WORK_MASK flags
589 */ 557 */
590asmlinkage void do_notify_resume(__u32 thread_info_flags) 558asmlinkage void do_notify_resume(__u32 thread_info_flags)
591{ 559{
@@ -594,7 +562,7 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
594 clear_thread_flag(TIF_SINGLESTEP); 562 clear_thread_flag(TIF_SINGLESTEP);
595 563
596 /* deal with pending signal delivery */ 564 /* deal with pending signal delivery */
597 if (thread_info_flags & _TIF_SIGPENDING) 565 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
598 do_signal(NULL); 566 do_signal();
599 567
600} /* end do_notify_resume() */ 568} /* end do_notify_resume() */
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 6a431b92601..3cbe6e9cb9f 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -644,6 +644,8 @@ CONFIG_8139TOO_PIO=y
644# CONFIG_ACENIC is not set 644# CONFIG_ACENIC is not set
645# CONFIG_DL2K is not set 645# CONFIG_DL2K is not set
646# CONFIG_E1000 is not set 646# CONFIG_E1000 is not set
647# CONFIG_E1000_NAPI is not set
648# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
647# CONFIG_NS83820 is not set 649# CONFIG_NS83820 is not set
648# CONFIG_HAMACHI is not set 650# CONFIG_HAMACHI is not set
649# CONFIG_YELLOWFIN is not set 651# CONFIG_YELLOWFIN is not set
diff --git a/arch/i386/kernel/quirks.c b/arch/i386/kernel/quirks.c
index aaf89cb2bc5..87ccdac8492 100644
--- a/arch/i386/kernel/quirks.c
+++ b/arch/i386/kernel/quirks.c
@@ -25,8 +25,7 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
25 25
26 /* enable access to config space*/ 26 /* enable access to config space*/
27 pci_read_config_byte(dev, 0xf4, &config); 27 pci_read_config_byte(dev, 0xf4, &config);
28 config |= 0x2; 28 pci_write_config_byte(dev, 0xf4, config|0x2);
29 pci_write_config_byte(dev, 0xf4, config);
30 29
31 /* read xTPR register */ 30 /* read xTPR register */
32 raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word); 31 raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
@@ -42,9 +41,9 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
42#endif 41#endif
43 } 42 }
44 43
45 config &= ~0x2; 44 /* put back the original value for config space*/
46 /* disable access to config space*/ 45 if (!(config & 0x2))
47 pci_write_config_byte(dev, 0xf4, config); 46 pci_write_config_byte(dev, 0xf4, config);
48} 47}
49DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); 48DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
50DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); 49DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index adcd069db91..963616d364e 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -37,51 +37,17 @@
37asmlinkage int 37asmlinkage int
38sys_sigsuspend(int history0, int history1, old_sigset_t mask) 38sys_sigsuspend(int history0, int history1, old_sigset_t mask)
39{ 39{
40 struct pt_regs * regs = (struct pt_regs *) &history0;
41 sigset_t saveset;
42
43 mask &= _BLOCKABLE; 40 mask &= _BLOCKABLE;
44 spin_lock_irq(&current->sighand->siglock); 41 spin_lock_irq(&current->sighand->siglock);
45 saveset = current->blocked; 42 current->saved_sigmask = current->blocked;
46 siginitset(&current->blocked, mask); 43 siginitset(&current->blocked, mask);
47 recalc_sigpending(); 44 recalc_sigpending();
48 spin_unlock_irq(&current->sighand->siglock); 45 spin_unlock_irq(&current->sighand->siglock);
49 46
50 regs->eax = -EINTR; 47 current->state = TASK_INTERRUPTIBLE;
51 while (1) { 48 schedule();
52 current->state = TASK_INTERRUPTIBLE; 49 set_thread_flag(TIF_RESTORE_SIGMASK);
53 schedule(); 50 return -ERESTARTNOHAND;
54 if (do_signal(regs, &saveset))
55 return -EINTR;
56 }
57}
58
59asmlinkage int
60sys_rt_sigsuspend(struct pt_regs regs)
61{
62 sigset_t saveset, newset;
63
64 /* XXX: Don't preclude handling different sized sigset_t's. */
65 if (regs.ecx != sizeof(sigset_t))
66 return -EINVAL;
67
68 if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
69 return -EFAULT;
70 sigdelsetmask(&newset, ~_BLOCKABLE);
71
72 spin_lock_irq(&current->sighand->siglock);
73 saveset = current->blocked;
74 current->blocked = newset;
75 recalc_sigpending();
76 spin_unlock_irq(&current->sighand->siglock);
77
78 regs.eax = -EINTR;
79 while (1) {
80 current->state = TASK_INTERRUPTIBLE;
81 schedule();
82 if (do_signal(&regs, &saveset))
83 return -EINTR;
84 }
85} 51}
86 52
87asmlinkage int 53asmlinkage int
@@ -433,11 +399,11 @@ static int setup_frame(int sig, struct k_sigaction *ka,
433 current->comm, current->pid, frame, regs->eip, frame->pretcode); 399 current->comm, current->pid, frame, regs->eip, frame->pretcode);
434#endif 400#endif
435 401
436 return 1; 402 return 0;
437 403
438give_sigsegv: 404give_sigsegv:
439 force_sigsegv(sig, current); 405 force_sigsegv(sig, current);
440 return 0; 406 return -EFAULT;
441} 407}
442 408
443static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 409static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
@@ -527,11 +493,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
527 current->comm, current->pid, frame, regs->eip, frame->pretcode); 493 current->comm, current->pid, frame, regs->eip, frame->pretcode);
528#endif 494#endif
529 495
530 return 1; 496 return 0;
531 497
532give_sigsegv: 498give_sigsegv:
533 force_sigsegv(sig, current); 499 force_sigsegv(sig, current);
534 return 0; 500 return -EFAULT;
535} 501}
536 502
537/* 503/*
@@ -581,7 +547,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
581 else 547 else
582 ret = setup_frame(sig, ka, oldset, regs); 548 ret = setup_frame(sig, ka, oldset, regs);
583 549
584 if (ret) { 550 if (ret == 0) {
585 spin_lock_irq(&current->sighand->siglock); 551 spin_lock_irq(&current->sighand->siglock);
586 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 552 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
587 if (!(ka->sa.sa_flags & SA_NODEFER)) 553 if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -598,11 +564,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
598 * want to handle. Thus you cannot kill init even with a SIGKILL even by 564 * want to handle. Thus you cannot kill init even with a SIGKILL even by
599 * mistake. 565 * mistake.
600 */ 566 */
601int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset) 567static void fastcall do_signal(struct pt_regs *regs)
602{ 568{
603 siginfo_t info; 569 siginfo_t info;
604 int signr; 570 int signr;
605 struct k_sigaction ka; 571 struct k_sigaction ka;
572 sigset_t *oldset;
606 573
607 /* 574 /*
608 * We want the common case to go fast, which 575 * We want the common case to go fast, which
@@ -613,12 +580,14 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
613 * CS suffices. 580 * CS suffices.
614 */ 581 */
615 if (!user_mode(regs)) 582 if (!user_mode(regs))
616 return 1; 583 return;
617 584
618 if (try_to_freeze()) 585 if (try_to_freeze())
619 goto no_signal; 586 goto no_signal;
620 587
621 if (!oldset) 588 if (test_thread_flag(TIF_RESTORE_SIGMASK))
589 oldset = &current->saved_sigmask;
590 else
622 oldset = &current->blocked; 591 oldset = &current->blocked;
623 592
624 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 593 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -628,38 +597,55 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
628 * have been cleared if the watchpoint triggered 597 * have been cleared if the watchpoint triggered
629 * inside the kernel. 598 * inside the kernel.
630 */ 599 */
631 if (unlikely(current->thread.debugreg[7])) { 600 if (unlikely(current->thread.debugreg[7]))
632 set_debugreg(current->thread.debugreg[7], 7); 601 set_debugreg(current->thread.debugreg[7], 7);
633 }
634 602
635 /* Whee! Actually deliver the signal. */ 603 /* Whee! Actually deliver the signal. */
636 return handle_signal(signr, &info, &ka, oldset, regs); 604 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
605 /* a signal was successfully delivered; the saved
606 * sigmask will have been stored in the signal frame,
607 * and will be restored by sigreturn, so we can simply
608 * clear the TIF_RESTORE_SIGMASK flag */
609 if (test_thread_flag(TIF_RESTORE_SIGMASK))
610 clear_thread_flag(TIF_RESTORE_SIGMASK);
611 }
612
613 return;
637 } 614 }
638 615
639 no_signal: 616no_signal:
640 /* Did we come from a system call? */ 617 /* Did we come from a system call? */
641 if (regs->orig_eax >= 0) { 618 if (regs->orig_eax >= 0) {
642 /* Restart the system call - no handlers present */ 619 /* Restart the system call - no handlers present */
643 if (regs->eax == -ERESTARTNOHAND || 620 switch (regs->eax) {
644 regs->eax == -ERESTARTSYS || 621 case -ERESTARTNOHAND:
645 regs->eax == -ERESTARTNOINTR) { 622 case -ERESTARTSYS:
623 case -ERESTARTNOINTR:
646 regs->eax = regs->orig_eax; 624 regs->eax = regs->orig_eax;
647 regs->eip -= 2; 625 regs->eip -= 2;
648 } 626 break;
649 if (regs->eax == -ERESTART_RESTARTBLOCK){ 627
628 case -ERESTART_RESTARTBLOCK:
650 regs->eax = __NR_restart_syscall; 629 regs->eax = __NR_restart_syscall;
651 regs->eip -= 2; 630 regs->eip -= 2;
631 break;
652 } 632 }
653 } 633 }
654 return 0; 634
635 /* if there's no signal to deliver, we just put the saved sigmask
636 * back */
637 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
638 clear_thread_flag(TIF_RESTORE_SIGMASK);
639 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
640 }
655} 641}
656 642
657/* 643/*
658 * notification of userspace execution resumption 644 * notification of userspace execution resumption
659 * - triggered by current->work.notify_resume 645 * - triggered by the TIF_WORK_MASK flags
660 */ 646 */
661__attribute__((regparm(3))) 647__attribute__((regparm(3)))
662void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, 648void do_notify_resume(struct pt_regs *regs, void *_unused,
663 __u32 thread_info_flags) 649 __u32 thread_info_flags)
664{ 650{
665 /* Pending single-step? */ 651 /* Pending single-step? */
@@ -667,9 +653,10 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
667 regs->eflags |= TF_MASK; 653 regs->eflags |= TF_MASK;
668 clear_thread_flag(TIF_SINGLESTEP); 654 clear_thread_flag(TIF_SINGLESTEP);
669 } 655 }
656
670 /* deal with pending signal delivery */ 657 /* deal with pending signal delivery */
671 if (thread_info_flags & _TIF_SIGPENDING) 658 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
672 do_signal(regs,oldset); 659 do_signal(regs);
673 660
674 clear_thread_flag(TIF_IRET); 661 clear_thread_flag(TIF_IRET);
675} 662}
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 6ff3e524322..1b665928336 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -294,3 +294,18 @@ ENTRY(sys_call_table)
294 .long sys_inotify_add_watch 294 .long sys_inotify_add_watch
295 .long sys_inotify_rm_watch 295 .long sys_inotify_rm_watch
296 .long sys_migrate_pages 296 .long sys_migrate_pages
297 .long sys_openat /* 295 */
298 .long sys_mkdirat
299 .long sys_mknodat
300 .long sys_fchownat
301 .long sys_futimesat
302 .long sys_newfstatat /* 300 */
303 .long sys_unlinkat
304 .long sys_renameat
305 .long sys_linkat
306 .long sys_symlinkat
307 .long sys_readlinkat /* 305 */
308 .long sys_fchmodat
309 .long sys_faccessat
310 .long sys_pselect6
311 .long sys_ppoll
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 1d07d8072ec..991c07b57c2 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -557,6 +557,7 @@ CONFIG_E100=m
557# CONFIG_DL2K is not set 557# CONFIG_DL2K is not set
558CONFIG_E1000=y 558CONFIG_E1000=y
559# CONFIG_E1000_NAPI is not set 559# CONFIG_E1000_NAPI is not set
560# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
560# CONFIG_NS83820 is not set 561# CONFIG_NS83820 is not set
561# CONFIG_HAMACHI is not set 562# CONFIG_HAMACHI is not set
562# CONFIG_YELLOWFIN is not set 563# CONFIG_YELLOWFIN is not set
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index b1e8f09e9fd..6859119bc9d 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -565,6 +565,7 @@ CONFIG_E100=m
565# CONFIG_DL2K is not set 565# CONFIG_DL2K is not set
566CONFIG_E1000=y 566CONFIG_E1000=y
567# CONFIG_E1000_NAPI is not set 567# CONFIG_E1000_NAPI is not set
568# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
568# CONFIG_NS83820 is not set 569# CONFIG_NS83820 is not set
569# CONFIG_HAMACHI is not set 570# CONFIG_HAMACHI is not set
570# CONFIG_YELLOWFIN is not set 571# CONFIG_YELLOWFIN is not set
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 0856ca67dd5..53899dc8eb5 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -548,6 +548,7 @@ CONFIG_E100=y
548# CONFIG_DL2K is not set 548# CONFIG_DL2K is not set
549CONFIG_E1000=y 549CONFIG_E1000=y
550# CONFIG_E1000_NAPI is not set 550# CONFIG_E1000_NAPI is not set
551# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
551# CONFIG_NS83820 is not set 552# CONFIG_NS83820 is not set
552# CONFIG_HAMACHI is not set 553# CONFIG_HAMACHI is not set
553# CONFIG_YELLOWFIN is not set 554# CONFIG_YELLOWFIN is not set
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 275a26c6e5a..dcbc78a4cfa 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -565,6 +565,7 @@ CONFIG_E100=m
565# CONFIG_DL2K is not set 565# CONFIG_DL2K is not set
566CONFIG_E1000=y 566CONFIG_E1000=y
567# CONFIG_E1000_NAPI is not set 567# CONFIG_E1000_NAPI is not set
568# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
568# CONFIG_NS83820 is not set 569# CONFIG_NS83820 is not set
569# CONFIG_HAMACHI is not set 570# CONFIG_HAMACHI is not set
570# CONFIG_YELLOWFIN is not set 571# CONFIG_YELLOWFIN is not set
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 3945d378bd7..70dba1f0e2e 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -52,9 +52,9 @@
52#include <linux/compat.h> 52#include <linux/compat.h>
53#include <linux/vfs.h> 53#include <linux/vfs.h>
54#include <linux/mman.h> 54#include <linux/mman.h>
55#include <linux/mutex.h>
55 56
56#include <asm/intrinsics.h> 57#include <asm/intrinsics.h>
57#include <asm/semaphore.h>
58#include <asm/types.h> 58#include <asm/types.h>
59#include <asm/uaccess.h> 59#include <asm/uaccess.h>
60#include <asm/unistd.h> 60#include <asm/unistd.h>
@@ -86,7 +86,7 @@
86 * while doing so. 86 * while doing so.
87 */ 87 */
88/* XXX make per-mm: */ 88/* XXX make per-mm: */
89static DECLARE_MUTEX(ia32_mmap_sem); 89static DEFINE_MUTEX(ia32_mmap_mutex);
90 90
91asmlinkage long 91asmlinkage long
92sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, 92sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
@@ -895,11 +895,11 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
895 prot = get_prot32(prot); 895 prot = get_prot32(prot);
896 896
897#if PAGE_SHIFT > IA32_PAGE_SHIFT 897#if PAGE_SHIFT > IA32_PAGE_SHIFT
898 down(&ia32_mmap_sem); 898 mutex_lock(&ia32_mmap_mutex);
899 { 899 {
900 addr = emulate_mmap(file, addr, len, prot, flags, offset); 900 addr = emulate_mmap(file, addr, len, prot, flags, offset);
901 } 901 }
902 up(&ia32_mmap_sem); 902 mutex_unlock(&ia32_mmap_mutex);
903#else 903#else
904 down_write(&current->mm->mmap_sem); 904 down_write(&current->mm->mmap_sem);
905 { 905 {
@@ -1000,11 +1000,9 @@ sys32_munmap (unsigned int start, unsigned int len)
1000 if (start >= end) 1000 if (start >= end)
1001 return 0; 1001 return 0;
1002 1002
1003 down(&ia32_mmap_sem); 1003 mutex_lock(&ia32_mmap_mutex);
1004 { 1004 ret = sys_munmap(start, end - start);
1005 ret = sys_munmap(start, end - start); 1005 mutex_unlock(&ia32_mmap_mutex);
1006 }
1007 up(&ia32_mmap_sem);
1008#endif 1006#endif
1009 return ret; 1007 return ret;
1010} 1008}
@@ -1056,7 +1054,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
1056 if (retval < 0) 1054 if (retval < 0)
1057 return retval; 1055 return retval;
1058 1056
1059 down(&ia32_mmap_sem); 1057 mutex_lock(&ia32_mmap_mutex);
1060 { 1058 {
1061 if (offset_in_page(start)) { 1059 if (offset_in_page(start)) {
1062 /* start address is 4KB aligned but not page aligned. */ 1060 /* start address is 4KB aligned but not page aligned. */
@@ -1080,7 +1078,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
1080 retval = sys_mprotect(start, end - start, prot); 1078 retval = sys_mprotect(start, end - start, prot);
1081 } 1079 }
1082 out: 1080 out:
1083 up(&ia32_mmap_sem); 1081 mutex_unlock(&ia32_mmap_mutex);
1084 return retval; 1082 return retval;
1085#endif 1083#endif
1086} 1084}
@@ -1124,11 +1122,9 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
1124 old_len = PAGE_ALIGN(old_end) - addr; 1122 old_len = PAGE_ALIGN(old_end) - addr;
1125 new_len = PAGE_ALIGN(new_end) - addr; 1123 new_len = PAGE_ALIGN(new_end) - addr;
1126 1124
1127 down(&ia32_mmap_sem); 1125 mutex_lock(&ia32_mmap_mutex);
1128 { 1126 ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
1129 ret = sys_mremap(addr, old_len, new_len, flags, new_addr); 1127 mutex_unlock(&ia32_mmap_mutex);
1130 }
1131 up(&ia32_mmap_sem);
1132 1128
1133 if ((ret >= 0) && (old_len < new_len)) { 1129 if ((ret >= 0) && (old_len < new_len)) {
1134 /* mremap expanded successfully */ 1130 /* mremap expanded successfully */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 2ea4b39efff..9c5194b385d 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -40,6 +40,7 @@
40#include <linux/bitops.h> 40#include <linux/bitops.h>
41#include <linux/capability.h> 41#include <linux/capability.h>
42#include <linux/rcupdate.h> 42#include <linux/rcupdate.h>
43#include <linux/completion.h>
43 44
44#include <asm/errno.h> 45#include <asm/errno.h>
45#include <asm/intrinsics.h> 46#include <asm/intrinsics.h>
@@ -286,7 +287,7 @@ typedef struct pfm_context {
286 287
287 unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */ 288 unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
288 289
289 struct semaphore ctx_restart_sem; /* use for blocking notification mode */ 290 struct completion ctx_restart_done; /* use for blocking notification mode */
290 291
291 unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */ 292 unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
292 unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */ 293 unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
@@ -1991,7 +1992,7 @@ pfm_close(struct inode *inode, struct file *filp)
1991 /* 1992 /*
1992 * force task to wake up from MASKED state 1993 * force task to wake up from MASKED state
1993 */ 1994 */
1994 up(&ctx->ctx_restart_sem); 1995 complete(&ctx->ctx_restart_done);
1995 1996
1996 DPRINT(("waking up ctx_state=%d\n", state)); 1997 DPRINT(("waking up ctx_state=%d\n", state));
1997 1998
@@ -2706,7 +2707,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
2706 /* 2707 /*
2707 * init restart semaphore to locked 2708 * init restart semaphore to locked
2708 */ 2709 */
2709 sema_init(&ctx->ctx_restart_sem, 0); 2710 init_completion(&ctx->ctx_restart_done);
2710 2711
2711 /* 2712 /*
2712 * activation is used in SMP only 2713 * activation is used in SMP only
@@ -3687,7 +3688,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3687 */ 3688 */
3688 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { 3689 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3689 DPRINT(("unblocking [%d] \n", task->pid)); 3690 DPRINT(("unblocking [%d] \n", task->pid));
3690 up(&ctx->ctx_restart_sem); 3691 complete(&ctx->ctx_restart_done);
3691 } else { 3692 } else {
3692 DPRINT(("[%d] armed exit trap\n", task->pid)); 3693 DPRINT(("[%d] armed exit trap\n", task->pid));
3693 3694
@@ -5089,7 +5090,7 @@ pfm_handle_work(void)
5089 * may go through without blocking on SMP systems 5090 * may go through without blocking on SMP systems
5090 * if restart has been received already by the time we call down() 5091 * if restart has been received already by the time we call down()
5091 */ 5092 */
5092 ret = down_interruptible(&ctx->ctx_restart_sem); 5093 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5093 5094
5094 DPRINT(("after block sleeping ret=%d\n", ret)); 5095 DPRINT(("after block sleeping ret=%d\n", ret));
5095 5096
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index b631cf86ed4..fcd2bad0286 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -210,6 +210,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
210 210
211 dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end); 211 dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
212 212
213 touch_softlockup_watchdog();
213 memset((char *)start, 0, length); 214 memset((char *)start, 0, length);
214 215
215 node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET); 216 node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
index 7c88e9a5851..8182583c762 100644
--- a/arch/ia64/sn/include/xtalk/hubdev.h
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -51,6 +51,15 @@ struct sn_flush_device_kernel {
51 struct sn_flush_device_common *common; 51 struct sn_flush_device_common *common;
52}; 52};
53 53
54/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
55 * for older official PROMs to function on the new kernel base. This struct
56 * will be removed when the next official PROM release occurs. */
57
58struct sn_flush_device_war {
59 struct sn_flush_device_common common;
60 u32 filler; /* older PROMs expect the default size of a spinlock_t */
61};
62
54/* 63/*
55 * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel. 64 * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
56 */ 65 */
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 233d55115d3..00700f7e683 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -165,8 +165,45 @@ sn_pcidev_info_get(struct pci_dev *dev)
165 return NULL; 165 return NULL;
166} 166}
167 167
168/* Older PROM flush WAR
169 *
170 * 01/16/06 -- This war will be in place until a new official PROM is released.
171 * Additionally note that the struct sn_flush_device_war also has to be
172 * removed from arch/ia64/sn/include/xtalk/hubdev.h
173 */
174static u8 war_implemented = 0;
175
176static void sn_device_fixup_war(u64 nasid, u64 widget, int device,
177 struct sn_flush_device_common *common)
178{
179 struct sn_flush_device_war *war_list;
180 struct sn_flush_device_war *dev_entry;
181 struct ia64_sal_retval isrv = {0,0,0,0};
182
183 if (!war_implemented) {
184 printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
185 "PROM flush WAR\n");
186 war_implemented = 1;
187 }
188
189 war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
190 if (!war_list)
191 BUG();
192
193 SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
194 nasid, widget, __pa(war_list), 0, 0, 0 ,0);
195 if (isrv.status)
196 panic("sn_device_fixup_war failed: %s\n",
197 ia64_sal_strerror(isrv.status));
198
199 dev_entry = war_list + device;
200 memcpy(common,dev_entry, sizeof(*common));
201
202 kfree(war_list);
203}
204
168/* 205/*
169 * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for 206 * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
170 * each node in the system. 207 * each node in the system.
171 */ 208 */
172static void sn_fixup_ionodes(void) 209static void sn_fixup_ionodes(void)
@@ -246,8 +283,19 @@ static void sn_fixup_ionodes(void)
246 widget, 283 widget,
247 device, 284 device,
248 (u64)(dev_entry->common)); 285 (u64)(dev_entry->common));
249 if (status) 286 if (status) {
250 BUG(); 287 if (sn_sal_rev() < 0x0450) {
288 /* shortlived WAR for older
289 * PROM images
290 */
291 sn_device_fixup_war(nasid,
292 widget,
293 device,
294 dev_entry->common);
295 }
296 else
297 BUG();
298 }
251 299
252 spin_lock_init(&dev_entry->sfdl_flush_lock); 300 spin_lock_init(&dev_entry->sfdl_flush_lock);
253 } 301 }
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 6546db6abdb..9ab684d1bb5 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/timer.h> 11#include <linux/timer.h>
12#include <linux/vmalloc.h> 12#include <linux/vmalloc.h>
13#include <linux/mutex.h>
13#include <asm/mca.h> 14#include <asm/mca.h>
14#include <asm/sal.h> 15#include <asm/sal.h>
15#include <asm/sn/sn_sal.h> 16#include <asm/sn/sn_sal.h>
@@ -27,7 +28,7 @@ void sn_init_cpei_timer(void);
27/* Printing oemdata from mca uses data that is not passed through SAL, it is 28/* Printing oemdata from mca uses data that is not passed through SAL, it is
28 * global. Only one user at a time. 29 * global. Only one user at a time.
29 */ 30 */
30static DECLARE_MUTEX(sn_oemdata_mutex); 31static DEFINE_MUTEX(sn_oemdata_mutex);
31static u8 **sn_oemdata; 32static u8 **sn_oemdata;
32static u64 *sn_oemdata_size, sn_oemdata_bufsize; 33static u64 *sn_oemdata_size, sn_oemdata_bufsize;
33 34
@@ -89,7 +90,7 @@ static int
89sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, 90sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
90 u64 * oemdata_size) 91 u64 * oemdata_size)
91{ 92{
92 down(&sn_oemdata_mutex); 93 mutex_lock(&sn_oemdata_mutex);
93 sn_oemdata = oemdata; 94 sn_oemdata = oemdata;
94 sn_oemdata_size = oemdata_size; 95 sn_oemdata_size = oemdata_size;
95 sn_oemdata_bufsize = 0; 96 sn_oemdata_bufsize = 0;
@@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
107 *sn_oemdata_size = 0; 108 *sn_oemdata_size = 0;
108 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); 109 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
109 } 110 }
110 up(&sn_oemdata_mutex); 111 mutex_unlock(&sn_oemdata_mutex);
111 return 0; 112 return 0;
112} 113}
113 114
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
index 3be52a34c80..b7ea46645e1 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/arch/ia64/sn/kernel/xp_main.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/mutex.h>
22#include <asm/sn/intr.h> 23#include <asm/sn/intr.h>
23#include <asm/sn/sn_sal.h> 24#include <asm/sn/sn_sal.h>
24#include <asm/sn/xp.h> 25#include <asm/sn/xp.h>
@@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
136 137
137 registration = &xpc_registrations[ch_number]; 138 registration = &xpc_registrations[ch_number];
138 139
139 if (down_interruptible(&registration->sema) != 0) { 140 if (mutex_lock_interruptible(&registration->mutex) != 0) {
140 return xpcInterrupted; 141 return xpcInterrupted;
141 } 142 }
142 143
143 /* if XPC_CHANNEL_REGISTERED(ch_number) */ 144 /* if XPC_CHANNEL_REGISTERED(ch_number) */
144 if (registration->func != NULL) { 145 if (registration->func != NULL) {
145 up(&registration->sema); 146 mutex_unlock(&registration->mutex);
146 return xpcAlreadyRegistered; 147 return xpcAlreadyRegistered;
147 } 148 }
148 149
@@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
154 registration->key = key; 155 registration->key = key;
155 registration->func = func; 156 registration->func = func;
156 157
157 up(&registration->sema); 158 mutex_unlock(&registration->mutex);
158 159
159 xpc_interface.connect(ch_number); 160 xpc_interface.connect(ch_number);
160 161
@@ -190,11 +191,11 @@ xpc_disconnect(int ch_number)
190 * figured XPC's users will just turn around and call xpc_disconnect() 191 * figured XPC's users will just turn around and call xpc_disconnect()
191 * again anyways, so we might as well wait, if need be. 192 * again anyways, so we might as well wait, if need be.
192 */ 193 */
193 down(&registration->sema); 194 mutex_lock(&registration->mutex);
194 195
195 /* if !XPC_CHANNEL_REGISTERED(ch_number) */ 196 /* if !XPC_CHANNEL_REGISTERED(ch_number) */
196 if (registration->func == NULL) { 197 if (registration->func == NULL) {
197 up(&registration->sema); 198 mutex_unlock(&registration->mutex);
198 return; 199 return;
199 } 200 }
200 201
@@ -208,7 +209,7 @@ xpc_disconnect(int ch_number)
208 209
209 xpc_interface.disconnect(ch_number); 210 xpc_interface.disconnect(ch_number);
210 211
211 up(&registration->sema); 212 mutex_unlock(&registration->mutex);
212 213
213 return; 214 return;
214} 215}
@@ -250,9 +251,9 @@ xp_init(void)
250 xp_nofault_PIOR_target = SH1_IPI_ACCESS; 251 xp_nofault_PIOR_target = SH1_IPI_ACCESS;
251 } 252 }
252 253
253 /* initialize the connection registration semaphores */ 254 /* initialize the connection registration mutex */
254 for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { 255 for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
255 sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */ 256 mutex_init(&xpc_registrations[ch_number].mutex);
256 } 257 }
257 258
258 return 0; 259 return 0;
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 0c0a6890240..8d950c778bb 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -22,6 +22,8 @@
22#include <linux/cache.h> 22#include <linux/cache.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/mutex.h>
26#include <linux/completion.h>
25#include <asm/sn/bte.h> 27#include <asm/sn/bte.h>
26#include <asm/sn/sn_sal.h> 28#include <asm/sn/sn_sal.h>
27#include <asm/sn/xpc.h> 29#include <asm/sn/xpc.h>
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
56 atomic_set(&ch->n_to_notify, 0); 58 atomic_set(&ch->n_to_notify, 0);
57 59
58 spin_lock_init(&ch->lock); 60 spin_lock_init(&ch->lock);
59 sema_init(&ch->msg_to_pull_sema, 1); /* mutex */ 61 mutex_init(&ch->msg_to_pull_mutex);
60 sema_init(&ch->wdisconnect_sema, 0); /* event wait */ 62 init_completion(&ch->wdisconnect_wait);
61 63
62 atomic_set(&ch->n_on_msg_allocate_wq, 0); 64 atomic_set(&ch->n_on_msg_allocate_wq, 0);
63 init_waitqueue_head(&ch->msg_allocate_wq); 65 init_waitqueue_head(&ch->msg_allocate_wq);
@@ -534,7 +536,6 @@ static enum xpc_retval
534xpc_allocate_msgqueues(struct xpc_channel *ch) 536xpc_allocate_msgqueues(struct xpc_channel *ch)
535{ 537{
536 unsigned long irq_flags; 538 unsigned long irq_flags;
537 int i;
538 enum xpc_retval ret; 539 enum xpc_retval ret;
539 540
540 541
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
552 return ret; 553 return ret;
553 } 554 }
554 555
555 for (i = 0; i < ch->local_nentries; i++) {
556 /* use a semaphore as an event wait queue */
557 sema_init(&ch->notify_queue[i].sema, 0);
558 }
559
560 spin_lock_irqsave(&ch->lock, irq_flags); 556 spin_lock_irqsave(&ch->lock, irq_flags);
561 ch->flags |= XPC_C_SETUP; 557 ch->flags |= XPC_C_SETUP;
562 spin_unlock_irqrestore(&ch->lock, irq_flags); 558 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
799 } 795 }
800 796
801 if (ch->flags & XPC_C_WDISCONNECT) { 797 if (ch->flags & XPC_C_WDISCONNECT) {
802 spin_unlock_irqrestore(&ch->lock, *irq_flags); 798 /* we won't lose the CPU since we're holding ch->lock */
803 up(&ch->wdisconnect_sema); 799 complete(&ch->wdisconnect_wait);
804 spin_lock_irqsave(&ch->lock, *irq_flags);
805
806 } else if (ch->delayed_IPI_flags) { 800 } else if (ch->delayed_IPI_flags) {
807 if (part->act_state != XPC_P_DEACTIVATING) { 801 if (part->act_state != XPC_P_DEACTIVATING) {
808 /* time to take action on any delayed IPI flags */ 802 /* time to take action on any delayed IPI flags */
@@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
1092 struct xpc_registration *registration = &xpc_registrations[ch->number]; 1086 struct xpc_registration *registration = &xpc_registrations[ch->number];
1093 1087
1094 1088
1095 if (down_trylock(&registration->sema) != 0) { 1089 if (mutex_trylock(&registration->mutex) == 0) {
1096 return xpcRetry; 1090 return xpcRetry;
1097 } 1091 }
1098 1092
1099 if (!XPC_CHANNEL_REGISTERED(ch->number)) { 1093 if (!XPC_CHANNEL_REGISTERED(ch->number)) {
1100 up(&registration->sema); 1094 mutex_unlock(&registration->mutex);
1101 return xpcUnregistered; 1095 return xpcUnregistered;
1102 } 1096 }
1103 1097
@@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1108 1102
1109 if (ch->flags & XPC_C_DISCONNECTING) { 1103 if (ch->flags & XPC_C_DISCONNECTING) {
1110 spin_unlock_irqrestore(&ch->lock, irq_flags); 1104 spin_unlock_irqrestore(&ch->lock, irq_flags);
1111 up(&registration->sema); 1105 mutex_unlock(&registration->mutex);
1112 return ch->reason; 1106 return ch->reason;
1113 } 1107 }
1114 1108
@@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1140 * channel lock be locked and will unlock and relock 1134 * channel lock be locked and will unlock and relock
1141 * the channel lock as needed. 1135 * the channel lock as needed.
1142 */ 1136 */
1143 up(&registration->sema); 1137 mutex_unlock(&registration->mutex);
1144 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, 1138 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1145 &irq_flags); 1139 &irq_flags);
1146 spin_unlock_irqrestore(&ch->lock, irq_flags); 1140 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1155 atomic_inc(&xpc_partitions[ch->partid].nchannels_active); 1149 atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
1156 } 1150 }
1157 1151
1158 up(&registration->sema); 1152 mutex_unlock(&registration->mutex);
1159 1153
1160 1154
1161 /* initiate the connection */ 1155 /* initiate the connection */
@@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2089 enum xpc_retval ret; 2083 enum xpc_retval ret;
2090 2084
2091 2085
2092 if (down_interruptible(&ch->msg_to_pull_sema) != 0) { 2086 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
2093 /* we were interrupted by a signal */ 2087 /* we were interrupted by a signal */
2094 return NULL; 2088 return NULL;
2095 } 2089 }
@@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2125 2119
2126 XPC_DEACTIVATE_PARTITION(part, ret); 2120 XPC_DEACTIVATE_PARTITION(part, ret);
2127 2121
2128 up(&ch->msg_to_pull_sema); 2122 mutex_unlock(&ch->msg_to_pull_mutex);
2129 return NULL; 2123 return NULL;
2130 } 2124 }
2131 2125
@@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2134 ch->next_msg_to_pull += nmsgs; 2128 ch->next_msg_to_pull += nmsgs;
2135 } 2129 }
2136 2130
2137 up(&ch->msg_to_pull_sema); 2131 mutex_unlock(&ch->msg_to_pull_mutex);
2138 2132
2139 /* return the message we were looking for */ 2133 /* return the message we were looking for */
2140 msg_offset = (get % ch->remote_nentries) * ch->msg_size; 2134 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 8930586e0eb..c75f8aeefc2 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -55,6 +55,7 @@
55#include <linux/slab.h> 55#include <linux/slab.h>
56#include <linux/delay.h> 56#include <linux/delay.h>
57#include <linux/reboot.h> 57#include <linux/reboot.h>
58#include <linux/completion.h>
58#include <asm/sn/intr.h> 59#include <asm/sn/intr.h>
59#include <asm/sn/sn_sal.h> 60#include <asm/sn/sn_sal.h>
60#include <asm/kdebug.h> 61#include <asm/kdebug.h>
@@ -177,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
177static unsigned long xpc_hb_check_timeout; 178static unsigned long xpc_hb_check_timeout;
178 179
179/* notification that the xpc_hb_checker thread has exited */ 180/* notification that the xpc_hb_checker thread has exited */
180static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited); 181static DECLARE_COMPLETION(xpc_hb_checker_exited);
181 182
182/* notification that the xpc_discovery thread has exited */ 183/* notification that the xpc_discovery thread has exited */
183static DECLARE_MUTEX_LOCKED(xpc_discovery_exited); 184static DECLARE_COMPLETION(xpc_discovery_exited);
184 185
185 186
186static struct timer_list xpc_hb_timer; 187static struct timer_list xpc_hb_timer;
@@ -321,7 +322,7 @@ xpc_hb_checker(void *ignore)
321 322
322 323
323 /* mark this thread as having exited */ 324 /* mark this thread as having exited */
324 up(&xpc_hb_checker_exited); 325 complete(&xpc_hb_checker_exited);
325 return 0; 326 return 0;
326} 327}
327 328
@@ -341,7 +342,7 @@ xpc_initiate_discovery(void *ignore)
341 dev_dbg(xpc_part, "discovery thread is exiting\n"); 342 dev_dbg(xpc_part, "discovery thread is exiting\n");
342 343
343 /* mark this thread as having exited */ 344 /* mark this thread as having exited */
344 up(&xpc_discovery_exited); 345 complete(&xpc_discovery_exited);
345 return 0; 346 return 0;
346} 347}
347 348
@@ -893,7 +894,7 @@ xpc_disconnect_wait(int ch_number)
893 continue; 894 continue;
894 } 895 }
895 896
896 (void) down(&ch->wdisconnect_sema); 897 wait_for_completion(&ch->wdisconnect_wait);
897 898
898 spin_lock_irqsave(&ch->lock, irq_flags); 899 spin_lock_irqsave(&ch->lock, irq_flags);
899 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); 900 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
@@ -946,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason)
946 free_irq(SGI_XPC_ACTIVATE, NULL); 947 free_irq(SGI_XPC_ACTIVATE, NULL);
947 948
948 /* wait for the discovery thread to exit */ 949 /* wait for the discovery thread to exit */
949 down(&xpc_discovery_exited); 950 wait_for_completion(&xpc_discovery_exited);
950 951
951 /* wait for the heartbeat checker thread to exit */ 952 /* wait for the heartbeat checker thread to exit */
952 down(&xpc_hb_checker_exited); 953 wait_for_completion(&xpc_hb_checker_exited);
953 954
954 955
955 /* sleep for a 1/3 of a second or so */ 956 /* sleep for a 1/3 of a second or so */
@@ -1367,7 +1368,7 @@ xpc_init(void)
1367 dev_err(xpc_part, "failed while forking discovery thread\n"); 1368 dev_err(xpc_part, "failed while forking discovery thread\n");
1368 1369
1369 /* mark this new thread as a non-starter */ 1370 /* mark this new thread as a non-starter */
1370 up(&xpc_discovery_exited); 1371 complete(&xpc_discovery_exited);
1371 1372
1372 xpc_do_exit(xpcUnloading); 1373 xpc_do_exit(xpcUnloading);
1373 return -EBUSY; 1374 return -EBUSY;
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 77a1262751d..2fac27049bf 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -24,13 +24,15 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
24{ 24{
25 struct ia64_sal_retval ret_stuff; 25 struct ia64_sal_retval ret_stuff;
26 u64 busnum; 26 u64 busnum;
27 u64 segment;
27 28
28 ret_stuff.status = 0; 29 ret_stuff.status = 0;
29 ret_stuff.v0 = 0; 30 ret_stuff.v0 = 0;
30 31
32 segment = soft->pbi_buscommon.bs_persist_segment;
31 busnum = soft->pbi_buscommon.bs_persist_busnum; 33 busnum = soft->pbi_buscommon.bs_persist_busnum;
32 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum, 34 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
33 (u64) device, (u64) resp, 0, 0, 0, 0); 35 busnum, (u64) device, (u64) resp, 0, 0, 0);
34 36
35 return (int)ret_stuff.v0; 37 return (int)ret_stuff.v0;
36} 38}
@@ -41,14 +43,16 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
41{ 43{
42 struct ia64_sal_retval ret_stuff; 44 struct ia64_sal_retval ret_stuff;
43 u64 busnum; 45 u64 busnum;
46 u64 segment;
44 47
45 ret_stuff.status = 0; 48 ret_stuff.status = 0;
46 ret_stuff.v0 = 0; 49 ret_stuff.v0 = 0;
47 50
51 segment = soft->pbi_buscommon.bs_persist_segment;
48 busnum = soft->pbi_buscommon.bs_persist_busnum; 52 busnum = soft->pbi_buscommon.bs_persist_busnum;
49 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE, 53 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
50 (u64) busnum, (u64) device, (u64) action, 54 segment, busnum, (u64) device, (u64) action,
51 (u64) resp, 0, 0, 0); 55 (u64) resp, 0, 0);
52 56
53 return (int)ret_stuff.v0; 57 return (int)ret_stuff.v0;
54} 58}
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index 955ef5084f3..959ad3c4e37 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -602,6 +602,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
602# CONFIG_DL2K is not set 602# CONFIG_DL2K is not set
603CONFIG_E1000=m 603CONFIG_E1000=m
604CONFIG_E1000_NAPI=y 604CONFIG_E1000_NAPI=y
605# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
605# CONFIG_NS83820 is not set 606# CONFIG_NS83820 is not set
606# CONFIG_HAMACHI is not set 607# CONFIG_HAMACHI is not set
607# CONFIG_YELLOWFIN is not set 608# CONFIG_YELLOWFIN is not set
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 9d86b6b1ebd..0b1c8c1fa8a 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -626,6 +626,7 @@ CONFIG_ACENIC=m
626# CONFIG_DL2K is not set 626# CONFIG_DL2K is not set
627CONFIG_E1000=m 627CONFIG_E1000=m
628# CONFIG_E1000_NAPI is not set 628# CONFIG_E1000_NAPI is not set
629# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
629# CONFIG_NS83820 is not set 630# CONFIG_NS83820 is not set
630# CONFIG_HAMACHI is not set 631# CONFIG_HAMACHI is not set
631# CONFIG_YELLOWFIN is not set 632# CONFIG_YELLOWFIN is not set
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index b657f7e4476..063b84f2cbe 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -533,6 +533,7 @@ CONFIG_MII=y
533# CONFIG_DL2K is not set 533# CONFIG_DL2K is not set
534CONFIG_E1000=m 534CONFIG_E1000=m
535# CONFIG_E1000_NAPI is not set 535# CONFIG_E1000_NAPI is not set
536# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
536# CONFIG_NS83820 is not set 537# CONFIG_NS83820 is not set
537# CONFIG_HAMACHI is not set 538# CONFIG_HAMACHI is not set
538# CONFIG_YELLOWFIN is not set 539# CONFIG_YELLOWFIN is not set
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 3c22ccb1851..d6fed3f5658 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -675,6 +675,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
675# CONFIG_DL2K is not set 675# CONFIG_DL2K is not set
676CONFIG_E1000=y 676CONFIG_E1000=y
677# CONFIG_E1000_NAPI is not set 677# CONFIG_E1000_NAPI is not set
678# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
678# CONFIG_NS83820 is not set 679# CONFIG_NS83820 is not set
679# CONFIG_HAMACHI is not set 680# CONFIG_HAMACHI is not set
680# CONFIG_YELLOWFIN is not set 681# CONFIG_YELLOWFIN is not set
diff --git a/arch/powerpc/configs/iseries_defconfig b/arch/powerpc/configs/iseries_defconfig
index 751a622fb7a..c775027947f 100644
--- a/arch/powerpc/configs/iseries_defconfig
+++ b/arch/powerpc/configs/iseries_defconfig
@@ -567,6 +567,7 @@ CONFIG_ACENIC=m
567# CONFIG_DL2K is not set 567# CONFIG_DL2K is not set
568CONFIG_E1000=m 568CONFIG_E1000=m
569# CONFIG_E1000_NAPI is not set 569# CONFIG_E1000_NAPI is not set
570# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
570# CONFIG_NS83820 is not set 571# CONFIG_NS83820 is not set
571# CONFIG_HAMACHI is not set 572# CONFIG_HAMACHI is not set
572# CONFIG_YELLOWFIN is not set 573# CONFIG_YELLOWFIN is not set
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 07b6d3d2336..68194c03f6d 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -454,6 +454,7 @@ CONFIG_AMD8111_ETH=y
454# CONFIG_DL2K is not set 454# CONFIG_DL2K is not set
455CONFIG_E1000=y 455CONFIG_E1000=y
456# CONFIG_E1000_NAPI is not set 456# CONFIG_E1000_NAPI is not set
457# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
457# CONFIG_NS83820 is not set 458# CONFIG_NS83820 is not set
458# CONFIG_HAMACHI is not set 459# CONFIG_HAMACHI is not set
459# CONFIG_YELLOWFIN is not set 460# CONFIG_YELLOWFIN is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 0b2b55a79c3..6f6c6bed1aa 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -724,6 +724,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
724# CONFIG_DL2K is not set 724# CONFIG_DL2K is not set
725CONFIG_E1000=y 725CONFIG_E1000=y
726# CONFIG_E1000_NAPI is not set 726# CONFIG_E1000_NAPI is not set
727# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
727# CONFIG_NS83820 is not set 728# CONFIG_NS83820 is not set
728# CONFIG_HAMACHI is not set 729# CONFIG_HAMACHI is not set
729# CONFIG_YELLOWFIN is not set 730# CONFIG_YELLOWFIN is not set
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index a50ce0fa924..aa9893a1f6e 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -671,6 +671,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
671# CONFIG_DL2K is not set 671# CONFIG_DL2K is not set
672CONFIG_E1000=y 672CONFIG_E1000=y
673# CONFIG_E1000_NAPI is not set 673# CONFIG_E1000_NAPI is not set
674# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
674# CONFIG_NS83820 is not set 675# CONFIG_NS83820 is not set
675# CONFIG_HAMACHI is not set 676# CONFIG_HAMACHI is not set
676# CONFIG_YELLOWFIN is not set 677# CONFIG_YELLOWFIN is not set
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index d8da2a35c0a..f20a67261ec 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -227,7 +227,7 @@ ret_from_syscall:
227 MTMSRD(r10) 227 MTMSRD(r10)
228 lwz r9,TI_FLAGS(r12) 228 lwz r9,TI_FLAGS(r12)
229 li r8,-_LAST_ERRNO 229 li r8,-_LAST_ERRNO
230 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL) 230 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
231 bne- syscall_exit_work 231 bne- syscall_exit_work
232 cmplw 0,r3,r8 232 cmplw 0,r3,r8
233 blt+ syscall_exit_cont 233 blt+ syscall_exit_cont
@@ -357,7 +357,7 @@ save_user_nvgprs_cont:
357 lwz r5,_MSR(r1) 357 lwz r5,_MSR(r1)
358 andi. r5,r5,MSR_PR 358 andi. r5,r5,MSR_PR
359 beq ret_from_except 359 beq ret_from_except
360 andi. r0,r9,_TIF_SIGPENDING 360 andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
361 beq ret_from_except 361 beq ret_from_except
362 b do_user_signal 362 b do_user_signal
3638: 3638:
@@ -683,7 +683,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
683 /* Check current_thread_info()->flags */ 683 /* Check current_thread_info()->flags */
684 rlwinm r9,r1,0,0,(31-THREAD_SHIFT) 684 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
685 lwz r9,TI_FLAGS(r9) 685 lwz r9,TI_FLAGS(r9)
686 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL) 686 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
687 bne do_work 687 bne do_work
688 688
689restore_user: 689restore_user:
@@ -917,7 +917,7 @@ recheck:
917 lwz r9,TI_FLAGS(r9) 917 lwz r9,TI_FLAGS(r9)
918 andi. r0,r9,_TIF_NEED_RESCHED 918 andi. r0,r9,_TIF_NEED_RESCHED
919 bne- do_resched 919 bne- do_resched
920 andi. r0,r9,_TIF_SIGPENDING 920 andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
921 beq restore_user 921 beq restore_user
922do_user_signal: /* r10 contains MSR_KERNEL here */ 922do_user_signal: /* r10 contains MSR_KERNEL here */
923 ori r10,r10,MSR_EE 923 ori r10,r10,MSR_EE
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 54203631886..388f861b8ed 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -160,7 +160,7 @@ syscall_exit:
160 mtmsrd r10,1 160 mtmsrd r10,1
161 ld r9,TI_FLAGS(r12) 161 ld r9,TI_FLAGS(r12)
162 li r11,-_LAST_ERRNO 162 li r11,-_LAST_ERRNO
163 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR) 163 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK)
164 bne- syscall_exit_work 164 bne- syscall_exit_work
165 cmpld r3,r11 165 cmpld r3,r11
166 ld r5,_CCR(r1) 166 ld r5,_CCR(r1)
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 177bba78fb0..3747ab0dac3 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -252,8 +252,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs);
252/* 252/*
253 * Atomically swap in the new signal mask, and wait for a signal. 253 * Atomically swap in the new signal mask, and wait for a signal.
254 */ 254 */
255long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7, 255long sys_sigsuspend(old_sigset_t mask)
256 struct pt_regs *regs)
257{ 256{
258 sigset_t saveset; 257 sigset_t saveset;
259 258
@@ -264,55 +263,10 @@ long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
264 recalc_sigpending(); 263 recalc_sigpending();
265 spin_unlock_irq(&current->sighand->siglock); 264 spin_unlock_irq(&current->sighand->siglock);
266 265
267 regs->result = -EINTR; 266 current->state = TASK_INTERRUPTIBLE;
268 regs->gpr[3] = EINTR; 267 schedule();
269 regs->ccr |= 0x10000000; 268 set_thread_flag(TIF_RESTORE_SIGMASK);
270 while (1) { 269 return -ERESTARTNOHAND;
271 current->state = TASK_INTERRUPTIBLE;
272 schedule();
273 if (do_signal(&saveset, regs)) {
274 set_thread_flag(TIF_RESTOREALL);
275 return 0;
276 }
277 }
278}
279
280long sys_rt_sigsuspend(
281#ifdef CONFIG_PPC64
282 compat_sigset_t __user *unewset,
283#else
284 sigset_t __user *unewset,
285#endif
286 size_t sigsetsize, int p3, int p4,
287 int p6, int p7, struct pt_regs *regs)
288{
289 sigset_t saveset, newset;
290
291 /* XXX: Don't preclude handling different sized sigset_t's. */
292 if (sigsetsize != sizeof(sigset_t))
293 return -EINVAL;
294
295 if (get_sigset_t(&newset, unewset))
296 return -EFAULT;
297 sigdelsetmask(&newset, ~_BLOCKABLE);
298
299 spin_lock_irq(&current->sighand->siglock);
300 saveset = current->blocked;
301 current->blocked = newset;
302 recalc_sigpending();
303 spin_unlock_irq(&current->sighand->siglock);
304
305 regs->result = -EINTR;
306 regs->gpr[3] = EINTR;
307 regs->ccr |= 0x10000000;
308 while (1) {
309 current->state = TASK_INTERRUPTIBLE;
310 schedule();
311 if (do_signal(&saveset, regs)) {
312 set_thread_flag(TIF_RESTOREALL);
313 return 0;
314 }
315 }
316} 270}
317 271
318#ifdef CONFIG_PPC32 272#ifdef CONFIG_PPC32
@@ -1174,7 +1128,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
1174{ 1128{
1175 siginfo_t info; 1129 siginfo_t info;
1176 struct k_sigaction ka; 1130 struct k_sigaction ka;
1177 unsigned int frame, newsp; 1131 unsigned int newsp;
1178 int signr, ret; 1132 int signr, ret;
1179 1133
1180#ifdef CONFIG_PPC32 1134#ifdef CONFIG_PPC32
@@ -1185,11 +1139,11 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
1185 } 1139 }
1186#endif 1140#endif
1187 1141
1188 if (!oldset) 1142 if (test_thread_flag(TIF_RESTORE_SIGMASK))
1143 oldset = &current->saved_sigmask;
1144 else if (!oldset)
1189 oldset = &current->blocked; 1145 oldset = &current->blocked;
1190 1146
1191 newsp = frame = 0;
1192
1193 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 1147 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
1194#ifdef CONFIG_PPC32 1148#ifdef CONFIG_PPC32
1195no_signal: 1149no_signal:
@@ -1219,8 +1173,14 @@ no_signal:
1219 } 1173 }
1220 } 1174 }
1221 1175
1222 if (signr == 0) 1176 if (signr == 0) {
1177 /* No signal to deliver -- put the saved sigmask back */
1178 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
1179 clear_thread_flag(TIF_RESTORE_SIGMASK);
1180 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
1181 }
1223 return 0; /* no signals delivered */ 1182 return 0; /* no signals delivered */
1183 }
1224 1184
1225 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size 1185 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
1226 && !on_sig_stack(regs->gpr[1])) 1186 && !on_sig_stack(regs->gpr[1]))
@@ -1253,6 +1213,10 @@ no_signal:
1253 sigaddset(&current->blocked, signr); 1213 sigaddset(&current->blocked, signr);
1254 recalc_sigpending(); 1214 recalc_sigpending();
1255 spin_unlock_irq(&current->sighand->siglock); 1215 spin_unlock_irq(&current->sighand->siglock);
1216 /* A signal was successfully delivered; the saved sigmask is in
1217 its frame, and we can clear the TIF_RESTORE_SIGMASK flag */
1218 if (test_thread_flag(TIF_RESTORE_SIGMASK))
1219 clear_thread_flag(TIF_RESTORE_SIGMASK);
1256 } 1220 }
1257 1221
1258 return ret; 1222 return ret;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 7b9d999e211..b3193116e68 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -67,42 +67,6 @@ struct rt_sigframe {
67 char abigap[288]; 67 char abigap[288];
68} __attribute__ ((aligned (16))); 68} __attribute__ ((aligned (16)));
69 69
70
71/*
72 * Atomically swap in the new signal mask, and wait for a signal.
73 */
74long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
75 int p6, int p7, struct pt_regs *regs)
76{
77 sigset_t saveset, newset;
78
79 /* XXX: Don't preclude handling different sized sigset_t's. */
80 if (sigsetsize != sizeof(sigset_t))
81 return -EINVAL;
82
83 if (copy_from_user(&newset, unewset, sizeof(newset)))
84 return -EFAULT;
85 sigdelsetmask(&newset, ~_BLOCKABLE);
86
87 spin_lock_irq(&current->sighand->siglock);
88 saveset = current->blocked;
89 current->blocked = newset;
90 recalc_sigpending();
91 spin_unlock_irq(&current->sighand->siglock);
92
93 regs->result = -EINTR;
94 regs->gpr[3] = EINTR;
95 regs->ccr |= 0x10000000;
96 while (1) {
97 current->state = TASK_INTERRUPTIBLE;
98 schedule();
99 if (do_signal(&saveset, regs)) {
100 set_thread_flag(TIF_RESTOREALL);
101 return 0;
102 }
103 }
104}
105
106long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r5, 70long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r5,
107 unsigned long r6, unsigned long r7, unsigned long r8, 71 unsigned long r6, unsigned long r7, unsigned long r8,
108 struct pt_regs *regs) 72 struct pt_regs *regs)
@@ -556,11 +520,15 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
556 if (test_thread_flag(TIF_32BIT)) 520 if (test_thread_flag(TIF_32BIT))
557 return do_signal32(oldset, regs); 521 return do_signal32(oldset, regs);
558 522
559 if (!oldset) 523 if (test_thread_flag(TIF_RESTORE_SIGMASK))
524 oldset = &current->saved_sigmask;
525 else if (!oldset)
560 oldset = &current->blocked; 526 oldset = &current->blocked;
561 527
562 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 528 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
563 if (signr > 0) { 529 if (signr > 0) {
530 int ret;
531
564 /* Whee! Actually deliver the signal. */ 532 /* Whee! Actually deliver the signal. */
565 if (TRAP(regs) == 0x0C00) 533 if (TRAP(regs) == 0x0C00)
566 syscall_restart(regs, &ka); 534 syscall_restart(regs, &ka);
@@ -573,7 +541,14 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
573 if (current->thread.dabr) 541 if (current->thread.dabr)
574 set_dabr(current->thread.dabr); 542 set_dabr(current->thread.dabr);
575 543
576 return handle_signal(signr, &ka, &info, oldset, regs); 544 ret = handle_signal(signr, &ka, &info, oldset, regs);
545
546 /* If a signal was successfully delivered, the saved sigmask is in
547 its frame, and we can clear the TIF_RESTORE_SIGMASK flag */
548 if (ret && test_thread_flag(TIF_RESTORE_SIGMASK))
549 clear_thread_flag(TIF_RESTORE_SIGMASK);
550
551 return ret;
577 } 552 }
578 553
579 if (TRAP(regs) == 0x0C00) { /* System Call! */ 554 if (TRAP(regs) == 0x0C00) { /* System Call! */
@@ -589,6 +564,11 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
589 regs->result = 0; 564 regs->result = 0;
590 } 565 }
591 } 566 }
567 /* No signal to deliver -- put the saved sigmask back */
568 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
569 clear_thread_flag(TIF_RESTORE_SIGMASK);
570 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
571 }
592 572
593 return 0; 573 return 0;
594} 574}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 68013179a50..007b15ee36d 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -321,3 +321,5 @@ SYSCALL(inotify_add_watch)
321SYSCALL(inotify_rm_watch) 321SYSCALL(inotify_rm_watch)
322SYSCALL(spu_run) 322SYSCALL(spu_run)
323SYSCALL(spu_create) 323SYSCALL(spu_create)
324COMPAT_SYS(pselect6)
325COMPAT_SYS(ppoll)
diff --git a/arch/ppc/configs/bamboo_defconfig b/arch/ppc/configs/bamboo_defconfig
index 0ba4e70d50b..41fd3938fa5 100644
--- a/arch/ppc/configs/bamboo_defconfig
+++ b/arch/ppc/configs/bamboo_defconfig
@@ -499,6 +499,7 @@ CONFIG_NATSEMI=y
499# CONFIG_DL2K is not set 499# CONFIG_DL2K is not set
500CONFIG_E1000=y 500CONFIG_E1000=y
501# CONFIG_E1000_NAPI is not set 501# CONFIG_E1000_NAPI is not set
502# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
502# CONFIG_NS83820 is not set 503# CONFIG_NS83820 is not set
503# CONFIG_HAMACHI is not set 504# CONFIG_HAMACHI is not set
504# CONFIG_YELLOWFIN is not set 505# CONFIG_YELLOWFIN is not set
diff --git a/arch/ppc/configs/katana_defconfig b/arch/ppc/configs/katana_defconfig
index 0f3bb9af9c2..7311fe6b42d 100644
--- a/arch/ppc/configs/katana_defconfig
+++ b/arch/ppc/configs/katana_defconfig
@@ -488,6 +488,7 @@ CONFIG_E100=y
488# CONFIG_DL2K is not set 488# CONFIG_DL2K is not set
489CONFIG_E1000=y 489CONFIG_E1000=y
490# CONFIG_E1000_NAPI is not set 490# CONFIG_E1000_NAPI is not set
491# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
491# CONFIG_NS83820 is not set 492# CONFIG_NS83820 is not set
492# CONFIG_HAMACHI is not set 493# CONFIG_HAMACHI is not set
493# CONFIG_YELLOWFIN is not set 494# CONFIG_YELLOWFIN is not set
diff --git a/arch/ppc/configs/mpc834x_sys_defconfig b/arch/ppc/configs/mpc834x_sys_defconfig
index 673dc64ebcb..b96a6d6dad0 100644
--- a/arch/ppc/configs/mpc834x_sys_defconfig
+++ b/arch/ppc/configs/mpc834x_sys_defconfig
@@ -402,6 +402,7 @@ CONFIG_E100=y
402# CONFIG_DL2K is not set 402# CONFIG_DL2K is not set
403CONFIG_E1000=y 403CONFIG_E1000=y
404# CONFIG_E1000_NAPI is not set 404# CONFIG_E1000_NAPI is not set
405# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
405# CONFIG_NS83820 is not set 406# CONFIG_NS83820 is not set
406# CONFIG_HAMACHI is not set 407# CONFIG_HAMACHI is not set
407# CONFIG_YELLOWFIN is not set 408# CONFIG_YELLOWFIN is not set
diff --git a/arch/ppc/configs/power3_defconfig b/arch/ppc/configs/power3_defconfig
index 93da595a473..a1ef929bca5 100644
--- a/arch/ppc/configs/power3_defconfig
+++ b/arch/ppc/configs/power3_defconfig
@@ -442,6 +442,7 @@ CONFIG_E100=y
442# CONFIG_DL2K is not set 442# CONFIG_DL2K is not set
443CONFIG_E1000=y 443CONFIG_E1000=y
444# CONFIG_E1000_NAPI is not set 444# CONFIG_E1000_NAPI is not set
445# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
445# CONFIG_NS83820 is not set 446# CONFIG_NS83820 is not set
446# CONFIG_HAMACHI is not set 447# CONFIG_HAMACHI is not set
447# CONFIG_YELLOWFIN is not set 448# CONFIG_YELLOWFIN is not set
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 03ecb4e4614..c51d08d218e 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1277,62 +1277,6 @@ sys_sigstack:
1277 mov %l5, %o7 1277 mov %l5, %o7
1278 1278
1279 .align 4 1279 .align 4
1280 .globl sys_sigpause
1281sys_sigpause:
1282 /* Note: %o0 already has correct value... */
1283 call do_sigpause
1284 add %sp, STACKFRAME_SZ, %o1
1285
1286 ld [%curptr + TI_FLAGS], %l5
1287 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1288 be 1f
1289 nop
1290
1291 call syscall_trace
1292 nop
1293
12941:
1295 /* We are returning to a signal handler. */
1296 RESTORE_ALL
1297
1298 .align 4
1299 .globl sys_sigsuspend
1300sys_sigsuspend:
1301 call do_sigsuspend
1302 add %sp, STACKFRAME_SZ, %o0
1303
1304 ld [%curptr + TI_FLAGS], %l5
1305 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1306 be 1f
1307 nop
1308
1309 call syscall_trace
1310 nop
1311
13121:
1313 /* We are returning to a signal handler. */
1314 RESTORE_ALL
1315
1316 .align 4
1317 .globl sys_rt_sigsuspend
1318sys_rt_sigsuspend:
1319 /* Note: %o0, %o1 already have correct value... */
1320 call do_rt_sigsuspend
1321 add %sp, STACKFRAME_SZ, %o2
1322
1323 ld [%curptr + TI_FLAGS], %l5
1324 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1325 be 1f
1326 nop
1327
1328 call syscall_trace
1329 nop
1330
13311:
1332 /* We are returning to a signal handler. */
1333 RESTORE_ALL
1334
1335 .align 4
1336 .globl sys_sigreturn 1280 .globl sys_sigreturn
1337sys_sigreturn: 1281sys_sigreturn:
1338 call do_sigreturn 1282 call do_sigreturn
diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S
index f7460d897e7..77ca6fd8125 100644
--- a/arch/sparc/kernel/rtrap.S
+++ b/arch/sparc/kernel/rtrap.S
@@ -68,15 +68,14 @@ ret_trap_lockless_ipi:
68 68
69 ld [%curptr + TI_FLAGS], %g2 69 ld [%curptr + TI_FLAGS], %g2
70signal_p: 70signal_p:
71 andcc %g2, (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING), %g0 71 andcc %g2, (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %g0
72 bz,a ret_trap_continue 72 bz,a ret_trap_continue
73 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr 73 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
74 74
75 clr %o0 75 mov %l5, %o1
76 mov %l5, %o2 76 mov %l6, %o2
77 mov %l6, %o3
78 call do_signal 77 call do_signal
79 add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr 78 add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
80 79
81 /* Fall through. */ 80 /* Fall through. */
82 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr 81 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
index 5f34d7dc2b8..0748d8147bb 100644
--- a/arch/sparc/kernel/signal.c
+++ b/arch/sparc/kernel/signal.c
@@ -35,9 +35,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
35 void *fpqueue, unsigned long *fpqdepth); 35 void *fpqueue, unsigned long *fpqdepth);
36extern void fpload(unsigned long *fpregs, unsigned long *fsr); 36extern void fpload(unsigned long *fpregs, unsigned long *fsr);
37 37
38asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
39 unsigned long orig_o0, int restart_syscall);
40
41/* Signal frames: the original one (compatible with SunOS): 38/* Signal frames: the original one (compatible with SunOS):
42 * 39 *
43 * Set up a signal frame... Make the stack look the way SunOS 40 * Set up a signal frame... Make the stack look the way SunOS
@@ -95,98 +92,30 @@ struct rt_signal_frame {
95#define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame) + 7) & (~7))) 92#define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame) + 7) & (~7)))
96#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) 93#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
97 94
98/* 95static int _sigpause_common(old_sigset_t set)
99 * atomically swap in the new signal mask, and wait for a signal.
100 * This is really tricky on the Sparc, watch out...
101 */
102asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
103{ 96{
104 sigset_t saveset;
105
106 set &= _BLOCKABLE; 97 set &= _BLOCKABLE;
107 spin_lock_irq(&current->sighand->siglock); 98 spin_lock_irq(&current->sighand->siglock);
108 saveset = current->blocked; 99 current->saved_sigmask = current->blocked;
109 siginitset(&current->blocked, set); 100 siginitset(&current->blocked, set);
110 recalc_sigpending(); 101 recalc_sigpending();
111 spin_unlock_irq(&current->sighand->siglock); 102 spin_unlock_irq(&current->sighand->siglock);
112 103
113 regs->pc = regs->npc; 104 current->state = TASK_INTERRUPTIBLE;
114 regs->npc += 4; 105 schedule();
115 106 set_thread_flag(TIF_RESTORE_SIGMASK);
116 /* Condition codes and return value where set here for sigpause,
117 * and so got used by setup_frame, which again causes sigreturn()
118 * to return -EINTR.
119 */
120 while (1) {
121 current->state = TASK_INTERRUPTIBLE;
122 schedule();
123 /*
124 * Return -EINTR and set condition code here,
125 * so the interrupted system call actually returns
126 * these.
127 */
128 regs->psr |= PSR_C;
129 regs->u_regs[UREG_I0] = EINTR;
130 if (do_signal(&saveset, regs, 0, 0))
131 return;
132 }
133}
134 107
135asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs) 108 return -ERESTARTNOHAND;
136{
137 _sigpause_common(set, regs);
138} 109}
139 110
140asmlinkage void do_sigsuspend (struct pt_regs *regs) 111asmlinkage int sys_sigpause(unsigned int set)
141{ 112{
142 _sigpause_common(regs->u_regs[UREG_I0], regs); 113 return _sigpause_common(set);
143} 114}
144 115
145asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, 116asmlinkage int sys_sigsuspend(old_sigset_t set)
146 struct pt_regs *regs)
147{ 117{
148 sigset_t oldset, set; 118 return _sigpause_common(set);
149
150 /* XXX: Don't preclude handling different sized sigset_t's. */
151 if (sigsetsize != sizeof(sigset_t)) {
152 regs->psr |= PSR_C;
153 regs->u_regs[UREG_I0] = EINVAL;
154 return;
155 }
156
157 if (copy_from_user(&set, uset, sizeof(set))) {
158 regs->psr |= PSR_C;
159 regs->u_regs[UREG_I0] = EFAULT;
160 return;
161 }
162
163 sigdelsetmask(&set, ~_BLOCKABLE);
164 spin_lock_irq(&current->sighand->siglock);
165 oldset = current->blocked;
166 current->blocked = set;
167 recalc_sigpending();
168 spin_unlock_irq(&current->sighand->siglock);
169
170 regs->pc = regs->npc;
171 regs->npc += 4;
172
173 /* Condition codes and return value where set here for sigpause,
174 * and so got used by setup_frame, which again causes sigreturn()
175 * to return -EINTR.
176 */
177 while (1) {
178 current->state = TASK_INTERRUPTIBLE;
179 schedule();
180 /*
181 * Return -EINTR and set condition code here,
182 * so the interrupted system call actually returns
183 * these.
184 */
185 regs->psr |= PSR_C;
186 regs->u_regs[UREG_I0] = EINTR;
187 if (do_signal(&oldset, regs, 0, 0))
188 return;
189 }
190} 119}
191 120
192static inline int 121static inline int
@@ -1067,13 +996,13 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
1067 * want to handle. Thus you cannot kill init even with a SIGKILL even by 996 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1068 * mistake. 997 * mistake.
1069 */ 998 */
1070asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, 999asmlinkage void do_signal(struct pt_regs * regs, unsigned long orig_i0, int restart_syscall)
1071 unsigned long orig_i0, int restart_syscall)
1072{ 1000{
1073 siginfo_t info; 1001 siginfo_t info;
1074 struct sparc_deliver_cookie cookie; 1002 struct sparc_deliver_cookie cookie;
1075 struct k_sigaction ka; 1003 struct k_sigaction ka;
1076 int signr; 1004 int signr;
1005 sigset_t *oldset;
1077 1006
1078 /* 1007 /*
1079 * XXX Disable svr4 signal handling until solaris emulation works. 1008 * XXX Disable svr4 signal handling until solaris emulation works.
@@ -1089,7 +1018,9 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
1089 cookie.restart_syscall = restart_syscall; 1018 cookie.restart_syscall = restart_syscall;
1090 cookie.orig_i0 = orig_i0; 1019 cookie.orig_i0 = orig_i0;
1091 1020
1092 if (!oldset) 1021 if (test_thread_flag(TIF_RESTORE_SIGMASK))
1022 oldset = &current->saved_sigmask;
1023 else
1093 oldset = &current->blocked; 1024 oldset = &current->blocked;
1094 1025
1095 signr = get_signal_to_deliver(&info, &ka, regs, &cookie); 1026 signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
@@ -1098,7 +1029,14 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
1098 syscall_restart(cookie.orig_i0, regs, &ka.sa); 1029 syscall_restart(cookie.orig_i0, regs, &ka.sa);
1099 handle_signal(signr, &ka, &info, oldset, 1030 handle_signal(signr, &ka, &info, oldset,
1100 regs, svr4_signal); 1031 regs, svr4_signal);
1101 return 1; 1032 /* a signal was successfully delivered; the saved
1033 * sigmask will have been stored in the signal frame,
1034 * and will be restored by sigreturn, so we can simply
1035 * clear the TIF_RESTORE_SIGMASK flag.
1036 */
1037 if (test_thread_flag(TIF_RESTORE_SIGMASK))
1038 clear_thread_flag(TIF_RESTORE_SIGMASK);
1039 return;
1102 } 1040 }
1103 if (cookie.restart_syscall && 1041 if (cookie.restart_syscall &&
1104 (regs->u_regs[UREG_I0] == ERESTARTNOHAND || 1042 (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -1115,7 +1053,14 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
1115 regs->pc -= 4; 1053 regs->pc -= 4;
1116 regs->npc -= 4; 1054 regs->npc -= 4;
1117 } 1055 }
1118 return 0; 1056
1057 /* if there's no signal to deliver, we just put the saved sigmask
1058 * back
1059 */
1060 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
1061 clear_thread_flag(TIF_RESTORE_SIGMASK);
1062 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
1063 }
1119} 1064}
1120 1065
1121asmlinkage int 1066asmlinkage int
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 0b0d492c953..19b25399d7e 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -66,7 +66,6 @@ struct poll {
66 66
67extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *); 67extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *);
68extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *); 68extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *);
69void _sigpause_common (unsigned int set, struct pt_regs *);
70extern void (*__copy_1page)(void *, const void *); 69extern void (*__copy_1page)(void *, const void *);
71extern void __memmove(void *, const void *, __kernel_size_t); 70extern void __memmove(void *, const void *, __kernel_size_t);
72extern void (*bzero_1page)(void *); 71extern void (*bzero_1page)(void *);
@@ -227,7 +226,6 @@ EXPORT_SYMBOL(kunmap_atomic);
227/* Solaris/SunOS binary compatibility */ 226/* Solaris/SunOS binary compatibility */
228EXPORT_SYMBOL(svr4_setcontext); 227EXPORT_SYMBOL(svr4_setcontext);
229EXPORT_SYMBOL(svr4_getcontext); 228EXPORT_SYMBOL(svr4_getcontext);
230EXPORT_SYMBOL(_sigpause_common);
231 229
232EXPORT_SYMBOL(dump_thread); 230EXPORT_SYMBOL(dump_thread);
233 231
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index e457a40838f..6877ae4cd1d 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -75,7 +75,10 @@ sys_call_table:
75/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy 75/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
76/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink 76/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
77/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid 77/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
78/*280*/ .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl 78/*280*/ .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat
79/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_newfstatat
80/*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
81/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll
79 82
80#ifdef CONFIG_SUNOS_EMUL 83#ifdef CONFIG_SUNOS_EMUL
81 /* Now the SunOS syscall table. */ 84 /* Now the SunOS syscall table. */
@@ -181,6 +184,11 @@ sunos_sys_table:
181 .long sunos_nosys, sunos_nosys, sunos_nosys 184 .long sunos_nosys, sunos_nosys, sunos_nosys
182 .long sunos_nosys 185 .long sunos_nosys
183/*280*/ .long sunos_nosys, sunos_nosys, sunos_nosys 186/*280*/ .long sunos_nosys, sunos_nosys, sunos_nosys
187 .long sunos_nosys, sunos_nosys, sunos_nosys
188 .long sunos_nosys, sunos_nosys, sunos_nosys
184 .long sunos_nosys 189 .long sunos_nosys
190/*290*/ .long sunos_nosys, sunos_nosys, sunos_nosys
191 .long sunos_nosys, sunos_nosys, sunos_nosys
192 .long sunos_nosys, sunos_nosys, sunos_nosys
185 193
186#endif 194#endif
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index a3fb3376ffa..9ceddad0fb4 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.15 3# Linux kernel version: 2.6.16-rc1
4# Mon Jan 9 14:36:29 2006 4# Wed Jan 18 13:41:02 2006
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -233,6 +233,11 @@ CONFIG_VLAN_8021Q=m
233# CONFIG_ATALK is not set 233# CONFIG_ATALK is not set
234# CONFIG_X25 is not set 234# CONFIG_X25 is not set
235# CONFIG_LAPB is not set 235# CONFIG_LAPB is not set
236
237#
238# TIPC Configuration (EXPERIMENTAL)
239#
240# CONFIG_TIPC is not set
236# CONFIG_NET_DIVERT is not set 241# CONFIG_NET_DIVERT is not set
237# CONFIG_ECONET is not set 242# CONFIG_ECONET is not set
238# CONFIG_WAN_ROUTER is not set 243# CONFIG_WAN_ROUTER is not set
@@ -420,8 +425,7 @@ CONFIG_ISCSI_TCP=m
420# CONFIG_SCSI_QLOGIC_FC is not set 425# CONFIG_SCSI_QLOGIC_FC is not set
421# CONFIG_SCSI_QLOGIC_1280 is not set 426# CONFIG_SCSI_QLOGIC_1280 is not set
422# CONFIG_SCSI_QLOGICPTI is not set 427# CONFIG_SCSI_QLOGICPTI is not set
423CONFIG_SCSI_QLA2XXX=y 428# CONFIG_SCSI_QLA_FC is not set
424# CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE is not set
425# CONFIG_SCSI_LPFC is not set 429# CONFIG_SCSI_LPFC is not set
426# CONFIG_SCSI_DC395x is not set 430# CONFIG_SCSI_DC395x is not set
427# CONFIG_SCSI_DC390T is not set 431# CONFIG_SCSI_DC390T is not set
@@ -529,6 +533,7 @@ CONFIG_NET_PCI=y
529# CONFIG_DL2K is not set 533# CONFIG_DL2K is not set
530CONFIG_E1000=m 534CONFIG_E1000=m
531CONFIG_E1000_NAPI=y 535CONFIG_E1000_NAPI=y
536# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
532# CONFIG_MYRI_SBUS is not set 537# CONFIG_MYRI_SBUS is not set
533# CONFIG_NS83820 is not set 538# CONFIG_NS83820 is not set
534# CONFIG_HAMACHI is not set 539# CONFIG_HAMACHI is not set
@@ -652,7 +657,6 @@ CONFIG_SERIAL_SUNSU_CONSOLE=y
652CONFIG_SERIAL_SUNSAB=m 657CONFIG_SERIAL_SUNSAB=m
653CONFIG_SERIAL_CORE=y 658CONFIG_SERIAL_CORE=y
654CONFIG_SERIAL_CORE_CONSOLE=y 659CONFIG_SERIAL_CORE_CONSOLE=y
655# CONFIG_SERIAL_JSM is not set
656CONFIG_UNIX98_PTYS=y 660CONFIG_UNIX98_PTYS=y
657# CONFIG_LEGACY_PTYS is not set 661# CONFIG_LEGACY_PTYS is not set
658 662
@@ -738,6 +742,12 @@ CONFIG_I2C_ALGOBIT=y
738# CONFIG_I2C_DEBUG_CHIP is not set 742# CONFIG_I2C_DEBUG_CHIP is not set
739 743
740# 744#
745# SPI support
746#
747# CONFIG_SPI is not set
748# CONFIG_SPI_MASTER is not set
749
750#
741# Dallas's 1-wire bus 751# Dallas's 1-wire bus
742# 752#
743# CONFIG_W1 is not set 753# CONFIG_W1 is not set
@@ -1014,6 +1024,7 @@ CONFIG_USB_UHCI_HCD=m
1014# 1024#
1015CONFIG_USB_HID=y 1025CONFIG_USB_HID=y
1016CONFIG_USB_HIDINPUT=y 1026CONFIG_USB_HIDINPUT=y
1027# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1017# CONFIG_HID_FF is not set 1028# CONFIG_HID_FF is not set
1018CONFIG_USB_HIDDEV=y 1029CONFIG_USB_HIDDEV=y
1019# CONFIG_USB_AIPTEK is not set 1030# CONFIG_USB_AIPTEK is not set
@@ -1268,12 +1279,13 @@ CONFIG_KPROBES=y
1268# Kernel hacking 1279# Kernel hacking
1269# 1280#
1270CONFIG_PRINTK_TIME=y 1281CONFIG_PRINTK_TIME=y
1271CONFIG_DEBUG_KERNEL=y
1272CONFIG_MAGIC_SYSRQ=y 1282CONFIG_MAGIC_SYSRQ=y
1283CONFIG_DEBUG_KERNEL=y
1273CONFIG_LOG_BUF_SHIFT=18 1284CONFIG_LOG_BUF_SHIFT=18
1274CONFIG_DETECT_SOFTLOCKUP=y 1285CONFIG_DETECT_SOFTLOCKUP=y
1275CONFIG_SCHEDSTATS=y 1286CONFIG_SCHEDSTATS=y
1276# CONFIG_DEBUG_SLAB is not set 1287# CONFIG_DEBUG_SLAB is not set
1288# CONFIG_DEBUG_MUTEXES is not set
1277# CONFIG_DEBUG_SPINLOCK is not set 1289# CONFIG_DEBUG_SPINLOCK is not set
1278# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1290# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1279# CONFIG_DEBUG_KOBJECT is not set 1291# CONFIG_DEBUG_KOBJECT is not set
@@ -1281,6 +1293,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
1281# CONFIG_DEBUG_INFO is not set 1293# CONFIG_DEBUG_INFO is not set
1282CONFIG_DEBUG_FS=y 1294CONFIG_DEBUG_FS=y
1283# CONFIG_DEBUG_VM is not set 1295# CONFIG_DEBUG_VM is not set
1296CONFIG_FORCED_INLINING=y
1284# CONFIG_RCU_TORTURE_TEST is not set 1297# CONFIG_RCU_TORTURE_TEST is not set
1285# CONFIG_DEBUG_STACK_USAGE is not set 1298# CONFIG_DEBUG_STACK_USAGE is not set
1286# CONFIG_DEBUG_DCFLUSH is not set 1299# CONFIG_DEBUG_DCFLUSH is not set
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 71000299188..e50e56e4ab6 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1416,7 +1416,6 @@ execve_merge:
1416 add %sp, PTREGS_OFF, %o0 1416 add %sp, PTREGS_OFF, %o0
1417 1417
1418 .globl sys_pipe, sys_sigpause, sys_nis_syscall 1418 .globl sys_pipe, sys_sigpause, sys_nis_syscall
1419 .globl sys_sigsuspend, sys_rt_sigsuspend
1420 .globl sys_rt_sigreturn 1419 .globl sys_rt_sigreturn
1421 .globl sys_ptrace 1420 .globl sys_ptrace
1422 .globl sys_sigaltstack 1421 .globl sys_sigaltstack
@@ -1440,28 +1439,6 @@ sys32_sigaltstack:
1440 mov %i6, %o2 1439 mov %i6, %o2
1441#endif 1440#endif
1442 .align 32 1441 .align 32
1443sys_sigsuspend: add %sp, PTREGS_OFF, %o0
1444 call do_sigsuspend
1445 add %o7, 1f-.-4, %o7
1446 nop
1447sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
1448 add %sp, PTREGS_OFF, %o2
1449 call do_rt_sigsuspend
1450 add %o7, 1f-.-4, %o7
1451 nop
1452#ifdef CONFIG_COMPAT
1453 .globl sys32_rt_sigsuspend
1454sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
1455 srl %o0, 0, %o0
1456 add %sp, PTREGS_OFF, %o2
1457 call do_rt_sigsuspend32
1458 add %o7, 1f-.-4, %o7
1459#endif
1460 /* NOTE: %o0 has a correct value already */
1461sys_sigpause: add %sp, PTREGS_OFF, %o1
1462 call do_sigpause
1463 add %o7, 1f-.-4, %o7
1464 nop
1465#ifdef CONFIG_COMPAT 1442#ifdef CONFIG_COMPAT
1466 .globl sys32_sigreturn 1443 .globl sys32_sigreturn
1467sys32_sigreturn: 1444sys32_sigreturn:
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 1dc3650c5ca..059b0d02522 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -164,6 +164,7 @@ void machine_restart(char * cmd)
164 panic("Reboot failed!"); 164 panic("Reboot failed!");
165} 165}
166 166
167#ifdef CONFIG_COMPAT
167static void show_regwindow32(struct pt_regs *regs) 168static void show_regwindow32(struct pt_regs *regs)
168{ 169{
169 struct reg_window32 __user *rw; 170 struct reg_window32 __user *rw;
@@ -189,6 +190,9 @@ static void show_regwindow32(struct pt_regs *regs)
189 r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3], 190 r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
190 r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]); 191 r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
191} 192}
193#else
194#define show_regwindow32(regs) do { } while (0)
195#endif
192 196
193static void show_regwindow(struct pt_regs *regs) 197static void show_regwindow(struct pt_regs *regs)
194{ 198{
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 090dcca00d2..b80eba0081c 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -53,14 +53,13 @@ __handle_user_windows:
53 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate 53 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
54 ldx [%g6 + TI_FLAGS], %l0 54 ldx [%g6 + TI_FLAGS], %l0
55 55
561: andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0 561: andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
57 be,pt %xcc, __handle_user_windows_continue 57 be,pt %xcc, __handle_user_windows_continue
58 nop 58 nop
59 clr %o0 59 mov %l5, %o1
60 mov %l5, %o2 60 mov %l6, %o2
61 mov %l6, %o3 61 add %sp, PTREGS_OFF, %o0
62 add %sp, PTREGS_OFF, %o1 62 mov %l0, %o3
63 mov %l0, %o4
64 63
65 call do_notify_resume 64 call do_notify_resume
66 wrpr %g0, RTRAP_PSTATE, %pstate 65 wrpr %g0, RTRAP_PSTATE, %pstate
@@ -96,15 +95,14 @@ __handle_perfctrs:
96 wrpr %g0, RTRAP_PSTATE, %pstate 95 wrpr %g0, RTRAP_PSTATE, %pstate
97 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate 96 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
98 ldx [%g6 + TI_FLAGS], %l0 97 ldx [%g6 + TI_FLAGS], %l0
991: andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0 981: andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
100 99
101 be,pt %xcc, __handle_perfctrs_continue 100 be,pt %xcc, __handle_perfctrs_continue
102 sethi %hi(TSTATE_PEF), %o0 101 sethi %hi(TSTATE_PEF), %o0
103 clr %o0 102 mov %l5, %o1
104 mov %l5, %o2 103 mov %l6, %o2
105 mov %l6, %o3 104 add %sp, PTREGS_OFF, %o0
106 add %sp, PTREGS_OFF, %o1 105 mov %l0, %o3
107 mov %l0, %o4
108 call do_notify_resume 106 call do_notify_resume
109 107
110 wrpr %g0, RTRAP_PSTATE, %pstate 108 wrpr %g0, RTRAP_PSTATE, %pstate
@@ -129,11 +127,10 @@ __handle_userfpu:
129 ba,a,pt %xcc, __handle_userfpu_continue 127 ba,a,pt %xcc, __handle_userfpu_continue
130 128
131__handle_signal: 129__handle_signal:
132 clr %o0 130 mov %l5, %o1
133 mov %l5, %o2 131 mov %l6, %o2
134 mov %l6, %o3 132 add %sp, PTREGS_OFF, %o0
135 add %sp, PTREGS_OFF, %o1 133 mov %l0, %o3
136 mov %l0, %o4
137 call do_notify_resume 134 call do_notify_resume
138 wrpr %g0, RTRAP_PSTATE, %pstate 135 wrpr %g0, RTRAP_PSTATE, %pstate
139 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate 136 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
@@ -200,7 +197,7 @@ __handle_preemption_continue:
200 andcc %l1, %o0, %g0 197 andcc %l1, %o0, %g0
201 andcc %l0, _TIF_NEED_RESCHED, %g0 198 andcc %l0, _TIF_NEED_RESCHED, %g0
202 bne,pn %xcc, __handle_preemption 199 bne,pn %xcc, __handle_preemption
203 andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0 200 andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
204 bne,pn %xcc, __handle_signal 201 bne,pn %xcc, __handle_signal
205__handle_signal_continue: 202__handle_signal_continue:
206 ldub [%g6 + TI_WSAVED], %o2 203 ldub [%g6 + TI_WSAVED], %o2
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 250745896ae..054461e6946 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -561,6 +561,8 @@ static int __init set_preferred_console(void)
561 serial_console = 1; 561 serial_console = 1;
562 } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) { 562 } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
563 serial_console = 2; 563 serial_console = 2;
564 } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) {
565 serial_console = 3;
564 } else { 566 } else {
565 prom_printf("Inconsistent console: " 567 prom_printf("Inconsistent console: "
566 "input %d, output %d\n", 568 "input %d, output %d\n",
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index 60f5dfabb1e..ca11a4c457d 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -36,9 +36,6 @@
36 36
37#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 37#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
38 38
39static int do_signal(sigset_t *oldset, struct pt_regs * regs,
40 unsigned long orig_o0, int ret_from_syscall);
41
42/* {set, get}context() needed for 64-bit SparcLinux userland. */ 39/* {set, get}context() needed for 64-bit SparcLinux userland. */
43asmlinkage void sparc64_set_context(struct pt_regs *regs) 40asmlinkage void sparc64_set_context(struct pt_regs *regs)
44{ 41{
@@ -242,114 +239,29 @@ struct rt_signal_frame {
242/* Align macros */ 239/* Align macros */
243#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) 240#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
244 241
245/* 242static long _sigpause_common(old_sigset_t set)
246 * atomically swap in the new signal mask, and wait for a signal.
247 * This is really tricky on the Sparc, watch out...
248 */
249asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
250{ 243{
251 sigset_t saveset;
252
253#ifdef CONFIG_SPARC32_COMPAT
254 if (test_thread_flag(TIF_32BIT)) {
255 extern asmlinkage void _sigpause32_common(compat_old_sigset_t,
256 struct pt_regs *);
257 _sigpause32_common(set, regs);
258 return;
259 }
260#endif
261 set &= _BLOCKABLE; 244 set &= _BLOCKABLE;
262 spin_lock_irq(&current->sighand->siglock); 245 spin_lock_irq(&current->sighand->siglock);
263 saveset = current->blocked; 246 current->saved_sigmask = current->blocked;
264 siginitset(&current->blocked, set); 247 siginitset(&current->blocked, set);
265 recalc_sigpending(); 248 recalc_sigpending();
266 spin_unlock_irq(&current->sighand->siglock); 249 spin_unlock_irq(&current->sighand->siglock);
267
268 if (test_thread_flag(TIF_32BIT)) {
269 regs->tpc = (regs->tnpc & 0xffffffff);
270 regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
271 } else {
272 regs->tpc = regs->tnpc;
273 regs->tnpc += 4;
274 }
275 250
276 /* Condition codes and return value where set here for sigpause, 251 current->state = TASK_INTERRUPTIBLE;
277 * and so got used by setup_frame, which again causes sigreturn() 252 schedule();
278 * to return -EINTR. 253 set_thread_flag(TIF_RESTORE_SIGMASK);
279 */ 254 return -ERESTARTNOHAND;
280 while (1) {
281 current->state = TASK_INTERRUPTIBLE;
282 schedule();
283 /*
284 * Return -EINTR and set condition code here,
285 * so the interrupted system call actually returns
286 * these.
287 */
288 regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
289 regs->u_regs[UREG_I0] = EINTR;
290 if (do_signal(&saveset, regs, 0, 0))
291 return;
292 }
293} 255}
294 256
295asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs) 257asmlinkage long sys_sigpause(unsigned int set)
296{ 258{
297 _sigpause_common(set, regs); 259 return _sigpause_common(set);
298} 260}
299 261
300asmlinkage void do_sigsuspend(struct pt_regs *regs) 262asmlinkage long sys_sigsuspend(old_sigset_t set)
301{ 263{
302 _sigpause_common(regs->u_regs[UREG_I0], regs); 264 return _sigpause_common(set);
303}
304
305asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs)
306{
307 sigset_t oldset, set;
308
309 /* XXX: Don't preclude handling different sized sigset_t's. */
310 if (sigsetsize != sizeof(sigset_t)) {
311 regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
312 regs->u_regs[UREG_I0] = EINVAL;
313 return;
314 }
315 if (copy_from_user(&set, uset, sizeof(set))) {
316 regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
317 regs->u_regs[UREG_I0] = EFAULT;
318 return;
319 }
320
321 sigdelsetmask(&set, ~_BLOCKABLE);
322 spin_lock_irq(&current->sighand->siglock);
323 oldset = current->blocked;
324 current->blocked = set;
325 recalc_sigpending();
326 spin_unlock_irq(&current->sighand->siglock);
327
328 if (test_thread_flag(TIF_32BIT)) {
329 regs->tpc = (regs->tnpc & 0xffffffff);
330 regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
331 } else {
332 regs->tpc = regs->tnpc;
333 regs->tnpc += 4;
334 }
335
336 /* Condition codes and return value where set here for sigpause,
337 * and so got used by setup_frame, which again causes sigreturn()
338 * to return -EINTR.
339 */
340 while (1) {
341 current->state = TASK_INTERRUPTIBLE;
342 schedule();
343 /*
344 * Return -EINTR and set condition code here,
345 * so the interrupted system call actually returns
346 * these.
347 */
348 regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
349 regs->u_regs[UREG_I0] = EINTR;
350 if (do_signal(&oldset, regs, 0, 0))
351 return;
352 }
353} 265}
354 266
355static inline int 267static inline int
@@ -607,26 +519,29 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
607 * want to handle. Thus you cannot kill init even with a SIGKILL even by 519 * want to handle. Thus you cannot kill init even with a SIGKILL even by
608 * mistake. 520 * mistake.
609 */ 521 */
610static int do_signal(sigset_t *oldset, struct pt_regs * regs, 522static void do_signal(struct pt_regs *regs, unsigned long orig_i0, int restart_syscall)
611 unsigned long orig_i0, int restart_syscall)
612{ 523{
613 siginfo_t info; 524 siginfo_t info;
614 struct signal_deliver_cookie cookie; 525 struct signal_deliver_cookie cookie;
615 struct k_sigaction ka; 526 struct k_sigaction ka;
616 int signr; 527 int signr;
528 sigset_t *oldset;
617 529
618 cookie.restart_syscall = restart_syscall; 530 cookie.restart_syscall = restart_syscall;
619 cookie.orig_i0 = orig_i0; 531 cookie.orig_i0 = orig_i0;
620 532
621 if (!oldset) 533 if (test_thread_flag(TIF_RESTORE_SIGMASK))
534 oldset = &current->saved_sigmask;
535 else
622 oldset = &current->blocked; 536 oldset = &current->blocked;
623 537
624#ifdef CONFIG_SPARC32_COMPAT 538#ifdef CONFIG_SPARC32_COMPAT
625 if (test_thread_flag(TIF_32BIT)) { 539 if (test_thread_flag(TIF_32BIT)) {
626 extern int do_signal32(sigset_t *, struct pt_regs *, 540 extern void do_signal32(sigset_t *, struct pt_regs *,
627 unsigned long, int); 541 unsigned long, int);
628 return do_signal32(oldset, regs, orig_i0, 542 do_signal32(oldset, regs, orig_i0,
629 cookie.restart_syscall); 543 cookie.restart_syscall);
544 return;
630 } 545 }
631#endif 546#endif
632 547
@@ -635,7 +550,15 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
635 if (cookie.restart_syscall) 550 if (cookie.restart_syscall)
636 syscall_restart(orig_i0, regs, &ka.sa); 551 syscall_restart(orig_i0, regs, &ka.sa);
637 handle_signal(signr, &ka, &info, oldset, regs); 552 handle_signal(signr, &ka, &info, oldset, regs);
638 return 1; 553
554 /* a signal was successfully delivered; the saved
555 * sigmask will have been stored in the signal frame,
556 * and will be restored by sigreturn, so we can simply
557 * clear the TIF_RESTORE_SIGMASK flag.
558 */
559 if (test_thread_flag(TIF_RESTORE_SIGMASK))
560 clear_thread_flag(TIF_RESTORE_SIGMASK);
561 return;
639 } 562 }
640 if (cookie.restart_syscall && 563 if (cookie.restart_syscall &&
641 (regs->u_regs[UREG_I0] == ERESTARTNOHAND || 564 (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -652,15 +575,21 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
652 regs->tpc -= 4; 575 regs->tpc -= 4;
653 regs->tnpc -= 4; 576 regs->tnpc -= 4;
654 } 577 }
655 return 0; 578
579 /* if there's no signal to deliver, we just put the saved sigmask
580 * back
581 */
582 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
583 clear_thread_flag(TIF_RESTORE_SIGMASK);
584 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
585 }
656} 586}
657 587
658void do_notify_resume(sigset_t *oldset, struct pt_regs *regs, 588void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, int restart_syscall,
659 unsigned long orig_i0, int restart_syscall,
660 unsigned long thread_info_flags) 589 unsigned long thread_info_flags)
661{ 590{
662 if (thread_info_flags & _TIF_SIGPENDING) 591 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
663 do_signal(oldset, regs, orig_i0, restart_syscall); 592 do_signal(regs, orig_i0, restart_syscall);
664} 593}
665 594
666void ptrace_signal_deliver(struct pt_regs *regs, void *cookie) 595void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index 009a86e5ded..708ba9b42cd 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -32,9 +32,6 @@
32 32
33#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 33#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
34 34
35int do_signal32(sigset_t *oldset, struct pt_regs *regs,
36 unsigned long orig_o0, int ret_from_syscall);
37
38/* Signal frames: the original one (compatible with SunOS): 35/* Signal frames: the original one (compatible with SunOS):
39 * 36 *
40 * Set up a signal frame... Make the stack look the way SunOS 37 * Set up a signal frame... Make the stack look the way SunOS
@@ -226,102 +223,6 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
226 return 0; 223 return 0;
227} 224}
228 225
229/*
230 * atomically swap in the new signal mask, and wait for a signal.
231 * This is really tricky on the Sparc, watch out...
232 */
233asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs)
234{
235 sigset_t saveset;
236
237 set &= _BLOCKABLE;
238 spin_lock_irq(&current->sighand->siglock);
239 saveset = current->blocked;
240 siginitset(&current->blocked, set);
241 recalc_sigpending();
242 spin_unlock_irq(&current->sighand->siglock);
243
244 regs->tpc = regs->tnpc;
245 regs->tnpc += 4;
246 if (test_thread_flag(TIF_32BIT)) {
247 regs->tpc &= 0xffffffff;
248 regs->tnpc &= 0xffffffff;
249 }
250
251 /* Condition codes and return value where set here for sigpause,
252 * and so got used by setup_frame, which again causes sigreturn()
253 * to return -EINTR.
254 */
255 while (1) {
256 current->state = TASK_INTERRUPTIBLE;
257 schedule();
258 /*
259 * Return -EINTR and set condition code here,
260 * so the interrupted system call actually returns
261 * these.
262 */
263 regs->tstate |= TSTATE_ICARRY;
264 regs->u_regs[UREG_I0] = EINTR;
265 if (do_signal32(&saveset, regs, 0, 0))
266 return;
267 }
268}
269
270asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *regs)
271{
272 sigset_t oldset, set;
273 compat_sigset_t set32;
274
275 /* XXX: Don't preclude handling different sized sigset_t's. */
276 if (((compat_size_t)sigsetsize) != sizeof(sigset_t)) {
277 regs->tstate |= TSTATE_ICARRY;
278 regs->u_regs[UREG_I0] = EINVAL;
279 return;
280 }
281 if (copy_from_user(&set32, compat_ptr(uset), sizeof(set32))) {
282 regs->tstate |= TSTATE_ICARRY;
283 regs->u_regs[UREG_I0] = EFAULT;
284 return;
285 }
286 switch (_NSIG_WORDS) {
287 case 4: set.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
288 case 3: set.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
289 case 2: set.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
290 case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
291 }
292 sigdelsetmask(&set, ~_BLOCKABLE);
293 spin_lock_irq(&current->sighand->siglock);
294 oldset = current->blocked;
295 current->blocked = set;
296 recalc_sigpending();
297 spin_unlock_irq(&current->sighand->siglock);
298
299 regs->tpc = regs->tnpc;
300 regs->tnpc += 4;
301 if (test_thread_flag(TIF_32BIT)) {
302 regs->tpc &= 0xffffffff;
303 regs->tnpc &= 0xffffffff;
304 }
305
306 /* Condition codes and return value where set here for sigpause,
307 * and so got used by setup_frame, which again causes sigreturn()
308 * to return -EINTR.
309 */
310 while (1) {
311 current->state = TASK_INTERRUPTIBLE;
312 schedule();
313 /*
314 * Return -EINTR and set condition code here,
315 * so the interrupted system call actually returns
316 * these.
317 */
318 regs->tstate |= TSTATE_ICARRY;
319 regs->u_regs[UREG_I0] = EINTR;
320 if (do_signal32(&oldset, regs, 0, 0))
321 return;
322 }
323}
324
325static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) 226static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
326{ 227{
327 unsigned long *fpregs = current_thread_info()->fpregs; 228 unsigned long *fpregs = current_thread_info()->fpregs;
@@ -1362,8 +1263,8 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
1362 * want to handle. Thus you cannot kill init even with a SIGKILL even by 1263 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1363 * mistake. 1264 * mistake.
1364 */ 1265 */
1365int do_signal32(sigset_t *oldset, struct pt_regs * regs, 1266void do_signal32(sigset_t *oldset, struct pt_regs * regs,
1366 unsigned long orig_i0, int restart_syscall) 1267 unsigned long orig_i0, int restart_syscall)
1367{ 1268{
1368 siginfo_t info; 1269 siginfo_t info;
1369 struct signal_deliver_cookie cookie; 1270 struct signal_deliver_cookie cookie;
@@ -1380,7 +1281,15 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
1380 syscall_restart32(orig_i0, regs, &ka.sa); 1281 syscall_restart32(orig_i0, regs, &ka.sa);
1381 handle_signal32(signr, &ka, &info, oldset, 1282 handle_signal32(signr, &ka, &info, oldset,
1382 regs, svr4_signal); 1283 regs, svr4_signal);
1383 return 1; 1284
1285 /* a signal was successfully delivered; the saved
1286 * sigmask will have been stored in the signal frame,
1287 * and will be restored by sigreturn, so we can simply
1288 * clear the TIF_RESTORE_SIGMASK flag.
1289 */
1290 if (test_thread_flag(TIF_RESTORE_SIGMASK))
1291 clear_thread_flag(TIF_RESTORE_SIGMASK);
1292 return;
1384 } 1293 }
1385 if (cookie.restart_syscall && 1294 if (cookie.restart_syscall &&
1386 (regs->u_regs[UREG_I0] == ERESTARTNOHAND || 1295 (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -1397,7 +1306,14 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
1397 regs->tpc -= 4; 1306 regs->tpc -= 4;
1398 regs->tnpc -= 4; 1307 regs->tnpc -= 4;
1399 } 1308 }
1400 return 0; 1309
1310 /* if there's no signal to deliver, we just put the saved sigmask
1311 * back
1312 */
1313 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
1314 clear_thread_flag(TIF_RESTORE_SIGMASK);
1315 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
1316 }
1401} 1317}
1402 1318
1403struct sigstack32 { 1319struct sigstack32 {
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index d177d7e5c9d..3c06bfb92a8 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -69,7 +69,6 @@ struct poll {
69 69
70extern void die_if_kernel(char *str, struct pt_regs *regs); 70extern void die_if_kernel(char *str, struct pt_regs *regs);
71extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); 71extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
72void _sigpause_common (unsigned int set, struct pt_regs *);
73extern void *__bzero(void *, size_t); 72extern void *__bzero(void *, size_t);
74extern void *__memscan_zero(void *, size_t); 73extern void *__memscan_zero(void *, size_t);
75extern void *__memscan_generic(void *, int, size_t); 74extern void *__memscan_generic(void *, int, size_t);
@@ -236,9 +235,10 @@ EXPORT_SYMBOL(pci_dma_supported);
236/* I/O device mmaping on Sparc64. */ 235/* I/O device mmaping on Sparc64. */
237EXPORT_SYMBOL(io_remap_pfn_range); 236EXPORT_SYMBOL(io_remap_pfn_range);
238 237
238#ifdef CONFIG_COMPAT
239/* Solaris/SunOS binary compatibility */ 239/* Solaris/SunOS binary compatibility */
240EXPORT_SYMBOL(_sigpause_common);
241EXPORT_SYMBOL(verify_compat_iovec); 240EXPORT_SYMBOL(verify_compat_iovec);
241#endif
242 242
243EXPORT_SYMBOL(dump_fpu); 243EXPORT_SYMBOL(dump_fpu);
244EXPORT_SYMBOL(pte_alloc_one_kernel); 244EXPORT_SYMBOL(pte_alloc_one_kernel);
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index d4b7a100cb8..9264ccbaaaf 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -821,7 +821,7 @@ asmlinkage long sys32_utimes(char __user *filename,
821 return -EFAULT; 821 return -EFAULT;
822 } 822 }
823 823
824 return do_utimes(filename, (tvs ? &ktvs[0] : NULL)); 824 return do_utimes(AT_FDCWD, filename, (tvs ? &ktvs[0] : NULL));
825} 825}
826 826
827/* These are here just in case some old sparc32 binary calls it. */ 827/* These are here just in case some old sparc32 binary calls it. */
@@ -1003,7 +1003,7 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
1003asmlinkage long sparc32_open(const char __user *filename, 1003asmlinkage long sparc32_open(const char __user *filename,
1004 int flags, int mode) 1004 int flags, int mode)
1005{ 1005{
1006 return do_sys_open(filename, flags, mode); 1006 return do_sys_open(AT_FDCWD, filename, flags, mode);
1007} 1007}
1008 1008
1009extern unsigned long do_mremap(unsigned long addr, 1009extern unsigned long do_mremap(unsigned long addr,
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 98d24bc0004..5ed1a17cd5b 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -41,7 +41,7 @@ sys_call_table32:
41/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid 41/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
42 .word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall 42 .word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
43/*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending 43/*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending
44 .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, sys32_rt_sigsuspend, sys_setresuid, sys_getresuid 44 .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
45/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall 45/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
46 .word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd 46 .word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
47/*120*/ .word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod 47/*120*/ .word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod
@@ -76,7 +76,10 @@ sys_call_table32:
76 .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy 76 .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
77/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink 77/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
78 .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid 78 .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
79/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl 79/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
80 .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, compat_sys_newfstatat
81/*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
82 .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll
80 83
81#endif /* CONFIG_COMPAT */ 84#endif /* CONFIG_COMPAT */
82 85
@@ -142,7 +145,10 @@ sys_call_table:
142 .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy 145 .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
143/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink 146/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
144 .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid 147 .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
145/*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl 148/*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat
149 .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, compat_sys_newfstatat
150/*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
151 .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll
146 152
147#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \ 153#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
148 defined(CONFIG_SOLARIS_EMUL_MODULE) 154 defined(CONFIG_SOLARIS_EMUL_MODULE)
@@ -239,13 +245,20 @@ sunos_sys_table:
239/*250*/ .word sunos_nosys, sunos_nosys, sunos_nosys 245/*250*/ .word sunos_nosys, sunos_nosys, sunos_nosys
240 .word sunos_nosys, sunos_nosys, sunos_nosys 246 .word sunos_nosys, sunos_nosys, sunos_nosys
241 .word sunos_nosys, sunos_nosys, sunos_nosys 247 .word sunos_nosys, sunos_nosys, sunos_nosys
248 .word sunos_nosys
249/*260*/ .word sunos_nosys, sunos_nosys, sunos_nosys
242 .word sunos_nosys, sunos_nosys, sunos_nosys 250 .word sunos_nosys, sunos_nosys, sunos_nosys
243 .word sunos_nosys, sunos_nosys, sunos_nosys 251 .word sunos_nosys, sunos_nosys, sunos_nosys
252 .word sunos_nosys
253/*270*/ .word sunos_nosys, sunos_nosys, sunos_nosys
244 .word sunos_nosys, sunos_nosys, sunos_nosys 254 .word sunos_nosys, sunos_nosys, sunos_nosys
245 .word sunos_nosys, sunos_nosys, sunos_nosys 255 .word sunos_nosys, sunos_nosys, sunos_nosys
256 .word sunos_nosys
257/*280*/ .word sunos_nosys, sunos_nosys, sunos_nosys
246 .word sunos_nosys, sunos_nosys, sunos_nosys 258 .word sunos_nosys, sunos_nosys, sunos_nosys
247 .word sunos_nosys, sunos_nosys, sunos_nosys 259 .word sunos_nosys, sunos_nosys, sunos_nosys
260 .word sunos_nosys
261/*290*/ .word sunos_nosys, sunos_nosys, sunos_nosys
248 .word sunos_nosys, sunos_nosys, sunos_nosys 262 .word sunos_nosys, sunos_nosys, sunos_nosys
249 .word sunos_nosys, sunos_nosys, sunos_nosys 263 .word sunos_nosys, sunos_nosys, sunos_nosys
250 .word sunos_nosys
251#endif 264#endif
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
index eae5db8dda5..ac6d035dd15 100644
--- a/arch/sparc64/prom/console.c
+++ b/arch/sparc64/prom/console.c
@@ -99,8 +99,12 @@ prom_query_input_device(void)
99 if (!strncmp(propb, "keyboard", 8)) 99 if (!strncmp(propb, "keyboard", 8))
100 return PROMDEV_ITTYA; 100 return PROMDEV_ITTYA;
101 101
102 if (!strncmp (propb, "rsc", 3))
103 return PROMDEV_IRSC;
104
102 if (strncmp (propb, "tty", 3) || !propb[3]) 105 if (strncmp (propb, "tty", 3) || !propb[3])
103 return PROMDEV_I_UNK; 106 return PROMDEV_I_UNK;
107
104 switch (propb[3]) { 108 switch (propb[3]) {
105 case 'a': return PROMDEV_ITTYA; 109 case 'a': return PROMDEV_ITTYA;
106 case 'b': return PROMDEV_ITTYB; 110 case 'b': return PROMDEV_ITTYB;
@@ -136,8 +140,12 @@ prom_query_output_device(void)
136 if (!strncmp(propb, "screen", 6)) 140 if (!strncmp(propb, "screen", 6))
137 return PROMDEV_OTTYA; 141 return PROMDEV_OTTYA;
138 142
143 if (!strncmp (propb, "rsc", 3))
144 return PROMDEV_ORSC;
145
139 if (strncmp (propb, "tty", 3) || !propb[3]) 146 if (strncmp (propb, "tty", 3) || !propb[3])
140 return PROMDEV_O_UNK; 147 return PROMDEV_O_UNK;
148
141 switch (propb[3]) { 149 switch (propb[3]) {
142 case 'a': return PROMDEV_OTTYA; 150 case 'a': return PROMDEV_OTTYA;
143 case 'b': return PROMDEV_OTTYB; 151 case 'b': return PROMDEV_OTTYB;
diff --git a/arch/sparc64/solaris/entry64.S b/arch/sparc64/solaris/entry64.S
index 4b6ae583c0a..eb314ed23cd 100644
--- a/arch/sparc64/solaris/entry64.S
+++ b/arch/sparc64/solaris/entry64.S
@@ -180,6 +180,8 @@ solaris_sigsuspend:
180 nop 180 nop
181 call sys_sigsuspend 181 call sys_sigsuspend
182 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] 182 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
183 b,pt %xcc, ret_from_solaris
184 nop
183 185
184 .globl solaris_getpid 186 .globl solaris_getpid
185solaris_getpid: 187solaris_getpid:
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 8ff3bcbce5f..5982fe2753e 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -143,6 +143,7 @@ config HOSTFS
143 143
144config HPPFS 144config HPPFS
145 tristate "HoneyPot ProcFS (EXPERIMENTAL)" 145 tristate "HoneyPot ProcFS (EXPERIMENTAL)"
146 depends on EXPERIMENTAL
146 help 147 help
147 hppfs (HoneyPot ProcFS) is a filesystem which allows UML /proc 148 hppfs (HoneyPot ProcFS) is a filesystem which allows UML /proc
148 entries to be overridden, removed, or fabricated from the host. 149 entries to be overridden, removed, or fabricated from the host.
@@ -155,10 +156,6 @@ config HPPFS
155 You only need this if you are setting up a UML honeypot. Otherwise, 156 You only need this if you are setting up a UML honeypot. Otherwise,
156 it is safe to say 'N' here. 157 it is safe to say 'N' here.
157 158
158 If you are actively using it, please report any problems, since it's
159 getting fixed. In this moment, it is experimental on 2.6 (it works on
160 2.4).
161
162config MCONSOLE 159config MCONSOLE
163 bool "Management console" 160 bool "Management console"
164 default y 161 default y
@@ -243,8 +240,16 @@ config NEST_LEVEL
243 Only change this if you are running nested UMLs. 240 Only change this if you are running nested UMLs.
244 241
245config HIGHMEM 242config HIGHMEM
246 bool "Highmem support" 243 bool "Highmem support (EXPERIMENTAL)"
247 depends on !64BIT 244 depends on !64BIT && EXPERIMENTAL
245 default n
246 help
247 This was used to allow UML to run with big amounts of memory.
248 Currently it is unstable, so if unsure say N.
249
250 To use big amounts of memory, it is recommended to disable TT mode (i.e.
251 CONFIG_MODE_TT) and enable static linking (i.e. CONFIG_STATIC_LINK) -
252 this should allow the guest to use up to 2.75G of memory.
248 253
249config KERNEL_STACK_ORDER 254config KERNEL_STACK_ORDER
250 int "Kernel stack size order" 255 int "Kernel stack size order"
@@ -269,17 +274,13 @@ endmenu
269 274
270source "init/Kconfig" 275source "init/Kconfig"
271 276
272source "net/Kconfig" 277source "drivers/block/Kconfig"
273
274source "drivers/base/Kconfig"
275 278
276source "arch/um/Kconfig.char" 279source "arch/um/Kconfig.char"
277 280
278source "drivers/block/Kconfig" 281source "drivers/base/Kconfig"
279 282
280config NETDEVICES 283source "net/Kconfig"
281 bool
282 default NET
283 284
284source "arch/um/Kconfig.net" 285source "arch/um/Kconfig.net"
285 286
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index c71b39a677a..ef79ed25aec 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -22,13 +22,17 @@ config TOP_ADDR
22 default 0x80000000 if HOST_2G_2G 22 default 0x80000000 if HOST_2G_2G
23 23
24config 3_LEVEL_PGTABLES 24config 3_LEVEL_PGTABLES
25 bool "Three-level pagetables" 25 bool "Three-level pagetables (EXPERIMENTAL)"
26 default n 26 default n
27 depends on EXPERIMENTAL
27 help 28 help
28 Three-level pagetables will let UML have more than 4G of physical 29 Three-level pagetables will let UML have more than 4G of physical
29 memory. All the memory that can't be mapped directly will be treated 30 memory. All the memory that can't be mapped directly will be treated
30 as high memory. 31 as high memory.
31 32
33 However, this it experimental on 32-bit architectures, so if unsure say
34 N (on x86-64 it's automatically enabled, instead, as it's safe there).
35
32config STUB_CODE 36config STUB_CODE
33 hex 37 hex
34 default 0xbfffe000 38 default 0xbfffe000
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 45435ff589c..6430a638385 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -32,7 +32,7 @@ um-modes-$(CONFIG_MODE_TT) += tt
32um-modes-$(CONFIG_MODE_SKAS) += skas 32um-modes-$(CONFIG_MODE_SKAS) += skas
33 33
34MODE_INCLUDE += $(foreach mode,$(um-modes-y),\ 34MODE_INCLUDE += $(foreach mode,$(um-modes-y),\
35 -I$(srctree)/$(ARCH_DIR)/kernel/$(mode)/include) 35 -I$(srctree)/$(ARCH_DIR)/include/$(mode))
36 36
37MAKEFILES-INCL += $(foreach mode,$(um-modes-y),\ 37MAKEFILES-INCL += $(foreach mode,$(um-modes-y),\
38 $(srctree)/$(ARCH_DIR)/Makefile-$(mode)) 38 $(srctree)/$(ARCH_DIR)/Makefile-$(mode))
diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c
index 30d285b266a..507e3cbac9d 100644
--- a/arch/um/drivers/daemon_kern.c
+++ b/arch/um/drivers/daemon_kern.c
@@ -31,6 +31,10 @@ void daemon_init(struct net_device *dev, void *data)
31 dpri->fd = -1; 31 dpri->fd = -1;
32 dpri->control = -1; 32 dpri->control = -1;
33 dpri->dev = dev; 33 dpri->dev = dev;
34 /* We will free this pointer. If it contains crap we're burned. */
35 dpri->ctl_addr = NULL;
36 dpri->data_addr = NULL;
37 dpri->local_addr = NULL;
34 38
35 printk("daemon backend (uml_switch version %d) - %s:%s", 39 printk("daemon backend (uml_switch version %d) - %s:%s",
36 SWITCH_VERSION, dpri->sock_type, dpri->ctl_sock); 40 SWITCH_VERSION, dpri->sock_type, dpri->ctl_sock);
diff --git a/arch/um/drivers/daemon_user.c b/arch/um/drivers/daemon_user.c
index 1bb085b2824..c944265955e 100644
--- a/arch/um/drivers/daemon_user.c
+++ b/arch/um/drivers/daemon_user.c
@@ -158,10 +158,16 @@ static void daemon_remove(void *data)
158 struct daemon_data *pri = data; 158 struct daemon_data *pri = data;
159 159
160 os_close_file(pri->fd); 160 os_close_file(pri->fd);
161 pri->fd = -1;
161 os_close_file(pri->control); 162 os_close_file(pri->control);
163 pri->control = -1;
164
162 kfree(pri->data_addr); 165 kfree(pri->data_addr);
166 pri->data_addr = NULL;
163 kfree(pri->ctl_addr); 167 kfree(pri->ctl_addr);
168 pri->ctl_addr = NULL;
164 kfree(pri->local_addr); 169 kfree(pri->local_addr);
170 pri->local_addr = NULL;
165} 171}
166 172
167int daemon_user_write(int fd, void *buf, int len, struct daemon_data *pri) 173int daemon_user_write(int fd, void *buf, int len, struct daemon_data *pri)
diff --git a/arch/um/drivers/fd.c b/arch/um/drivers/fd.c
index 3296e86a03a..c41f75e4acb 100644
--- a/arch/um/drivers/fd.c
+++ b/arch/um/drivers/fd.c
@@ -11,6 +11,7 @@
11#include "user.h" 11#include "user.h"
12#include "user_util.h" 12#include "user_util.h"
13#include "chan_user.h" 13#include "chan_user.h"
14#include "os.h"
14 15
15struct fd_chan { 16struct fd_chan {
16 int fd; 17 int fd;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index fb1f9fb9b87..8ebb2241ad4 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -68,6 +68,11 @@ static int uml_net_rx(struct net_device *dev)
68 return pkt_len; 68 return pkt_len;
69} 69}
70 70
71static void uml_dev_close(void* dev)
72{
73 dev_close( (struct net_device *) dev);
74}
75
71irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs) 76irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
72{ 77{
73 struct net_device *dev = dev_id; 78 struct net_device *dev = dev_id;
@@ -80,15 +85,21 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
80 spin_lock(&lp->lock); 85 spin_lock(&lp->lock);
81 while((err = uml_net_rx(dev)) > 0) ; 86 while((err = uml_net_rx(dev)) > 0) ;
82 if(err < 0) { 87 if(err < 0) {
88 DECLARE_WORK(close_work, uml_dev_close, dev);
83 printk(KERN_ERR 89 printk(KERN_ERR
84 "Device '%s' read returned %d, shutting it down\n", 90 "Device '%s' read returned %d, shutting it down\n",
85 dev->name, err); 91 dev->name, err);
86 dev_close(dev); 92 /* dev_close can't be called in interrupt context, and takes
93 * again lp->lock.
94 * And dev_close() can be safely called multiple times on the
95 * same device, since it tests for (dev->flags & IFF_UP). So
96 * there's no harm in delaying the device shutdown. */
97 schedule_work(&close_work);
87 goto out; 98 goto out;
88 } 99 }
89 reactivate_fd(lp->fd, UM_ETH_IRQ); 100 reactivate_fd(lp->fd, UM_ETH_IRQ);
90 101
91 out: 102out:
92 spin_unlock(&lp->lock); 103 spin_unlock(&lp->lock);
93 return(IRQ_HANDLED); 104 return(IRQ_HANDLED);
94} 105}
@@ -317,6 +328,11 @@ static int eth_configure(int n, void *init, char *mac,
317 return 1; 328 return 1;
318 } 329 }
319 330
331 lp = dev->priv;
332 /* This points to the transport private data. It's still clear, but we
333 * must memset it to 0 *now*. Let's help the drivers. */
334 memset(lp, 0, size);
335
320 /* sysfs register */ 336 /* sysfs register */
321 if (!driver_registered) { 337 if (!driver_registered) {
322 platform_driver_register(&uml_net_driver); 338 platform_driver_register(&uml_net_driver);
@@ -358,7 +374,6 @@ static int eth_configure(int n, void *init, char *mac,
358 free_netdev(dev); 374 free_netdev(dev);
359 return 1; 375 return 1;
360 } 376 }
361 lp = dev->priv;
362 377
363 /* lp.user is the first four bytes of the transport data, which 378 /* lp.user is the first four bytes of the transport data, which
364 * has already been initialized. This structure assignment will 379 * has already been initialized. This structure assignment will
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 7696f8d2d89..101efd26d46 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1103,31 +1103,33 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
1103 return(-EINVAL); 1103 return(-EINVAL);
1104} 1104}
1105 1105
1106static int same_backing_files(char *from_cmdline, char *from_cow, char *cow) 1106static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
1107{ 1107{
1108 struct uml_stat buf1, buf2; 1108 struct uml_stat buf1, buf2;
1109 int err; 1109 int err;
1110 1110
1111 if(from_cmdline == NULL) return(1); 1111 if(from_cmdline == NULL)
1112 if(!strcmp(from_cmdline, from_cow)) return(1); 1112 return 0;
1113 if(!strcmp(from_cmdline, from_cow))
1114 return 0;
1113 1115
1114 err = os_stat_file(from_cmdline, &buf1); 1116 err = os_stat_file(from_cmdline, &buf1);
1115 if(err < 0){ 1117 if(err < 0){
1116 printk("Couldn't stat '%s', err = %d\n", from_cmdline, -err); 1118 printk("Couldn't stat '%s', err = %d\n", from_cmdline, -err);
1117 return(1); 1119 return 0;
1118 } 1120 }
1119 err = os_stat_file(from_cow, &buf2); 1121 err = os_stat_file(from_cow, &buf2);
1120 if(err < 0){ 1122 if(err < 0){
1121 printk("Couldn't stat '%s', err = %d\n", from_cow, -err); 1123 printk("Couldn't stat '%s', err = %d\n", from_cow, -err);
1122 return(1); 1124 return 1;
1123 } 1125 }
1124 if((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino)) 1126 if((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
1125 return(1); 1127 return 0;
1126 1128
1127 printk("Backing file mismatch - \"%s\" requested,\n" 1129 printk("Backing file mismatch - \"%s\" requested,\n"
1128 "\"%s\" specified in COW header of \"%s\"\n", 1130 "\"%s\" specified in COW header of \"%s\"\n",
1129 from_cmdline, from_cow, cow); 1131 from_cmdline, from_cow, cow);
1130 return(0); 1132 return 1;
1131} 1133}
1132 1134
1133static int backing_file_mismatch(char *file, __u64 size, time_t mtime) 1135static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
@@ -1189,18 +1191,19 @@ int open_ubd_file(char *file, struct openflags *openflags,
1189 unsigned long long size; 1191 unsigned long long size;
1190 __u32 version, align; 1192 __u32 version, align;
1191 char *backing_file; 1193 char *backing_file;
1192 int fd, err, sectorsize, same, mode = 0644; 1194 int fd, err, sectorsize, asked_switch, mode = 0644;
1193 1195
1194 fd = os_open_file(file, *openflags, mode); 1196 fd = os_open_file(file, *openflags, mode);
1195 if(fd < 0){ 1197 if (fd < 0) {
1196 if((fd == -ENOENT) && (create_cow_out != NULL)) 1198 if ((fd == -ENOENT) && (create_cow_out != NULL))
1197 *create_cow_out = 1; 1199 *create_cow_out = 1;
1198 if(!openflags->w || 1200 if (!openflags->w ||
1199 ((fd != -EROFS) && (fd != -EACCES))) return(fd); 1201 ((fd != -EROFS) && (fd != -EACCES)))
1202 return fd;
1200 openflags->w = 0; 1203 openflags->w = 0;
1201 fd = os_open_file(file, *openflags, mode); 1204 fd = os_open_file(file, *openflags, mode);
1202 if(fd < 0) 1205 if (fd < 0)
1203 return(fd); 1206 return fd;
1204 } 1207 }
1205 1208
1206 err = os_lock_file(fd, openflags->w); 1209 err = os_lock_file(fd, openflags->w);
@@ -1209,7 +1212,9 @@ int open_ubd_file(char *file, struct openflags *openflags,
1209 goto out_close; 1212 goto out_close;
1210 } 1213 }
1211 1214
1212 if(backing_file_out == NULL) return(fd); 1215 /* Succesful return case! */
1216 if(backing_file_out == NULL)
1217 return(fd);
1213 1218
1214 err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime, 1219 err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
1215 &size, &sectorsize, &align, bitmap_offset_out); 1220 &size, &sectorsize, &align, bitmap_offset_out);
@@ -1218,34 +1223,34 @@ int open_ubd_file(char *file, struct openflags *openflags,
1218 "errno = %d\n", file, -err); 1223 "errno = %d\n", file, -err);
1219 goto out_close; 1224 goto out_close;
1220 } 1225 }
1221 if(err) return(fd); 1226 if(err)
1222 1227 return(fd);
1223 if(backing_file_out == NULL) return(fd);
1224 1228
1225 same = same_backing_files(*backing_file_out, backing_file, file); 1229 asked_switch = path_requires_switch(*backing_file_out, backing_file, file);
1226 1230
1227 if(!same && !backing_file_mismatch(*backing_file_out, size, mtime)){ 1231 /* Allow switching only if no mismatch. */
1232 if (asked_switch && !backing_file_mismatch(*backing_file_out, size, mtime)) {
1228 printk("Switching backing file to '%s'\n", *backing_file_out); 1233 printk("Switching backing file to '%s'\n", *backing_file_out);
1229 err = write_cow_header(file, fd, *backing_file_out, 1234 err = write_cow_header(file, fd, *backing_file_out,
1230 sectorsize, align, &size); 1235 sectorsize, align, &size);
1231 if(err){ 1236 if (err) {
1232 printk("Switch failed, errno = %d\n", -err); 1237 printk("Switch failed, errno = %d\n", -err);
1233 return(err); 1238 goto out_close;
1234 } 1239 }
1235 } 1240 } else {
1236 else {
1237 *backing_file_out = backing_file; 1241 *backing_file_out = backing_file;
1238 err = backing_file_mismatch(*backing_file_out, size, mtime); 1242 err = backing_file_mismatch(*backing_file_out, size, mtime);
1239 if(err) goto out_close; 1243 if (err)
1244 goto out_close;
1240 } 1245 }
1241 1246
1242 cow_sizes(version, size, sectorsize, align, *bitmap_offset_out, 1247 cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
1243 bitmap_len_out, data_offset_out); 1248 bitmap_len_out, data_offset_out);
1244 1249
1245 return(fd); 1250 return fd;
1246 out_close: 1251 out_close:
1247 os_close_file(fd); 1252 os_close_file(fd);
1248 return(err); 1253 return err;
1249} 1254}
1250 1255
1251int create_cow_file(char *cow_file, char *backing_file, struct openflags flags, 1256int create_cow_file(char *cow_file, char *backing_file, struct openflags flags,
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index 8f4e46d677a..c649108a9e9 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -120,8 +120,10 @@ extern void machine_halt(void);
120extern int is_syscall(unsigned long addr); 120extern int is_syscall(unsigned long addr);
121extern void arch_switch(void); 121extern void arch_switch(void);
122extern void free_irq(unsigned int, void *); 122extern void free_irq(unsigned int, void *);
123extern int um_in_interrupt(void);
124extern int cpu(void); 123extern int cpu(void);
124
125/* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */
126extern int __cant_sleep(void);
125extern void segv_handler(int sig, union uml_pt_regs *regs); 127extern void segv_handler(int sig, union uml_pt_regs *regs);
126extern void sigio_handler(int sig, union uml_pt_regs *regs); 128extern void sigio_handler(int sig, union uml_pt_regs *regs);
127 129
diff --git a/arch/um/include/longjmp.h b/arch/um/include/longjmp.h
new file mode 100644
index 00000000000..018b3819ab0
--- /dev/null
+++ b/arch/um/include/longjmp.h
@@ -0,0 +1,19 @@
1#ifndef __UML_LONGJMP_H
2#define __UML_LONGJMP_H
3
4#include <setjmp.h>
5#include "os.h"
6
7#define UML_SIGLONGJMP(buf, val) do { \
8 longjmp(*buf, val); \
9} while(0)
10
11#define UML_SIGSETJMP(buf, enable) ({ \
12 int n; \
13 enable = get_signals(); \
14 n = setjmp(*buf); \
15 if(n != 0) \
16 set_signals(enable); \
17 n; })
18
19#endif
diff --git a/arch/um/include/mode_kern.h b/arch/um/include/mode_kern.h
index 2d88afd0cf1..e7539a8451e 100644
--- a/arch/um/include/mode_kern.h
+++ b/arch/um/include/mode_kern.h
@@ -9,22 +9,11 @@
9#include "linux/config.h" 9#include "linux/config.h"
10 10
11#ifdef CONFIG_MODE_TT 11#ifdef CONFIG_MODE_TT
12#include "mode_kern-tt.h" 12#include "mode_kern_tt.h"
13#endif 13#endif
14 14
15#ifdef CONFIG_MODE_SKAS 15#ifdef CONFIG_MODE_SKAS
16#include "mode_kern-skas.h" 16#include "mode_kern_skas.h"
17#endif 17#endif
18 18
19#endif 19#endif
20
21/*
22 * Overrides for Emacs so that we follow Linus's tabbing style.
23 * Emacs will notice this stuff at the end of the file and automatically
24 * adjust the settings for this buffer only. This must remain at the end
25 * of the file.
26 * ---------------------------------------------------------------------------
27 * Local variables:
28 * c-file-style: "linux"
29 * End:
30 */
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index dd72d66cf0e..eb1710b8125 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -11,6 +11,7 @@
11#include "../os/include/file.h" 11#include "../os/include/file.h"
12#include "sysdep/ptrace.h" 12#include "sysdep/ptrace.h"
13#include "kern_util.h" 13#include "kern_util.h"
14#include "skas/mm_id.h"
14 15
15#define OS_TYPE_FILE 1 16#define OS_TYPE_FILE 1
16#define OS_TYPE_DIR 2 17#define OS_TYPE_DIR 2
@@ -190,11 +191,12 @@ extern int os_protect_memory(void *addr, unsigned long len,
190 int r, int w, int x); 191 int r, int w, int x);
191extern int os_unmap_memory(void *addr, int len); 192extern int os_unmap_memory(void *addr, int len);
192extern void os_flush_stdout(void); 193extern void os_flush_stdout(void);
193extern unsigned long long os_usecs(void);
194 194
195/* tt.c 195/* tt.c
196 * for tt mode only (will be deleted in future...) 196 * for tt mode only (will be deleted in future...)
197 */ 197 */
198extern void stop(void);
199extern int wait_for_stop(int pid, int sig, int cont_type, void *relay);
198extern int protect_memory(unsigned long addr, unsigned long len, 200extern int protect_memory(unsigned long addr, unsigned long len,
199 int r, int w, int x, int must_succeed); 201 int r, int w, int x, int must_succeed);
200extern void forward_pending_sigio(int target); 202extern void forward_pending_sigio(int target);
@@ -230,9 +232,63 @@ extern void block_signals(void);
230extern void unblock_signals(void); 232extern void unblock_signals(void);
231extern int get_signals(void); 233extern int get_signals(void);
232extern int set_signals(int enable); 234extern int set_signals(int enable);
235extern void os_usr1_signal(int on);
233 236
234/* trap.c */ 237/* trap.c */
235extern void os_fill_handlinfo(struct kern_handlers h); 238extern void os_fill_handlinfo(struct kern_handlers h);
236extern void do_longjmp(void *p, int val); 239extern void do_longjmp(void *p, int val);
237 240
241/* util.c */
242extern void stack_protections(unsigned long address);
243extern void task_protections(unsigned long address);
244extern int raw(int fd);
245extern void setup_machinename(char *machine_out);
246extern void setup_hostinfo(void);
247extern int setjmp_wrapper(void (*proc)(void *, void *), ...);
248
249/* time.c */
250#define BILLION (1000 * 1000 * 1000)
251
252extern void switch_timers(int to_real);
253extern void idle_sleep(int secs);
254extern void enable_timer(void);
255extern void disable_timer(void);
256extern void user_time_init(void);
257extern void uml_idle_timer(void);
258extern unsigned long long os_nsecs(void);
259
260/* skas/mem.c */
261extern long run_syscall_stub(struct mm_id * mm_idp,
262 int syscall, unsigned long *args, long expected,
263 void **addr, int done);
264extern long syscall_stub_data(struct mm_id * mm_idp,
265 unsigned long *data, int data_count,
266 void **addr, void **stub_addr);
267extern int map(struct mm_id * mm_idp, unsigned long virt,
268 unsigned long len, int r, int w, int x, int phys_fd,
269 unsigned long long offset, int done, void **data);
270extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len,
271 int done, void **data);
272extern int protect(struct mm_id * mm_idp, unsigned long addr,
273 unsigned long len, int r, int w, int x, int done,
274 void **data);
275
276/* skas/process.c */
277extern int is_skas_winch(int pid, int fd, void *data);
278extern int start_userspace(unsigned long stub_stack);
279extern int copy_context_skas0(unsigned long stack, int pid);
280extern void userspace(union uml_pt_regs *regs);
281extern void map_stub_pages(int fd, unsigned long code,
282 unsigned long data, unsigned long stack);
283extern void new_thread(void *stack, void **switch_buf_ptr,
284 void **fork_buf_ptr, void (*handler)(int));
285extern void thread_wait(void *sw, void *fb);
286extern void switch_threads(void *me, void *next);
287extern int start_idle_thread(void *stack, void *switch_buf_ptr,
288 void **fork_buf_ptr);
289extern void initial_thread_cb_skas(void (*proc)(void *),
290 void *arg);
291extern void halt_skas(void);
292extern void reboot_skas(void);
293
238#endif 294#endif
diff --git a/arch/um/kernel/skas/include/mm_id.h b/arch/um/include/skas/mm_id.h
index 48dd0989dda..48dd0989dda 100644
--- a/arch/um/kernel/skas/include/mm_id.h
+++ b/arch/um/include/skas/mm_id.h
diff --git a/arch/um/include/skas/mmu-skas.h b/arch/um/include/skas/mmu-skas.h
new file mode 100644
index 00000000000..d8869a6ef1b
--- /dev/null
+++ b/arch/um/include/skas/mmu-skas.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SKAS_MMU_H
7#define __SKAS_MMU_H
8
9#include "linux/config.h"
10#include "mm_id.h"
11#include "asm/ldt.h"
12
13struct mmu_context_skas {
14 struct mm_id id;
15 unsigned long last_page_table;
16#ifdef CONFIG_3_LEVEL_PGTABLES
17 unsigned long last_pmd;
18#endif
19 uml_ldt_t ldt;
20};
21
22extern void switch_mm_skas(struct mm_id * mm_idp);
23
24#endif
diff --git a/arch/um/include/skas/mode-skas.h b/arch/um/include/skas/mode-skas.h
new file mode 100644
index 00000000000..260065cfeef
--- /dev/null
+++ b/arch/um/include/skas/mode-skas.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __MODE_SKAS_H__
7#define __MODE_SKAS_H__
8
9#include <sysdep/ptrace.h>
10
11extern unsigned long exec_regs[];
12extern unsigned long exec_fp_regs[];
13extern unsigned long exec_fpx_regs[];
14extern int have_fpx_regs;
15
16extern void sig_handler_common_skas(int sig, void *sc_ptr);
17extern void kill_off_processes_skas(void);
18
19#endif
diff --git a/arch/um/kernel/skas/include/mode_kern-skas.h b/arch/um/include/skas/mode_kern_skas.h
index c97a80dfe37..63c58739bde 100644
--- a/arch/um/kernel/skas/include/mode_kern-skas.h
+++ b/arch/um/include/skas/mode_kern_skas.h
@@ -18,7 +18,6 @@ extern int copy_thread_skas(int nr, unsigned long clone_flags,
18 unsigned long sp, unsigned long stack_top, 18 unsigned long sp, unsigned long stack_top,
19 struct task_struct *p, struct pt_regs *regs); 19 struct task_struct *p, struct pt_regs *regs);
20extern void release_thread_skas(struct task_struct *task); 20extern void release_thread_skas(struct task_struct *task);
21extern void initial_thread_cb_skas(void (*proc)(void *), void *arg);
22extern void init_idle_skas(void); 21extern void init_idle_skas(void);
23extern void flush_tlb_kernel_range_skas(unsigned long start, 22extern void flush_tlb_kernel_range_skas(unsigned long start,
24 unsigned long end); 23 unsigned long end);
@@ -39,14 +38,3 @@ extern int thread_pid_skas(struct task_struct *task);
39#define kmem_end_skas (host_task_size - 1024 * 1024) 38#define kmem_end_skas (host_task_size - 1024 * 1024)
40 39
41#endif 40#endif
42
43/*
44 * Overrides for Emacs so that we follow Linus's tabbing style.
45 * Emacs will notice this stuff at the end of the file and automatically
46 * adjust the settings for this buffer only. This must remain at the end
47 * of the file.
48 * ---------------------------------------------------------------------------
49 * Local variables:
50 * c-file-style: "linux"
51 * End:
52 */
diff --git a/arch/um/kernel/skas/include/proc_mm.h b/arch/um/include/skas/proc_mm.h
index cce61a67905..90280920960 100644
--- a/arch/um/kernel/skas/include/proc_mm.h
+++ b/arch/um/include/skas/proc_mm.h
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -22,13 +22,13 @@ struct mm_mmap {
22 22
23struct mm_munmap { 23struct mm_munmap {
24 unsigned long addr; 24 unsigned long addr;
25 unsigned long len; 25 unsigned long len;
26}; 26};
27 27
28struct mm_mprotect { 28struct mm_mprotect {
29 unsigned long addr; 29 unsigned long addr;
30 unsigned long len; 30 unsigned long len;
31 unsigned int prot; 31 unsigned int prot;
32}; 32};
33 33
34struct proc_mm_op { 34struct proc_mm_op {
@@ -42,14 +42,3 @@ struct proc_mm_op {
42}; 42};
43 43
44#endif 44#endif
45
46/*
47 * Overrides for Emacs so that we follow Linus's tabbing style.
48 * Emacs will notice this stuff at the end of the file and automatically
49 * adjust the settings for this buffer only. This must remain at the end
50 * of the file.
51 * ---------------------------------------------------------------------------
52 * Local variables:
53 * c-file-style: "linux"
54 * End:
55 */
diff --git a/arch/um/include/skas/skas.h b/arch/um/include/skas/skas.h
new file mode 100644
index 00000000000..86357282d68
--- /dev/null
+++ b/arch/um/include/skas/skas.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SKAS_H
7#define __SKAS_H
8
9#include "mm_id.h"
10#include "sysdep/ptrace.h"
11
12extern int userspace_pid[];
13extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
14extern int skas_needs_stub;
15
16extern int user_thread(unsigned long stack, int flags);
17extern void new_thread_proc(void *stack, void (*handler)(int sig));
18extern void new_thread_handler(int sig);
19extern void handle_syscall(union uml_pt_regs *regs);
20extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
21extern int new_mm(unsigned long stack);
22extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
23extern long execute_syscall_skas(void *r);
24extern unsigned long current_stub_stack(void);
25
26#endif
diff --git a/arch/um/kernel/skas/include/stub-data.h b/arch/um/include/skas/stub-data.h
index f6ed92c3727..f6ed92c3727 100644
--- a/arch/um/kernel/skas/include/stub-data.h
+++ b/arch/um/include/skas/stub-data.h
diff --git a/arch/um/kernel/skas/include/uaccess-skas.h b/arch/um/include/skas/uaccess-skas.h
index 64516c556cd..224a75f4c02 100644
--- a/arch/um/kernel/skas/include/uaccess-skas.h
+++ b/arch/um/include/skas/uaccess-skas.h
@@ -19,14 +19,3 @@ extern int clear_user_skas(void __user *mem, int len);
19extern int strnlen_user_skas(const void __user *str, int len); 19extern int strnlen_user_skas(const void __user *str, int len);
20 20
21#endif 21#endif
22
23/*
24 * Overrides for Emacs so that we follow Linus's tabbing style.
25 * Emacs will notice this stuff at the end of the file and automatically
26 * adjust the settings for this buffer only. This must remain at the end
27 * of the file.
28 * ---------------------------------------------------------------------------
29 * Local variables:
30 * c-file-style: "linux"
31 * End:
32 */
diff --git a/arch/um/include/time_user.h b/arch/um/include/time_user.h
deleted file mode 100644
index 17d7ef2141f..00000000000
--- a/arch/um/include/time_user.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __TIME_USER_H__
7#define __TIME_USER_H__
8
9extern void timer(void);
10extern void switch_timers(int to_real);
11extern void idle_sleep(int secs);
12extern void enable_timer(void);
13extern void prepare_timer(void * ptr);
14extern void disable_timer(void);
15extern unsigned long time_lock(void);
16extern void time_unlock(unsigned long);
17extern void user_time_init(void);
18
19#endif
diff --git a/arch/um/kernel/tt/include/debug.h b/arch/um/include/tt/debug.h
index 738435461e1..9778fa83829 100644
--- a/arch/um/kernel/tt/include/debug.h
+++ b/arch/um/include/tt/debug.h
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) and 2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) and
3 * Lars Brinkhoff. 3 * Lars Brinkhoff.
4 * Licensed under the GPL 4 * Licensed under the GPL
diff --git a/arch/um/include/tt/mmu-tt.h b/arch/um/include/tt/mmu-tt.h
new file mode 100644
index 00000000000..572a78b2258
--- /dev/null
+++ b/arch/um/include/tt/mmu-tt.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __TT_MMU_H
7#define __TT_MMU_H
8
9struct mmu_context_tt {
10};
11
12#endif
diff --git a/arch/um/include/tt/mode-tt.h b/arch/um/include/tt/mode-tt.h
new file mode 100644
index 00000000000..2823cd56eea
--- /dev/null
+++ b/arch/um/include/tt/mode-tt.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __MODE_TT_H__
7#define __MODE_TT_H__
8
9#include "sysdep/ptrace.h"
10
11enum { OP_NONE, OP_EXEC, OP_FORK, OP_TRACE_ON, OP_REBOOT, OP_HALT, OP_CB };
12
13extern int tracing_pid;
14
15extern int tracer(int (*init_proc)(void *), void *sp);
16extern void sig_handler_common_tt(int sig, void *sc);
17extern void syscall_handler_tt(int sig, union uml_pt_regs *regs);
18extern void reboot_tt(void);
19extern void halt_tt(void);
20extern int is_tracer_winch(int pid, int fd, void *data);
21extern void kill_off_processes_tt(void);
22
23#endif
diff --git a/arch/um/include/tt/mode_kern_tt.h b/arch/um/include/tt/mode_kern_tt.h
new file mode 100644
index 00000000000..efa0012550d
--- /dev/null
+++ b/arch/um/include/tt/mode_kern_tt.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __TT_MODE_KERN_H__
7#define __TT_MODE_KERN_H__
8
9#include "linux/sched.h"
10#include "asm/page.h"
11#include "asm/ptrace.h"
12#include "asm/uaccess.h"
13
14extern void switch_to_tt(void *prev, void *next);
15extern void flush_thread_tt(void);
16extern void start_thread_tt(struct pt_regs *regs, unsigned long eip,
17 unsigned long esp);
18extern int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
19 unsigned long stack_top, struct task_struct *p,
20 struct pt_regs *regs);
21extern void release_thread_tt(struct task_struct *task);
22extern void initial_thread_cb_tt(void (*proc)(void *), void *arg);
23extern void init_idle_tt(void);
24extern void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end);
25extern void flush_tlb_kernel_vm_tt(void);
26extern void __flush_tlb_one_tt(unsigned long addr);
27extern void flush_tlb_range_tt(struct vm_area_struct *vma,
28 unsigned long start, unsigned long end);
29extern void flush_tlb_mm_tt(struct mm_struct *mm);
30extern void force_flush_all_tt(void);
31extern long execute_syscall_tt(void *r);
32extern void before_mem_tt(unsigned long brk_start);
33extern unsigned long set_task_sizes_tt(int arg, unsigned long *host_size_out,
34 unsigned long *task_size_out);
35extern int start_uml_tt(void);
36extern int external_pid_tt(struct task_struct *task);
37extern int thread_pid_tt(struct task_struct *task);
38
39#define kmem_end_tt (host_task_size - ABOVE_KMEM)
40
41#endif
diff --git a/arch/um/kernel/tt/include/tt.h b/arch/um/include/tt/tt.h
index c667b67af40..80852198018 100644
--- a/arch/um/kernel/tt/include/tt.h
+++ b/arch/um/include/tt/tt.h
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -34,13 +34,3 @@ extern long execute_syscall_tt(void *r);
34 34
35#endif 35#endif
36 36
37/*
38 * Overrides for Emacs so that we follow Linus's tabbing style.
39 * Emacs will notice this stuff at the end of the file and automatically
40 * adjust the settings for this buffer only. This must remain at the end
41 * of the file.
42 * ---------------------------------------------------------------------------
43 * Local variables:
44 * c-file-style: "linux"
45 * End:
46 */
diff --git a/arch/um/kernel/tt/include/uaccess-tt.h b/arch/um/include/tt/uaccess-tt.h
index b9bfe9c481c..b19645f32f2 100644
--- a/arch/um/kernel/tt/include/uaccess-tt.h
+++ b/arch/um/include/tt/uaccess-tt.h
@@ -46,14 +46,3 @@ extern int clear_user_tt(void __user *mem, int len);
46extern int strnlen_user_tt(const void __user *str, int len); 46extern int strnlen_user_tt(const void __user *str, int len);
47 47
48#endif 48#endif
49
50/*
51 * Overrides for Emacs so that we follow Linus's tabbing style.
52 * Emacs will notice this stuff at the end of the file and automatically
53 * adjust the settings for this buffer only. This must remain at the end
54 * of the file.
55 * ---------------------------------------------------------------------------
56 * Local variables:
57 * c-file-style: "linux"
58 * End:
59 */
diff --git a/arch/um/include/user.h b/arch/um/include/user.h
index 0f865ef4691..91b0ac4ad88 100644
--- a/arch/um/include/user.h
+++ b/arch/um/include/user.h
@@ -18,6 +18,7 @@ extern int open_gdb_chan(void);
18extern unsigned long strlcpy(char *, const char *, unsigned long); 18extern unsigned long strlcpy(char *, const char *, unsigned long);
19extern unsigned long strlcat(char *, const char *, unsigned long); 19extern unsigned long strlcat(char *, const char *, unsigned long);
20extern void *um_vmalloc(int size); 20extern void *um_vmalloc(int size);
21extern void *um_vmalloc_atomic(int size);
21extern void vfree(void *ptr); 22extern void vfree(void *ptr);
22 23
23#endif 24#endif
diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h
index c1dbd77b073..a6f1f176cf8 100644
--- a/arch/um/include/user_util.h
+++ b/arch/um/include/user_util.h
@@ -44,10 +44,6 @@ extern unsigned long brk_start;
44extern int pty_output_sigio; 44extern int pty_output_sigio;
45extern int pty_close_sigio; 45extern int pty_close_sigio;
46 46
47extern void stop(void);
48extern void stack_protections(unsigned long address);
49extern void task_protections(unsigned long address);
50extern int wait_for_stop(int pid, int sig, int cont_type, void *relay);
51extern void *add_signal_handler(int sig, void (*handler)(int)); 47extern void *add_signal_handler(int sig, void (*handler)(int));
52extern int linux_main(int argc, char **argv); 48extern int linux_main(int argc, char **argv);
53extern void set_cmdline(char *cmd); 49extern void set_cmdline(char *cmd);
@@ -55,8 +51,6 @@ extern void input_cb(void (*proc)(void *), void *arg, int arg_len);
55extern int get_pty(void); 51extern int get_pty(void);
56extern void *um_kmalloc(int size); 52extern void *um_kmalloc(int size);
57extern int switcheroo(int fd, int prot, void *from, void *to, int size); 53extern int switcheroo(int fd, int prot, void *from, void *to, int size);
58extern void setup_machinename(char *machine_out);
59extern void setup_hostinfo(void);
60extern void do_exec(int old_pid, int new_pid); 54extern void do_exec(int old_pid, int new_pid);
61extern void tracer_panic(char *msg, ...); 55extern void tracer_panic(char *msg, ...);
62extern int detach(int pid, int sig); 56extern int detach(int pid, int sig);
@@ -70,18 +64,6 @@ extern int cpu_feature(char *what, char *buf, int len);
70extern int arch_handle_signal(int sig, union uml_pt_regs *regs); 64extern int arch_handle_signal(int sig, union uml_pt_regs *regs);
71extern int arch_fixup(unsigned long address, void *sc_ptr); 65extern int arch_fixup(unsigned long address, void *sc_ptr);
72extern void arch_init_thread(void); 66extern void arch_init_thread(void);
73extern int setjmp_wrapper(void (*proc)(void *, void *), ...);
74extern int raw(int fd); 67extern int raw(int fd);
75 68
76#endif 69#endif
77
78/*
79 * Overrides for Emacs so that we follow Linus's tabbing style.
80 * Emacs will notice this stuff at the end of the file and automatically
81 * adjust the settings for this buffer only. This must remain at the end
82 * of the file.
83 * ---------------------------------------------------------------------------
84 * Local variables:
85 * c-file-style: "linux"
86 * End:
87 */
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 193cc2b7448..693018ba80f 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -9,9 +9,8 @@ clean-files :=
9obj-y = config.o exec_kern.o exitcode.o \ 9obj-y = config.o exec_kern.o exitcode.o \
10 init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \ 10 init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \
11 process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \ 11 process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \
12 signal_kern.o smp.o syscall_kern.o sysrq.o time.o \ 12 signal_kern.o smp.o syscall_kern.o sysrq.o \
13 time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o \ 13 time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o
14 user_util.o
15 14
16obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o 15obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
17obj-$(CONFIG_GPROF) += gprof_syms.o 16obj-$(CONFIG_GPROF) += gprof_syms.o
@@ -24,7 +23,7 @@ obj-$(CONFIG_MODE_SKAS) += skas/
24 23
25user-objs-$(CONFIG_TTY_LOG) += tty_log.o 24user-objs-$(CONFIG_TTY_LOG) += tty_log.o
26 25
27USER_OBJS := $(user-objs-y) config.o time.o tty_log.o user_util.o 26USER_OBJS := $(user-objs-y) config.o tty_log.o
28 27
29include arch/um/scripts/Makefile.rules 28include arch/um/scripts/Makefile.rules
30 29
diff --git a/arch/um/kernel/exec_kern.c b/arch/um/kernel/exec_kern.c
index efd222ffe20..569fe8b9b05 100644
--- a/arch/um/kernel/exec_kern.c
+++ b/arch/um/kernel/exec_kern.c
@@ -17,7 +17,6 @@
17#include "irq_user.h" 17#include "irq_user.h"
18#include "tlb.h" 18#include "tlb.h"
19#include "os.h" 19#include "os.h"
20#include "time_user.h"
21#include "choose-mode.h" 20#include "choose-mode.h"
22#include "mode_kern.h" 21#include "mode_kern.h"
23 22
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 7f13b85d265..3113cab8675 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -39,7 +39,6 @@
39#include "init.h" 39#include "init.h"
40#include "irq_user.h" 40#include "irq_user.h"
41#include "mem_user.h" 41#include "mem_user.h"
42#include "time_user.h"
43#include "tlb.h" 42#include "tlb.h"
44#include "frame_kern.h" 43#include "frame_kern.h"
45#include "sigcontext.h" 44#include "sigcontext.h"
@@ -288,17 +287,27 @@ EXPORT_SYMBOL(disable_hlt);
288 287
289void *um_kmalloc(int size) 288void *um_kmalloc(int size)
290{ 289{
291 return(kmalloc(size, GFP_KERNEL)); 290 return kmalloc(size, GFP_KERNEL);
292} 291}
293 292
294void *um_kmalloc_atomic(int size) 293void *um_kmalloc_atomic(int size)
295{ 294{
296 return(kmalloc(size, GFP_ATOMIC)); 295 return kmalloc(size, GFP_ATOMIC);
297} 296}
298 297
299void *um_vmalloc(int size) 298void *um_vmalloc(int size)
300{ 299{
301 return(vmalloc(size)); 300 return vmalloc(size);
301}
302
303void *um_vmalloc_atomic(int size)
304{
305 return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
306}
307
308int __cant_sleep(void) {
309 return in_atomic() || irqs_disabled() || in_interrupt();
310 /* Is in_interrupt() really needed? */
302} 311}
303 312
304unsigned long get_fault_addr(void) 313unsigned long get_fault_addr(void)
@@ -370,11 +379,6 @@ int smp_sigio_handler(void)
370 return(0); 379 return(0);
371} 380}
372 381
373int um_in_interrupt(void)
374{
375 return(in_interrupt());
376}
377
378int cpu(void) 382int cpu(void)
379{ 383{
380 return(current_thread->cpu); 384 return(current_thread->cpu);
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c
index 62e5cfdf218..f7b18e157d3 100644
--- a/arch/um/kernel/sigio_user.c
+++ b/arch/um/kernel/sigio_user.c
@@ -337,70 +337,103 @@ int ignore_sigio_fd(int fd)
337 return(err); 337 return(err);
338} 338}
339 339
340static int setup_initial_poll(int fd) 340static struct pollfd* setup_initial_poll(int fd)
341{ 341{
342 struct pollfd *p; 342 struct pollfd *p;
343 343
344 p = um_kmalloc_atomic(sizeof(struct pollfd)); 344 p = um_kmalloc(sizeof(struct pollfd));
345 if(p == NULL){ 345 if (p == NULL) {
346 printk("setup_initial_poll : failed to allocate poll\n"); 346 printk("setup_initial_poll : failed to allocate poll\n");
347 return(-1); 347 return NULL;
348 } 348 }
349 *p = ((struct pollfd) { .fd = fd, 349 *p = ((struct pollfd) { .fd = fd,
350 .events = POLLIN, 350 .events = POLLIN,
351 .revents = 0 }); 351 .revents = 0 });
352 current_poll = ((struct pollfds) { .poll = p, 352 return p;
353 .used = 1,
354 .size = 1 });
355 return(0);
356} 353}
357 354
358void write_sigio_workaround(void) 355void write_sigio_workaround(void)
359{ 356{
360 unsigned long stack; 357 unsigned long stack;
358 struct pollfd *p;
361 int err; 359 int err;
360 int l_write_sigio_fds[2];
361 int l_sigio_private[2];
362 int l_write_sigio_pid;
362 363
364 /* We call this *tons* of times - and most ones we must just fail. */
363 sigio_lock(); 365 sigio_lock();
364 if(write_sigio_pid != -1) 366 l_write_sigio_pid = write_sigio_pid;
365 goto out; 367 sigio_unlock();
366 368
367 err = os_pipe(write_sigio_fds, 1, 1); 369 if (l_write_sigio_pid != -1)
370 return;
371
372 err = os_pipe(l_write_sigio_fds, 1, 1);
368 if(err < 0){ 373 if(err < 0){
369 printk("write_sigio_workaround - os_pipe 1 failed, " 374 printk("write_sigio_workaround - os_pipe 1 failed, "
370 "err = %d\n", -err); 375 "err = %d\n", -err);
371 goto out; 376 return;
372 } 377 }
373 err = os_pipe(sigio_private, 1, 1); 378 err = os_pipe(l_sigio_private, 1, 1);
374 if(err < 0){ 379 if(err < 0){
375 printk("write_sigio_workaround - os_pipe 2 failed, " 380 printk("write_sigio_workaround - os_pipe 1 failed, "
376 "err = %d\n", -err); 381 "err = %d\n", -err);
377 goto out_close1; 382 goto out_close1;
378 } 383 }
379 if(setup_initial_poll(sigio_private[1])) 384
385 p = setup_initial_poll(l_sigio_private[1]);
386 if(!p)
380 goto out_close2; 387 goto out_close2;
381 388
382 write_sigio_pid = run_helper_thread(write_sigio_thread, NULL, 389 sigio_lock();
390
391 /* Did we race? Don't try to optimize this, please, it's not so likely
392 * to happen, and no more than once at the boot. */
393 if(write_sigio_pid != -1)
394 goto out_unlock;
395
396 write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
383 CLONE_FILES | CLONE_VM, &stack, 0); 397 CLONE_FILES | CLONE_VM, &stack, 0);
384 398
385 if(write_sigio_pid < 0) goto out_close2; 399 if (write_sigio_pid < 0)
400 goto out_clear;
386 401
387 if(write_sigio_irq(write_sigio_fds[0])) 402 if (write_sigio_irq(l_write_sigio_fds[0]))
388 goto out_kill; 403 goto out_kill;
389 404
390 out: 405 /* Success, finally. */
406 memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
407 memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
408
409 current_poll = ((struct pollfds) { .poll = p,
410 .used = 1,
411 .size = 1 });
412
391 sigio_unlock(); 413 sigio_unlock();
392 return; 414 return;
393 415
394 out_kill: 416 out_kill:
395 os_kill_process(write_sigio_pid, 1); 417 l_write_sigio_pid = write_sigio_pid;
396 write_sigio_pid = -1; 418 write_sigio_pid = -1;
419 sigio_unlock();
420 /* Going to call waitpid, avoid holding the lock. */
421 os_kill_process(l_write_sigio_pid, 1);
422 goto out_free;
423
424 out_clear:
425 write_sigio_pid = -1;
426 out_unlock:
427 sigio_unlock();
428 out_free:
429 kfree(p);
397 out_close2: 430 out_close2:
398 os_close_file(sigio_private[0]); 431 os_close_file(l_sigio_private[0]);
399 os_close_file(sigio_private[1]); 432 os_close_file(l_sigio_private[1]);
400 out_close1: 433 out_close1:
401 os_close_file(write_sigio_fds[0]); 434 os_close_file(l_write_sigio_fds[0]);
402 os_close_file(write_sigio_fds[1]); 435 os_close_file(l_write_sigio_fds[1]);
403 sigio_unlock(); 436 return;
404} 437}
405 438
406int read_sigio_fd(int fd) 439int read_sigio_fd(int fd)
diff --git a/arch/um/kernel/signal_kern.c b/arch/um/kernel/signal_kern.c
index 7b0e0e81c16..da17b7541e0 100644
--- a/arch/um/kernel/signal_kern.c
+++ b/arch/um/kernel/signal_kern.c
@@ -99,31 +99,46 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
99 return err; 99 return err;
100} 100}
101 101
102static int kern_do_signal(struct pt_regs *regs, sigset_t *oldset) 102static int kern_do_signal(struct pt_regs *regs)
103{ 103{
104 struct k_sigaction ka_copy; 104 struct k_sigaction ka_copy;
105 siginfo_t info; 105 siginfo_t info;
106 sigset_t *oldset;
106 int sig, handled_sig = 0; 107 int sig, handled_sig = 0;
107 108
109 if (test_thread_flag(TIF_RESTORE_SIGMASK))
110 oldset = &current->saved_sigmask;
111 else
112 oldset = &current->blocked;
113
108 while((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0){ 114 while((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0){
109 handled_sig = 1; 115 handled_sig = 1;
110 /* Whee! Actually deliver the signal. */ 116 /* Whee! Actually deliver the signal. */
111 if(!handle_signal(regs, sig, &ka_copy, &info, oldset)) 117 if(!handle_signal(regs, sig, &ka_copy, &info, oldset)){
118 /* a signal was successfully delivered; the saved
119 * sigmask will have been stored in the signal frame,
120 * and will be restored by sigreturn, so we can simply
121 * clear the TIF_RESTORE_SIGMASK flag */
122 if (test_thread_flag(TIF_RESTORE_SIGMASK))
123 clear_thread_flag(TIF_RESTORE_SIGMASK);
112 break; 124 break;
125 }
113 } 126 }
114 127
115 /* Did we come from a system call? */ 128 /* Did we come from a system call? */
116 if(!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)){ 129 if(!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)){
117 /* Restart the system call - no handlers present */ 130 /* Restart the system call - no handlers present */
118 if(PT_REGS_SYSCALL_RET(regs) == -ERESTARTNOHAND || 131 switch(PT_REGS_SYSCALL_RET(regs)){
119 PT_REGS_SYSCALL_RET(regs) == -ERESTARTSYS || 132 case -ERESTARTNOHAND:
120 PT_REGS_SYSCALL_RET(regs) == -ERESTARTNOINTR){ 133 case -ERESTARTSYS:
134 case -ERESTARTNOINTR:
121 PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs); 135 PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
122 PT_REGS_RESTART_SYSCALL(regs); 136 PT_REGS_RESTART_SYSCALL(regs);
123 } 137 break;
124 else if(PT_REGS_SYSCALL_RET(regs) == -ERESTART_RESTARTBLOCK){ 138 case -ERESTART_RESTARTBLOCK:
125 PT_REGS_SYSCALL_RET(regs) = __NR_restart_syscall; 139 PT_REGS_SYSCALL_RET(regs) = __NR_restart_syscall;
126 PT_REGS_RESTART_SYSCALL(regs); 140 PT_REGS_RESTART_SYSCALL(regs);
141 break;
127 } 142 }
128 } 143 }
129 144
@@ -137,12 +152,19 @@ static int kern_do_signal(struct pt_regs *regs, sigset_t *oldset)
137 if(current->ptrace & PT_DTRACE) 152 if(current->ptrace & PT_DTRACE)
138 current->thread.singlestep_syscall = 153 current->thread.singlestep_syscall =
139 is_syscall(PT_REGS_IP(&current->thread.regs)); 154 is_syscall(PT_REGS_IP(&current->thread.regs));
155
156 /* if there's no signal to deliver, we just put the saved sigmask
157 * back */
158 if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) {
159 clear_thread_flag(TIF_RESTORE_SIGMASK);
160 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
161 }
140 return(handled_sig); 162 return(handled_sig);
141} 163}
142 164
143int do_signal(void) 165int do_signal(void)
144{ 166{
145 return(kern_do_signal(&current->thread.regs, &current->blocked)); 167 return(kern_do_signal(&current->thread.regs));
146} 168}
147 169
148/* 170/*
@@ -150,63 +172,20 @@ int do_signal(void)
150 */ 172 */
151long sys_sigsuspend(int history0, int history1, old_sigset_t mask) 173long sys_sigsuspend(int history0, int history1, old_sigset_t mask)
152{ 174{
153 sigset_t saveset;
154
155 mask &= _BLOCKABLE; 175 mask &= _BLOCKABLE;
156 spin_lock_irq(&current->sighand->siglock); 176 spin_lock_irq(&current->sighand->siglock);
157 saveset = current->blocked; 177 current->saved_sigmask = current->blocked;
158 siginitset(&current->blocked, mask); 178 siginitset(&current->blocked, mask);
159 recalc_sigpending(); 179 recalc_sigpending();
160 spin_unlock_irq(&current->sighand->siglock); 180 spin_unlock_irq(&current->sighand->siglock);
161 181
162 PT_REGS_SYSCALL_RET(&current->thread.regs) = -EINTR; 182 current->state = TASK_INTERRUPTIBLE;
163 while (1) { 183 schedule();
164 current->state = TASK_INTERRUPTIBLE; 184 set_thread_flag(TIF_RESTORE_SIGMASK);
165 schedule(); 185 return -ERESTARTNOHAND;
166 if(kern_do_signal(&current->thread.regs, &saveset))
167 return(-EINTR);
168 }
169}
170
171long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
172{
173 sigset_t saveset, newset;
174
175 /* XXX: Don't preclude handling different sized sigset_t's. */
176 if (sigsetsize != sizeof(sigset_t))
177 return -EINVAL;
178
179 if (copy_from_user(&newset, unewset, sizeof(newset)))
180 return -EFAULT;
181 sigdelsetmask(&newset, ~_BLOCKABLE);
182
183 spin_lock_irq(&current->sighand->siglock);
184 saveset = current->blocked;
185 current->blocked = newset;
186 recalc_sigpending();
187 spin_unlock_irq(&current->sighand->siglock);
188
189 PT_REGS_SYSCALL_RET(&current->thread.regs) = -EINTR;
190 while (1) {
191 current->state = TASK_INTERRUPTIBLE;
192 schedule();
193 if (kern_do_signal(&current->thread.regs, &saveset))
194 return(-EINTR);
195 }
196} 186}
197 187
198long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) 188long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
199{ 189{
200 return(do_sigaltstack(uss, uoss, PT_REGS_SP(&current->thread.regs))); 190 return(do_sigaltstack(uss, uoss, PT_REGS_SP(&current->thread.regs)));
201} 191}
202
203/*
204 * Overrides for Emacs so that we follow Linus's tabbing style.
205 * Emacs will notice this stuff at the end of the file and automatically
206 * adjust the settings for this buffer only. This must remain at the end
207 * of the file.
208 * ---------------------------------------------------------------------------
209 * Local variables:
210 * c-file-style: "linux"
211 * End:
212 */
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index 7a9fc16d71d..57181a920d4 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -1,12 +1,12 @@
1# 1#
2# Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com) 2# Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
3# Licensed under the GPL 3# Licensed under the GPL
4# 4#
5 5
6obj-y := clone.o exec_kern.o mem.o mem_user.o mmu.o process.o process_kern.o \ 6obj-y := clone.o exec_kern.o mem.o mmu.o process_kern.o \
7 syscall.o tlb.o uaccess.o 7 syscall.o tlb.o uaccess.o
8 8
9USER_OBJS := process.o clone.o 9USER_OBJS := clone.o
10 10
11include arch/um/scripts/Makefile.rules 11include arch/um/scripts/Makefile.rules
12 12
diff --git a/arch/um/kernel/skas/include/mmu-skas.h b/arch/um/kernel/skas/include/mmu-skas.h
deleted file mode 100644
index 44110c521e4..00000000000
--- a/arch/um/kernel/skas/include/mmu-skas.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SKAS_MMU_H
7#define __SKAS_MMU_H
8
9#include "linux/config.h"
10#include "mm_id.h"
11#include "asm/ldt.h"
12
13struct mmu_context_skas {
14 struct mm_id id;
15 unsigned long last_page_table;
16#ifdef CONFIG_3_LEVEL_PGTABLES
17 unsigned long last_pmd;
18#endif
19 uml_ldt_t ldt;
20};
21
22extern void switch_mm_skas(struct mm_id * mm_idp);
23
24#endif
25
26/*
27 * Overrides for Emacs so that we follow Linus's tabbing style.
28 * Emacs will notice this stuff at the end of the file and automatically
29 * adjust the settings for this buffer only. This must remain at the end
30 * of the file.
31 * ---------------------------------------------------------------------------
32 * Local variables:
33 * c-file-style: "linux"
34 * End:
35 */
diff --git a/arch/um/kernel/skas/include/mode-skas.h b/arch/um/kernel/skas/include/mode-skas.h
deleted file mode 100644
index bcd26a6a388..00000000000
--- a/arch/um/kernel/skas/include/mode-skas.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __MODE_SKAS_H__
7#define __MODE_SKAS_H__
8
9#include <sysdep/ptrace.h>
10
11extern unsigned long exec_regs[];
12extern unsigned long exec_fp_regs[];
13extern unsigned long exec_fpx_regs[];
14extern int have_fpx_regs;
15
16extern void sig_handler_common_skas(int sig, void *sc_ptr);
17extern void halt_skas(void);
18extern void reboot_skas(void);
19extern void kill_off_processes_skas(void);
20extern int is_skas_winch(int pid, int fd, void *data);
21
22#endif
23
24/*
25 * Overrides for Emacs so that we follow Linus's tabbing style.
26 * Emacs will notice this stuff at the end of the file and automatically
27 * adjust the settings for this buffer only. This must remain at the end
28 * of the file.
29 * ---------------------------------------------------------------------------
30 * Local variables:
31 * c-file-style: "linux"
32 * End:
33 */
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h
deleted file mode 100644
index 01d489de398..00000000000
--- a/arch/um/kernel/skas/include/skas.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SKAS_H
7#define __SKAS_H
8
9#include "mm_id.h"
10#include "sysdep/ptrace.h"
11
12extern int userspace_pid[];
13extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
14extern int skas_needs_stub;
15
16extern void switch_threads(void *me, void *next);
17extern void thread_wait(void *sw, void *fb);
18extern void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
19 void (*handler)(int));
20extern int start_idle_thread(void *stack, void *switch_buf_ptr,
21 void **fork_buf_ptr);
22extern int user_thread(unsigned long stack, int flags);
23extern void userspace(union uml_pt_regs *regs);
24extern void new_thread_proc(void *stack, void (*handler)(int sig));
25extern void new_thread_handler(int sig);
26extern void handle_syscall(union uml_pt_regs *regs);
27extern int map(struct mm_id * mm_idp, unsigned long virt,
28 unsigned long len, int r, int w, int x, int phys_fd,
29 unsigned long long offset, int done, void **data);
30extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len,
31 int done, void **data);
32extern int protect(struct mm_id * mm_idp, unsigned long addr,
33 unsigned long len, int r, int w, int x, int done,
34 void **data);
35extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
36extern int new_mm(int from, unsigned long stack);
37extern int start_userspace(unsigned long stub_stack);
38extern int copy_context_skas0(unsigned long stack, int pid);
39extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
40extern long execute_syscall_skas(void *r);
41extern unsigned long current_stub_stack(void);
42extern long run_syscall_stub(struct mm_id * mm_idp,
43 int syscall, unsigned long *args, long expected,
44 void **addr, int done);
45extern long syscall_stub_data(struct mm_id * mm_idp,
46 unsigned long *data, int data_count,
47 void **addr, void **stub_addr);
48
49#endif
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 677871f1b37..c5c9885a829 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -78,7 +78,7 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
78 struct mmu_context_skas *from_mm = NULL; 78 struct mmu_context_skas *from_mm = NULL;
79 struct mmu_context_skas *to_mm = &mm->context.skas; 79 struct mmu_context_skas *to_mm = &mm->context.skas;
80 unsigned long stack = 0; 80 unsigned long stack = 0;
81 int from_fd, ret = -ENOMEM; 81 int ret = -ENOMEM;
82 82
83 if(skas_needs_stub){ 83 if(skas_needs_stub){
84 stack = get_zeroed_page(GFP_KERNEL); 84 stack = get_zeroed_page(GFP_KERNEL);
@@ -108,11 +108,7 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
108 from_mm = &current->mm->context.skas; 108 from_mm = &current->mm->context.skas;
109 109
110 if(proc_mm){ 110 if(proc_mm){
111 if(from_mm) 111 ret = new_mm(stack);
112 from_fd = from_mm->id.u.mm_fd;
113 else from_fd = -1;
114
115 ret = new_mm(from_fd, stack);
116 if(ret < 0){ 112 if(ret < 0){
117 printk("init_new_context_skas - new_mm failed, " 113 printk("init_new_context_skas - new_mm failed, "
118 "errno = %d\n", ret); 114 "errno = %d\n", ret);
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 3b3955d8440..eea1c9c4bb0 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -18,7 +18,6 @@
18#include <asm/types.h> 18#include <asm/types.h>
19#include "user.h" 19#include "user.h"
20#include "ptrace_user.h" 20#include "ptrace_user.h"
21#include "time_user.h"
22#include "sysdep/ptrace.h" 21#include "sysdep/ptrace.h"
23#include "user_util.h" 22#include "user_util.h"
24#include "kern_util.h" 23#include "kern_util.h"
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index dc41c6dc2f3..3f70a2e12f0 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -13,14 +13,12 @@
13#include "asm/uaccess.h" 13#include "asm/uaccess.h"
14#include "asm/atomic.h" 14#include "asm/atomic.h"
15#include "kern_util.h" 15#include "kern_util.h"
16#include "time_user.h"
17#include "skas.h" 16#include "skas.h"
18#include "os.h" 17#include "os.h"
19#include "user_util.h" 18#include "user_util.h"
20#include "tlb.h" 19#include "tlb.h"
21#include "kern.h" 20#include "kern.h"
22#include "mode.h" 21#include "mode.h"
23#include "proc_mm.h"
24#include "registers.h" 22#include "registers.h"
25 23
26void switch_to_skas(void *prev, void *next) 24void switch_to_skas(void *prev, void *next)
@@ -34,7 +32,7 @@ void switch_to_skas(void *prev, void *next)
34 if(current->pid == 0) 32 if(current->pid == 0)
35 switch_timers(0); 33 switch_timers(0);
36 34
37 switch_threads(&from->thread.mode.skas.switch_buf, 35 switch_threads(&from->thread.mode.skas.switch_buf,
38 to->thread.mode.skas.switch_buf); 36 to->thread.mode.skas.switch_buf);
39 37
40 if(current->pid == 0) 38 if(current->pid == 0)
@@ -50,8 +48,8 @@ void new_thread_handler(int sig)
50 48
51 fn = current->thread.request.u.thread.proc; 49 fn = current->thread.request.u.thread.proc;
52 arg = current->thread.request.u.thread.arg; 50 arg = current->thread.request.u.thread.arg;
53 change_sig(SIGUSR1, 1); 51 os_usr1_signal(1);
54 thread_wait(&current->thread.mode.skas.switch_buf, 52 thread_wait(&current->thread.mode.skas.switch_buf,
55 current->thread.mode.skas.fork_buf); 53 current->thread.mode.skas.fork_buf);
56 54
57 if(current->thread.prev_sched != NULL) 55 if(current->thread.prev_sched != NULL)
@@ -82,8 +80,8 @@ void release_thread_skas(struct task_struct *task)
82 80
83void fork_handler(int sig) 81void fork_handler(int sig)
84{ 82{
85 change_sig(SIGUSR1, 1); 83 os_usr1_signal(1);
86 thread_wait(&current->thread.mode.skas.switch_buf, 84 thread_wait(&current->thread.mode.skas.switch_buf,
87 current->thread.mode.skas.fork_buf); 85 current->thread.mode.skas.fork_buf);
88 86
89 force_flush_all(); 87 force_flush_all();
@@ -93,13 +91,13 @@ void fork_handler(int sig)
93 schedule_tail(current->thread.prev_sched); 91 schedule_tail(current->thread.prev_sched);
94 current->thread.prev_sched = NULL; 92 current->thread.prev_sched = NULL;
95 93
96 /* Handle any immediate reschedules or signals */ 94/* Handle any immediate reschedules or signals */
97 interrupt_end(); 95 interrupt_end();
98 userspace(&current->thread.regs.regs); 96 userspace(&current->thread.regs.regs);
99} 97}
100 98
101int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp, 99int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
102 unsigned long stack_top, struct task_struct * p, 100 unsigned long stack_top, struct task_struct * p,
103 struct pt_regs *regs) 101 struct pt_regs *regs)
104{ 102{
105 void (*handler)(int); 103 void (*handler)(int);
@@ -123,27 +121,14 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
123 return(0); 121 return(0);
124} 122}
125 123
126extern void map_stub_pages(int fd, unsigned long code, 124int new_mm(unsigned long stack)
127 unsigned long data, unsigned long stack);
128int new_mm(int from, unsigned long stack)
129{ 125{
130 struct proc_mm_op copy; 126 int fd;
131 int n, fd;
132 127
133 fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0); 128 fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
134 if(fd < 0) 129 if(fd < 0)
135 return(fd); 130 return(fd);
136 131
137 if(from != -1){
138 copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
139 .u =
140 { .copy_segments = from } } );
141 n = os_write_file(fd, &copy, sizeof(copy));
142 if(n != sizeof(copy))
143 printk("new_mm : /proc/mm copy_segments failed, "
144 "err = %d\n", -n);
145 }
146
147 if(skas_needs_stub) 132 if(skas_needs_stub)
148 map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack); 133 map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
149 134
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index a5a47528dec..5992c325716 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -13,7 +13,7 @@
13#include "asm/pgtable.h" 13#include "asm/pgtable.h"
14#include "asm/uaccess.h" 14#include "asm/uaccess.h"
15#include "kern_util.h" 15#include "kern_util.h"
16#include "user_util.h" 16#include "os.h"
17 17
18extern void *um_virt_to_phys(struct task_struct *task, unsigned long addr, 18extern void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
19 pte_t *pte_out); 19 pte_t *pte_out);
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index 1429c131879..1731d90e685 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -25,12 +25,12 @@ int record_syscall_start(int syscall)
25 syscall_record[index].syscall = syscall; 25 syscall_record[index].syscall = syscall;
26 syscall_record[index].pid = current_pid(); 26 syscall_record[index].pid = current_pid();
27 syscall_record[index].result = 0xdeadbeef; 27 syscall_record[index].result = 0xdeadbeef;
28 syscall_record[index].start = os_usecs(); 28 syscall_record[index].start = os_nsecs();
29 return(index); 29 return(index);
30} 30}
31 31
32void record_syscall_end(int index, long result) 32void record_syscall_end(int index, long result)
33{ 33{
34 syscall_record[index].result = result; 34 syscall_record[index].result = result;
35 syscall_record[index].end = os_usecs(); 35 syscall_record[index].end = os_nsecs();
36} 36}
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 020ca79b8d3..3c7626cdba4 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -13,12 +13,12 @@
13#include "linux/interrupt.h" 13#include "linux/interrupt.h"
14#include "linux/init.h" 14#include "linux/init.h"
15#include "linux/delay.h" 15#include "linux/delay.h"
16#include "linux/hrtimer.h"
16#include "asm/irq.h" 17#include "asm/irq.h"
17#include "asm/param.h" 18#include "asm/param.h"
18#include "asm/current.h" 19#include "asm/current.h"
19#include "kern_util.h" 20#include "kern_util.h"
20#include "user_util.h" 21#include "user_util.h"
21#include "time_user.h"
22#include "mode.h" 22#include "mode.h"
23#include "os.h" 23#include "os.h"
24 24
@@ -39,7 +39,7 @@ unsigned long long sched_clock(void)
39int timer_irq_inited = 0; 39int timer_irq_inited = 0;
40 40
41static int first_tick; 41static int first_tick;
42static unsigned long long prev_usecs; 42static unsigned long long prev_nsecs;
43#ifdef CONFIG_UML_REAL_TIME_CLOCK 43#ifdef CONFIG_UML_REAL_TIME_CLOCK
44static long long delta; /* Deviation per interval */ 44static long long delta; /* Deviation per interval */
45#endif 45#endif
@@ -58,23 +58,23 @@ void timer_irq(union uml_pt_regs *regs)
58 if(first_tick){ 58 if(first_tick){
59#ifdef CONFIG_UML_REAL_TIME_CLOCK 59#ifdef CONFIG_UML_REAL_TIME_CLOCK
60 /* We've had 1 tick */ 60 /* We've had 1 tick */
61 unsigned long long usecs = os_usecs(); 61 unsigned long long nsecs = os_nsecs();
62 62
63 delta += usecs - prev_usecs; 63 delta += nsecs - prev_nsecs;
64 prev_usecs = usecs; 64 prev_nsecs = nsecs;
65 65
66 /* Protect against the host clock being set backwards */ 66 /* Protect against the host clock being set backwards */
67 if(delta < 0) 67 if(delta < 0)
68 delta = 0; 68 delta = 0;
69 69
70 ticks += (delta * HZ) / MILLION; 70 ticks += (delta * HZ) / BILLION;
71 delta -= (ticks * MILLION) / HZ; 71 delta -= (ticks * BILLION) / HZ;
72#else 72#else
73 ticks = 1; 73 ticks = 1;
74#endif 74#endif
75 } 75 }
76 else { 76 else {
77 prev_usecs = os_usecs(); 77 prev_nsecs = os_nsecs();
78 first_tick = 1; 78 first_tick = 1;
79 } 79 }
80 80
@@ -84,49 +84,102 @@ void timer_irq(union uml_pt_regs *regs)
84 } 84 }
85} 85}
86 86
87void boot_timer_handler(int sig) 87void do_boot_timer_handler(struct sigcontext * sc)
88{ 88{
89 struct pt_regs regs; 89 struct pt_regs regs;
90 90
91 CHOOSE_MODE((void) 91 CHOOSE_MODE((void) (UPT_SC(&regs.regs) = sc),
92 (UPT_SC(&regs.regs) = (struct sigcontext *) (&sig + 1)),
93 (void) (regs.regs.skas.is_user = 0)); 92 (void) (regs.regs.skas.is_user = 0));
94 do_timer(&regs); 93 do_timer(&regs);
95} 94}
96 95
96static DEFINE_SPINLOCK(timer_spinlock);
97
98static unsigned long long local_offset = 0;
99
100static inline unsigned long long get_time(void)
101{
102 unsigned long long nsecs;
103 unsigned long flags;
104
105 spin_lock_irqsave(&timer_spinlock, flags);
106 nsecs = os_nsecs();
107 nsecs += local_offset;
108 spin_unlock_irqrestore(&timer_spinlock, flags);
109
110 return nsecs;
111}
112
97irqreturn_t um_timer(int irq, void *dev, struct pt_regs *regs) 113irqreturn_t um_timer(int irq, void *dev, struct pt_regs *regs)
98{ 114{
115 unsigned long long nsecs;
99 unsigned long flags; 116 unsigned long flags;
100 117
101 do_timer(regs); 118 do_timer(regs);
119
102 write_seqlock_irqsave(&xtime_lock, flags); 120 write_seqlock_irqsave(&xtime_lock, flags);
103 timer(); 121 nsecs = get_time() + local_offset;
122 xtime.tv_sec = nsecs / NSEC_PER_SEC;
123 xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC;
104 write_sequnlock_irqrestore(&xtime_lock, flags); 124 write_sequnlock_irqrestore(&xtime_lock, flags);
125
105 return(IRQ_HANDLED); 126 return(IRQ_HANDLED);
106} 127}
107 128
108long um_time(int __user *tloc) 129long um_time(int __user *tloc)
109{ 130{
110 struct timeval now; 131 long ret = get_time() / NSEC_PER_SEC;
111 132
112 do_gettimeofday(&now); 133 if((tloc != NULL) && put_user(ret, tloc))
113 if (tloc) { 134 return -EFAULT;
114 if (put_user(now.tv_sec, tloc)) 135
115 now.tv_sec = -EFAULT; 136 return ret;
116 } 137}
117 return now.tv_sec; 138
139void do_gettimeofday(struct timeval *tv)
140{
141 unsigned long long nsecs = get_time();
142
143 tv->tv_sec = nsecs / NSEC_PER_SEC;
144 /* Careful about calculations here - this was originally done as
145 * (nsecs - tv->tv_sec * NSEC_PER_SEC) / NSEC_PER_USEC
146 * which gave bogus (> 1000000) values. Dunno why, suspect gcc
147 * (4.0.0) miscompiled it, or there's a subtle 64/32-bit conversion
148 * problem that I missed.
149 */
150 nsecs -= tv->tv_sec * NSEC_PER_SEC;
151 tv->tv_usec = (unsigned long) nsecs / NSEC_PER_USEC;
152}
153
154static inline void set_time(unsigned long long nsecs)
155{
156 unsigned long long now;
157 unsigned long flags;
158
159 spin_lock_irqsave(&timer_spinlock, flags);
160 now = os_nsecs();
161 local_offset = nsecs - now;
162 spin_unlock_irqrestore(&timer_spinlock, flags);
163
164 clock_was_set();
118} 165}
119 166
120long um_stime(int __user *tptr) 167long um_stime(int __user *tptr)
121{ 168{
122 int value; 169 int value;
123 struct timespec new;
124 170
125 if (get_user(value, tptr)) 171 if (get_user(value, tptr))
126 return -EFAULT; 172 return -EFAULT;
127 new.tv_sec = value; 173
128 new.tv_nsec = 0; 174 set_time((unsigned long long) value * NSEC_PER_SEC);
129 do_settimeofday(&new); 175
176 return 0;
177}
178
179int do_settimeofday(struct timespec *tv)
180{
181 set_time((unsigned long long) tv->tv_sec * NSEC_PER_SEC + tv->tv_nsec);
182
130 return 0; 183 return 0;
131} 184}
132 185
@@ -134,29 +187,15 @@ void timer_handler(int sig, union uml_pt_regs *regs)
134{ 187{
135 local_irq_disable(); 188 local_irq_disable();
136 irq_enter(); 189 irq_enter();
137 update_process_times(CHOOSE_MODE(user_context(UPT_SP(regs)), 190 update_process_times(CHOOSE_MODE(
138 (regs)->skas.is_user)); 191 (UPT_SC(regs) && user_context(UPT_SP(regs))),
192 (regs)->skas.is_user));
139 irq_exit(); 193 irq_exit();
140 local_irq_enable(); 194 local_irq_enable();
141 if(current_thread->cpu == 0) 195 if(current_thread->cpu == 0)
142 timer_irq(regs); 196 timer_irq(regs);
143} 197}
144 198
145static DEFINE_SPINLOCK(timer_spinlock);
146
147unsigned long time_lock(void)
148{
149 unsigned long flags;
150
151 spin_lock_irqsave(&timer_spinlock, flags);
152 return(flags);
153}
154
155void time_unlock(unsigned long flags)
156{
157 spin_unlock_irqrestore(&timer_spinlock, flags);
158}
159
160int __init timer_init(void) 199int __init timer_init(void)
161{ 200{
162 int err; 201 int err;
@@ -171,14 +210,3 @@ int __init timer_init(void)
171} 210}
172 211
173__initcall(timer_init); 212__initcall(timer_init);
174
175/*
176 * Overrides for Emacs so that we follow Linus's tabbing style.
177 * Emacs will notice this stuff at the end of the file and automatically
178 * adjust the settings for this buffer only. This must remain at the end
179 * of the file.
180 * ---------------------------------------------------------------------------
181 * Local variables:
182 * c-file-style: "linux"
183 * End:
184 */
diff --git a/arch/um/kernel/tt/exec_kern.c b/arch/um/kernel/tt/exec_kern.c
index 8f40e483873..5c1e4cc1c04 100644
--- a/arch/um/kernel/tt/exec_kern.c
+++ b/arch/um/kernel/tt/exec_kern.c
@@ -13,7 +13,6 @@
13#include "user_util.h" 13#include "user_util.h"
14#include "kern_util.h" 14#include "kern_util.h"
15#include "irq_user.h" 15#include "irq_user.h"
16#include "time_user.h"
17#include "mem_user.h" 16#include "mem_user.h"
18#include "os.h" 17#include "os.h"
19#include "tlb.h" 18#include "tlb.h"
diff --git a/arch/um/kernel/tt/gdb.c b/arch/um/kernel/tt/gdb.c
index 37e22d71a0d..786e4edd86c 100644
--- a/arch/um/kernel/tt/gdb.c
+++ b/arch/um/kernel/tt/gdb.c
@@ -20,6 +20,7 @@
20#include "user_util.h" 20#include "user_util.h"
21#include "tt.h" 21#include "tt.h"
22#include "sysdep/thread.h" 22#include "sysdep/thread.h"
23#include "os.h"
23 24
24extern int debugger_pid; 25extern int debugger_pid;
25extern int debugger_fd; 26extern int debugger_fd;
diff --git a/arch/um/kernel/tt/include/mmu-tt.h b/arch/um/kernel/tt/include/mmu-tt.h
deleted file mode 100644
index 0440510ab3f..00000000000
--- a/arch/um/kernel/tt/include/mmu-tt.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __TT_MMU_H
7#define __TT_MMU_H
8
9struct mmu_context_tt {
10};
11
12#endif
13
14/*
15 * Overrides for Emacs so that we follow Linus's tabbing style.
16 * Emacs will notice this stuff at the end of the file and automatically
17 * adjust the settings for this buffer only. This must remain at the end
18 * of the file.
19 * ---------------------------------------------------------------------------
20 * Local variables:
21 * c-file-style: "linux"
22 * End:
23 */
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
index 62535303aa2..295c1ac817b 100644
--- a/arch/um/kernel/tt/process_kern.c
+++ b/arch/um/kernel/tt/process_kern.c
@@ -18,7 +18,6 @@
18#include "os.h" 18#include "os.h"
19#include "kern.h" 19#include "kern.h"
20#include "sigcontext.h" 20#include "sigcontext.h"
21#include "time_user.h"
22#include "mem_user.h" 21#include "mem_user.h"
23#include "tlb.h" 22#include "tlb.h"
24#include "mode.h" 23#include "mode.h"
diff --git a/arch/um/kernel/tt/ptproxy/ptrace.c b/arch/um/kernel/tt/ptproxy/ptrace.c
index 528a5fc8d88..03774427d46 100644
--- a/arch/um/kernel/tt/ptproxy/ptrace.c
+++ b/arch/um/kernel/tt/ptproxy/ptrace.c
@@ -20,6 +20,7 @@ Jeff Dike (jdike@karaya.com) : Modified for integration into uml
20#include "kern_util.h" 20#include "kern_util.h"
21#include "ptrace_user.h" 21#include "ptrace_user.h"
22#include "tt.h" 22#include "tt.h"
23#include "os.h"
23 24
24long proxy_ptrace(struct debugger *debugger, int arg1, pid_t arg2, 25long proxy_ptrace(struct debugger *debugger, int arg1, pid_t arg2,
25 long arg3, long arg4, pid_t child, int *ret) 26 long arg3, long arg4, pid_t child, int *ret)
diff --git a/arch/um/kernel/tt/ptproxy/sysdep.c b/arch/um/kernel/tt/ptproxy/sysdep.c
index a5f0e01e214..99f178319d0 100644
--- a/arch/um/kernel/tt/ptproxy/sysdep.c
+++ b/arch/um/kernel/tt/ptproxy/sysdep.c
@@ -15,6 +15,7 @@ terms and conditions.
15#include "ptrace_user.h" 15#include "ptrace_user.h"
16#include "user_util.h" 16#include "user_util.h"
17#include "user.h" 17#include "user.h"
18#include "os.h"
18 19
19int get_syscall(pid_t pid, long *arg1, long *arg2, long *arg3, long *arg4, 20int get_syscall(pid_t pid, long *arg1, long *arg2, long *arg3, long *arg4,
20 long *arg5) 21 long *arg5)
diff --git a/arch/um/kernel/tt/trap_user.c b/arch/um/kernel/tt/trap_user.c
index a414c529fbc..b5d9d64d91e 100644
--- a/arch/um/kernel/tt/trap_user.c
+++ b/arch/um/kernel/tt/trap_user.c
@@ -18,7 +18,7 @@ void sig_handler_common_tt(int sig, void *sc_ptr)
18{ 18{
19 struct sigcontext *sc = sc_ptr; 19 struct sigcontext *sc = sc_ptr;
20 struct tt_regs save_regs, *r; 20 struct tt_regs save_regs, *r;
21 int save_errno = errno, is_user; 21 int save_errno = errno, is_user = 0;
22 void (*handler)(int, union uml_pt_regs *); 22 void (*handler)(int, union uml_pt_regs *);
23 23
24 /* This is done because to allow SIGSEGV to be delivered inside a SEGV 24 /* This is done because to allow SIGSEGV to be delivered inside a SEGV
@@ -35,7 +35,8 @@ void sig_handler_common_tt(int sig, void *sc_ptr)
35 GET_FAULTINFO_FROM_SC(r->faultinfo, sc); 35 GET_FAULTINFO_FROM_SC(r->faultinfo, sc);
36 } 36 }
37 save_regs = *r; 37 save_regs = *r;
38 is_user = user_context(SC_SP(sc)); 38 if (sc)
39 is_user = user_context(SC_SP(sc));
39 r->sc = sc; 40 r->sc = sc;
40 if(sig != SIGUSR2) 41 if(sig != SIGUSR2)
41 r->syscall = -1; 42 r->syscall = -1;
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index 40c7d6b1df6..08a4e628b24 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -5,12 +5,12 @@
5 5
6obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ 6obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
7 start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o user_syms.o \ 7 start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o user_syms.o \
8 drivers/ sys-$(SUBARCH)/ 8 util.o drivers/ sys-$(SUBARCH)/
9 9
10obj-$(CONFIG_MODE_SKAS) += skas/ 10obj-$(CONFIG_MODE_SKAS) += skas/
11 11
12USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ 12USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
13 start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o 13 start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o util.o
14 14
15elf_aux.o: $(ARCH_DIR)/kernel-offsets.h 15elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
16CFLAGS_elf_aux.o += -I$(objtree)/arch/um 16CFLAGS_elf_aux.o += -I$(objtree)/arch/um
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 36cc8475bcd..6490a4ff40a 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -60,7 +60,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv,
60 60
61 if((stack_out != NULL) && (*stack_out != 0)) 61 if((stack_out != NULL) && (*stack_out != 0))
62 stack = *stack_out; 62 stack = *stack_out;
63 else stack = alloc_stack(0, um_in_interrupt()); 63 else stack = alloc_stack(0, __cant_sleep());
64 if(stack == 0) 64 if(stack == 0)
65 return(-ENOMEM); 65 return(-ENOMEM);
66 66
@@ -124,7 +124,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
124 unsigned long stack, sp; 124 unsigned long stack, sp;
125 int pid, status, err; 125 int pid, status, err;
126 126
127 stack = alloc_stack(stack_order, um_in_interrupt()); 127 stack = alloc_stack(stack_order, __cant_sleep());
128 if(stack == 0) return(-ENOMEM); 128 if(stack == 0) return(-ENOMEM);
129 129
130 sp = stack + (page_size() << stack_order) - sizeof(void *); 130 sp = stack + (page_size() << stack_order) - sizeof(void *);
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 172c8474453..2878e89a674 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -16,7 +16,6 @@
16#include "user_util.h" 16#include "user_util.h"
17#include "kern_util.h" 17#include "kern_util.h"
18#include "mem_user.h" 18#include "mem_user.h"
19#include "time_user.h"
20#include "irq_user.h" 19#include "irq_user.h"
21#include "user.h" 20#include "user.h"
22#include "init.h" 21#include "init.h"
@@ -82,20 +81,8 @@ extern void scan_elf_aux( char **envp);
82int main(int argc, char **argv, char **envp) 81int main(int argc, char **argv, char **envp)
83{ 82{
84 char **new_argv; 83 char **new_argv;
85 sigset_t mask;
86 int ret, i, err; 84 int ret, i, err;
87 85
88 /* Enable all signals except SIGIO - in some environments, we can
89 * enter with some signals blocked
90 */
91
92 sigemptyset(&mask);
93 sigaddset(&mask, SIGIO);
94 if(sigprocmask(SIG_SETMASK, &mask, NULL) < 0){
95 perror("sigprocmask");
96 exit(1);
97 }
98
99#ifdef UML_CONFIG_CMDLINE_ON_HOST 86#ifdef UML_CONFIG_CMDLINE_ON_HOST
100 /* Allocate memory for thread command lines */ 87 /* Allocate memory for thread command lines */
101 if(argc < 2 || strlen(argv[1]) < THREAD_NAME_LEN - 1){ 88 if(argc < 2 || strlen(argv[1]) < THREAD_NAME_LEN - 1){
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 39815c6b5e4..7f5e2dac2a3 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -18,6 +18,7 @@
18#include "process.h" 18#include "process.h"
19#include "irq_user.h" 19#include "irq_user.h"
20#include "kern_util.h" 20#include "kern_util.h"
21#include "longjmp.h"
21 22
22#define ARBITRARY_ADDR -1 23#define ARBITRARY_ADDR -1
23#define FAILURE_PID -1 24#define FAILURE_PID -1
@@ -205,24 +206,13 @@ void init_new_thread_signals(int altstack)
205 206
206int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr) 207int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr)
207{ 208{
208 sigjmp_buf buf; 209 sigjmp_buf buf;
209 int n; 210 int n, enable;
210 211
211 *jmp_ptr = &buf; 212 *jmp_ptr = &buf;
212 n = sigsetjmp(buf, 1); 213 n = UML_SIGSETJMP(&buf, enable);
213 if(n != 0) 214 if(n != 0)
214 return(n); 215 return(n);
215 (*fn)(arg); 216 (*fn)(arg);
216 return(0); 217 return(0);
217} 218}
218
219/*
220 * Overrides for Emacs so that we follow Linus's tabbing style.
221 * Emacs will notice this stuff at the end of the file and automatically
222 * adjust the settings for this buffer only. This must remain at the end
223 * of the file.
224 * ---------------------------------------------------------------------------
225 * Local variables:
226 * c-file-style: "linux"
227 * End:
228 */
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index c1f46a0fef1..f11b3124a0c 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -12,32 +12,66 @@
12#include <string.h> 12#include <string.h>
13#include <sys/mman.h> 13#include <sys/mman.h>
14#include "user_util.h" 14#include "user_util.h"
15#include "kern_util.h"
16#include "user.h" 15#include "user.h"
17#include "signal_kern.h" 16#include "signal_kern.h"
18#include "sysdep/sigcontext.h" 17#include "sysdep/sigcontext.h"
19#include "sysdep/signal.h" 18#include "sysdep/signal.h"
20#include "sigcontext.h" 19#include "sigcontext.h"
21#include "time_user.h"
22#include "mode.h" 20#include "mode.h"
21#include "os.h"
22
23/* These are the asynchronous signals. SIGVTALRM and SIGARLM are handled
24 * together under SIGVTALRM_BIT. SIGPROF is excluded because we want to
25 * be able to profile all of UML, not just the non-critical sections. If
26 * profiling is not thread-safe, then that is not my problem. We can disable
27 * profiling when SMP is enabled in that case.
28 */
29#define SIGIO_BIT 0
30#define SIGIO_MASK (1 << SIGIO_BIT)
31
32#define SIGVTALRM_BIT 1
33#define SIGVTALRM_MASK (1 << SIGVTALRM_BIT)
34
35#define SIGALRM_BIT 2
36#define SIGALRM_MASK (1 << SIGALRM_BIT)
37
38static int signals_enabled = 1;
39static int pending = 0;
23 40
24void sig_handler(ARCH_SIGHDLR_PARAM) 41void sig_handler(ARCH_SIGHDLR_PARAM)
25{ 42{
26 struct sigcontext *sc; 43 struct sigcontext *sc;
44 int enabled;
45
46 /* Must be the first thing that this handler does - x86_64 stores
47 * the sigcontext in %rdx, and we need to save it before it has a
48 * chance to get trashed.
49 */
27 50
28 ARCH_GET_SIGCONTEXT(sc, sig); 51 ARCH_GET_SIGCONTEXT(sc, sig);
52
53 enabled = signals_enabled;
54 if(!enabled && (sig == SIGIO)){
55 pending |= SIGIO_MASK;
56 return;
57 }
58
59 block_signals();
60
29 CHOOSE_MODE_PROC(sig_handler_common_tt, sig_handler_common_skas, 61 CHOOSE_MODE_PROC(sig_handler_common_tt, sig_handler_common_skas,
30 sig, sc); 62 sig, sc);
63
64 set_signals(enabled);
31} 65}
32 66
33extern int timer_irq_inited; 67extern int timer_irq_inited;
34 68
35void alarm_handler(ARCH_SIGHDLR_PARAM) 69static void real_alarm_handler(int sig, struct sigcontext *sc)
36{ 70{
37 struct sigcontext *sc; 71 if(!timer_irq_inited){
38 72 signals_enabled = 1;
39 ARCH_GET_SIGCONTEXT(sc, sig); 73 return;
40 if(!timer_irq_inited) return; 74 }
41 75
42 if(sig == SIGALRM) 76 if(sig == SIGALRM)
43 switch_timers(0); 77 switch_timers(0);
@@ -47,6 +81,52 @@ void alarm_handler(ARCH_SIGHDLR_PARAM)
47 81
48 if(sig == SIGALRM) 82 if(sig == SIGALRM)
49 switch_timers(1); 83 switch_timers(1);
84
85}
86
87void alarm_handler(ARCH_SIGHDLR_PARAM)
88{
89 struct sigcontext *sc;
90 int enabled;
91
92 ARCH_GET_SIGCONTEXT(sc, sig);
93
94 enabled = signals_enabled;
95 if(!signals_enabled){
96 if(sig == SIGVTALRM)
97 pending |= SIGVTALRM_MASK;
98 else pending |= SIGALRM_MASK;
99
100 return;
101 }
102
103 block_signals();
104
105 real_alarm_handler(sig, sc);
106 set_signals(enabled);
107}
108
109extern void do_boot_timer_handler(struct sigcontext * sc);
110
111void boot_timer_handler(ARCH_SIGHDLR_PARAM)
112{
113 struct sigcontext *sc;
114 int enabled;
115
116 ARCH_GET_SIGCONTEXT(sc, sig);
117
118 enabled = signals_enabled;
119 if(!enabled){
120 if(sig == SIGVTALRM)
121 pending |= SIGVTALRM_MASK;
122 else pending |= SIGALRM_MASK;
123 return;
124 }
125
126 block_signals();
127
128 do_boot_timer_handler(sc);
129 set_signals(enabled);
50} 130}
51 131
52void set_sigstack(void *sig_stack, int size) 132void set_sigstack(void *sig_stack, int size)
@@ -73,6 +153,7 @@ void set_handler(int sig, void (*handler)(int), int flags, ...)
73{ 153{
74 struct sigaction action; 154 struct sigaction action;
75 va_list ap; 155 va_list ap;
156 sigset_t sig_mask;
76 int mask; 157 int mask;
77 158
78 va_start(ap, flags); 159 va_start(ap, flags);
@@ -85,7 +166,12 @@ void set_handler(int sig, void (*handler)(int), int flags, ...)
85 action.sa_flags = flags; 166 action.sa_flags = flags;
86 action.sa_restorer = NULL; 167 action.sa_restorer = NULL;
87 if(sigaction(sig, &action, NULL) < 0) 168 if(sigaction(sig, &action, NULL) < 0)
88 panic("sigaction failed"); 169 panic("sigaction failed - errno = %d\n", errno);
170
171 sigemptyset(&sig_mask);
172 sigaddset(&sig_mask, sig);
173 if(sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
174 panic("sigprocmask failed - errno = %d\n", errno);
89} 175}
90 176
91int change_sig(int signal, int on) 177int change_sig(int signal, int on)
@@ -98,89 +184,77 @@ int change_sig(int signal, int on)
98 return(!sigismember(&old, signal)); 184 return(!sigismember(&old, signal));
99} 185}
100 186
101/* Both here and in set/get_signal we don't touch SIGPROF, because we must not
102 * disable profiling; it's safe because the profiling code does not interact
103 * with the kernel code at all.*/
104
105static void change_signals(int type)
106{
107 sigset_t mask;
108
109 sigemptyset(&mask);
110 sigaddset(&mask, SIGVTALRM);
111 sigaddset(&mask, SIGALRM);
112 sigaddset(&mask, SIGIO);
113 if(sigprocmask(type, &mask, NULL) < 0)
114 panic("Failed to change signal mask - errno = %d", errno);
115}
116
117void block_signals(void) 187void block_signals(void)
118{ 188{
119 change_signals(SIG_BLOCK); 189 signals_enabled = 0;
120} 190}
121 191
122void unblock_signals(void) 192void unblock_signals(void)
123{ 193{
124 change_signals(SIG_UNBLOCK); 194 int save_pending;
125}
126 195
127/* These are the asynchronous signals. SIGVTALRM and SIGARLM are handled 196 if(signals_enabled == 1)
128 * together under SIGVTALRM_BIT. SIGPROF is excluded because we want to 197 return;
129 * be able to profile all of UML, not just the non-critical sections. If
130 * profiling is not thread-safe, then that is not my problem. We can disable
131 * profiling when SMP is enabled in that case.
132 */
133#define SIGIO_BIT 0
134#define SIGVTALRM_BIT 1
135 198
136static int enable_mask(sigset_t *mask) 199 /* We loop because the IRQ handler returns with interrupts off. So,
137{ 200 * interrupts may have arrived and we need to re-enable them and
138 int sigs; 201 * recheck pending.
202 */
203 while(1){
204 /* Save and reset save_pending after enabling signals. This
205 * way, pending won't be changed while we're reading it.
206 */
207 signals_enabled = 1;
208
209 save_pending = pending;
210 if(save_pending == 0)
211 return;
212
213 pending = 0;
214
215 /* We have pending interrupts, so disable signals, as the
216 * handlers expect them off when they are called. They will
217 * be enabled again above.
218 */
219
220 signals_enabled = 0;
139 221
140 sigs = sigismember(mask, SIGIO) ? 0 : 1 << SIGIO_BIT; 222 /* Deal with SIGIO first because the alarm handler might
141 sigs |= sigismember(mask, SIGVTALRM) ? 0 : 1 << SIGVTALRM_BIT; 223 * schedule, leaving the pending SIGIO stranded until we come
142 sigs |= sigismember(mask, SIGALRM) ? 0 : 1 << SIGVTALRM_BIT; 224 * back here.
143 return(sigs); 225 */
226 if(save_pending & SIGIO_MASK)
227 CHOOSE_MODE_PROC(sig_handler_common_tt,
228 sig_handler_common_skas, SIGIO, NULL);
229
230 if(save_pending & SIGALRM_MASK)
231 real_alarm_handler(SIGALRM, NULL);
232
233 if(save_pending & SIGVTALRM_MASK)
234 real_alarm_handler(SIGVTALRM, NULL);
235 }
144} 236}
145 237
146int get_signals(void) 238int get_signals(void)
147{ 239{
148 sigset_t mask; 240 return signals_enabled;
149
150 if(sigprocmask(SIG_SETMASK, NULL, &mask) < 0)
151 panic("Failed to get signal mask");
152 return(enable_mask(&mask));
153} 241}
154 242
155int set_signals(int enable) 243int set_signals(int enable)
156{ 244{
157 sigset_t mask;
158 int ret; 245 int ret;
246 if(signals_enabled == enable)
247 return enable;
159 248
160 sigemptyset(&mask); 249 ret = signals_enabled;
161 if(enable & (1 << SIGIO_BIT)) 250 if(enable)
162 sigaddset(&mask, SIGIO); 251 unblock_signals();
163 if(enable & (1 << SIGVTALRM_BIT)){ 252 else block_signals();
164 sigaddset(&mask, SIGVTALRM);
165 sigaddset(&mask, SIGALRM);
166 }
167 253
168 /* This is safe - sigprocmask is guaranteed to copy locally the 254 return ret;
169 * value of new_set, do his work and then, at the end, write to 255}
170 * old_set.
171 */
172 if(sigprocmask(SIG_UNBLOCK, &mask, &mask) < 0)
173 panic("Failed to enable signals");
174 ret = enable_mask(&mask);
175 sigemptyset(&mask);
176 if((enable & (1 << SIGIO_BIT)) == 0)
177 sigaddset(&mask, SIGIO);
178 if((enable & (1 << SIGVTALRM_BIT)) == 0){
179 sigaddset(&mask, SIGVTALRM);
180 sigaddset(&mask, SIGALRM);
181 }
182 if(sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
183 panic("Failed to block signals");
184 256
185 return(ret); 257void os_usr1_signal(int on)
258{
259 change_sig(SIGUSR1, on);
186} 260}
diff --git a/arch/um/os-Linux/skas/Makefile b/arch/um/os-Linux/skas/Makefile
index eab5386d60a..5fd8d4dad66 100644
--- a/arch/um/os-Linux/skas/Makefile
+++ b/arch/um/os-Linux/skas/Makefile
@@ -3,8 +3,8 @@
3# Licensed under the GPL 3# Licensed under the GPL
4# 4#
5 5
6obj-y := trap.o 6obj-y := mem.o process.o trap.o
7 7
8USER_OBJS := trap.o 8USER_OBJS := mem.o process.o trap.o
9 9
10include arch/um/scripts/Makefile.rules 10include arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/skas/mem_user.c b/arch/um/os-Linux/skas/mem.c
index 1d89640bd50..9890e9090f5 100644
--- a/arch/um/kernel/skas/mem_user.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -32,7 +32,7 @@ extern void wait_stub_done(int pid, int sig, char * fname);
32static inline unsigned long *check_init_stack(struct mm_id * mm_idp, 32static inline unsigned long *check_init_stack(struct mm_id * mm_idp,
33 unsigned long *stack) 33 unsigned long *stack)
34{ 34{
35 if(stack == NULL){ 35 if(stack == NULL) {
36 stack = (unsigned long *) mm_idp->stack + 2; 36 stack = (unsigned long *) mm_idp->stack + 2;
37 *stack = 0; 37 *stack = 0;
38 } 38 }
@@ -45,13 +45,14 @@ int single_count = 0;
45int multi_count = 0; 45int multi_count = 0;
46int multi_op_count = 0; 46int multi_op_count = 0;
47 47
48static long do_syscall_stub(struct mm_id *mm_idp, void **addr) 48static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
49{ 49{
50 unsigned long regs[MAX_REG_NR]; 50 unsigned long regs[MAX_REG_NR];
51 unsigned long *data; 51 int n;
52 unsigned long *syscall;
53 long ret, offset; 52 long ret, offset;
54 int n, pid = mm_idp->u.pid; 53 unsigned long * data;
54 unsigned long * syscall;
55 int pid = mm_idp->u.pid;
55 56
56 if(proc_mm) 57 if(proc_mm)
57#warning Need to look up userspace_pid by cpu 58#warning Need to look up userspace_pid by cpu
@@ -59,10 +60,11 @@ static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
59 60
60 multi_count++; 61 multi_count++;
61 62
62 get_safe_registers(regs); 63 get_safe_registers(regs);
63 regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE + 64 regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
64 ((unsigned long) &batch_syscall_stub - 65 ((unsigned long) &batch_syscall_stub -
65 (unsigned long) &__syscall_stub_start); 66 (unsigned long) &__syscall_stub_start);
67
66 n = ptrace_setregs(pid, regs); 68 n = ptrace_setregs(pid, regs);
67 if(n < 0) 69 if(n < 0)
68 panic("do_syscall_stub : PTRACE_SETREGS failed, errno = %d\n", 70 panic("do_syscall_stub : PTRACE_SETREGS failed, errno = %d\n",
@@ -80,6 +82,8 @@ static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
80 if (offset) { 82 if (offset) {
81 data = (unsigned long *)(mm_idp->stack + 83 data = (unsigned long *)(mm_idp->stack +
82 offset - UML_CONFIG_STUB_DATA); 84 offset - UML_CONFIG_STUB_DATA);
85 printk("do_syscall_stub : ret = %d, offset = %d, "
86 "data = 0x%x\n", ret, offset, data);
83 syscall = (unsigned long *)((unsigned long)data + data[0]); 87 syscall = (unsigned long *)((unsigned long)data + data[0]);
84 printk("do_syscall_stub: syscall %ld failed, return value = " 88 printk("do_syscall_stub: syscall %ld failed, return value = "
85 "0x%lx, expected return value = 0x%lx\n", 89 "0x%lx, expected return value = 0x%lx\n",
@@ -107,32 +111,32 @@ static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
107 111
108long run_syscall_stub(struct mm_id * mm_idp, int syscall, 112long run_syscall_stub(struct mm_id * mm_idp, int syscall,
109 unsigned long *args, long expected, void **addr, 113 unsigned long *args, long expected, void **addr,
110 int done) 114 int done)
111{ 115{
112 unsigned long *stack = check_init_stack(mm_idp, *addr); 116 unsigned long *stack = check_init_stack(mm_idp, *addr);
113 117
114 if(done && *addr == NULL) 118 if(done && *addr == NULL)
115 single_count++; 119 single_count++;
116 120
117 *stack += sizeof(long); 121 *stack += sizeof(long);
118 stack += *stack / sizeof(long); 122 stack += *stack / sizeof(long);
119 123
120 *stack++ = syscall; 124 *stack++ = syscall;
121 *stack++ = args[0]; 125 *stack++ = args[0];
122 *stack++ = args[1]; 126 *stack++ = args[1];
123 *stack++ = args[2]; 127 *stack++ = args[2];
124 *stack++ = args[3]; 128 *stack++ = args[3];
125 *stack++ = args[4]; 129 *stack++ = args[4];
126 *stack++ = args[5]; 130 *stack++ = args[5];
127 *stack++ = expected; 131 *stack++ = expected;
128 *stack = 0; 132 *stack = 0;
129 multi_op_count++; 133 multi_op_count++;
130 134
131 if(!done && ((((unsigned long) stack) & ~PAGE_MASK) < 135 if(!done && ((((unsigned long) stack) & ~PAGE_MASK) <
132 PAGE_SIZE - 10 * sizeof(long))){ 136 PAGE_SIZE - 10 * sizeof(long))){
133 *addr = stack; 137 *addr = stack;
134 return 0; 138 return 0;
135 } 139 }
136 140
137 return do_syscall_stub(mm_idp, addr); 141 return do_syscall_stub(mm_idp, addr);
138} 142}
@@ -150,7 +154,7 @@ long syscall_stub_data(struct mm_id * mm_idp,
150 if((((unsigned long) *addr) & ~PAGE_MASK) >= 154 if((((unsigned long) *addr) & ~PAGE_MASK) >=
151 PAGE_SIZE - (10 + data_count) * sizeof(long)) { 155 PAGE_SIZE - (10 + data_count) * sizeof(long)) {
152 ret = do_syscall_stub(mm_idp, addr); 156 ret = do_syscall_stub(mm_idp, addr);
153 /* in case of error, don't overwrite data on stack */ 157 /* in case of error, don't overwrite data on stack */
154 if(ret) 158 if(ret)
155 return ret; 159 return ret;
156 } 160 }
@@ -172,39 +176,39 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
172 int r, int w, int x, int phys_fd, unsigned long long offset, 176 int r, int w, int x, int phys_fd, unsigned long long offset,
173 int done, void **data) 177 int done, void **data)
174{ 178{
175 int prot, ret; 179 int prot, ret;
176 180
177 prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | 181 prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
178 (x ? PROT_EXEC : 0); 182 (x ? PROT_EXEC : 0);
179 183
180 if(proc_mm){ 184 if(proc_mm){
181 struct proc_mm_op map; 185 struct proc_mm_op map;
182 int fd = mm_idp->u.mm_fd; 186 int fd = mm_idp->u.mm_fd;
183 187
184 map = ((struct proc_mm_op) { .op = MM_MMAP, 188 map = ((struct proc_mm_op) { .op = MM_MMAP,
185 .u = 189 .u =
186 { .mmap = 190 { .mmap =
187 { .addr = virt, 191 { .addr = virt,
188 .len = len, 192 .len = len,
189 .prot = prot, 193 .prot = prot,
190 .flags = MAP_SHARED | 194 .flags = MAP_SHARED |
191 MAP_FIXED, 195 MAP_FIXED,
192 .fd = phys_fd, 196 .fd = phys_fd,
193 .offset= offset 197 .offset= offset
194 } } } ); 198 } } } );
195 ret = os_write_file(fd, &map, sizeof(map)); 199 ret = os_write_file(fd, &map, sizeof(map));
196 if(ret != sizeof(map)) 200 if(ret != sizeof(map))
197 printk("map : /proc/mm map failed, err = %d\n", -ret); 201 printk("map : /proc/mm map failed, err = %d\n", -ret);
198 else ret = 0; 202 else ret = 0;
199 } 203 }
200 else { 204 else {
201 unsigned long args[] = { virt, len, prot, 205 unsigned long args[] = { virt, len, prot,
202 MAP_SHARED | MAP_FIXED, phys_fd, 206 MAP_SHARED | MAP_FIXED, phys_fd,
203 MMAP_OFFSET(offset) }; 207 MMAP_OFFSET(offset) };
204 208
205 ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt, 209 ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
206 data, done); 210 data, done);
207 } 211 }
208 212
209 return ret; 213 return ret;
210} 214}
@@ -212,68 +216,66 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
212int unmap(struct mm_id * mm_idp, void *addr, unsigned long len, int done, 216int unmap(struct mm_id * mm_idp, void *addr, unsigned long len, int done,
213 void **data) 217 void **data)
214{ 218{
215 int ret; 219 int ret;
216 220
217 if(proc_mm){ 221 if(proc_mm){
218 struct proc_mm_op unmap; 222 struct proc_mm_op unmap;
219 int fd = mm_idp->u.mm_fd; 223 int fd = mm_idp->u.mm_fd;
220 224
221 unmap = ((struct proc_mm_op) { .op = MM_MUNMAP, 225 unmap = ((struct proc_mm_op) { .op = MM_MUNMAP,
222 .u = 226 .u =
223 { .munmap = 227 { .munmap =
224 { .addr = 228 { .addr =
225 (unsigned long) addr, 229 (unsigned long) addr,
226 .len = len } } } ); 230 .len = len } } } );
227 ret = os_write_file(fd, &unmap, sizeof(unmap)); 231 ret = os_write_file(fd, &unmap, sizeof(unmap));
228 if(ret != sizeof(unmap)) 232 if(ret != sizeof(unmap))
229 printk("unmap - proc_mm write returned %d\n", ret); 233 printk("unmap - proc_mm write returned %d\n", ret);
230 else ret = 0; 234 else ret = 0;
231 } 235 }
232 else { 236 else {
233 unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0, 237 unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
234 0 }; 238 0 };
235 239
236 ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0, 240 ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
237 data, done); 241 data, done);
238 if(ret < 0) 242 }
239 printk("munmap stub failed, errno = %d\n", ret);
240 }
241 243
242 return ret; 244 return ret;
243} 245}
244 246
245int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len, 247int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
246 int r, int w, int x, int done, void **data) 248 int r, int w, int x, int done, void **data)
247{ 249{
248 struct proc_mm_op protect; 250 struct proc_mm_op protect;
249 int prot, ret; 251 int prot, ret;
250 252
251 prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | 253 prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
252 (x ? PROT_EXEC : 0); 254 (x ? PROT_EXEC : 0);
253 255 if(proc_mm){
254 if(proc_mm){ 256 int fd = mm_idp->u.mm_fd;
255 int fd = mm_idp->u.mm_fd; 257
256 protect = ((struct proc_mm_op) { .op = MM_MPROTECT, 258 protect = ((struct proc_mm_op) { .op = MM_MPROTECT,
257 .u = 259 .u =
258 { .mprotect = 260 { .mprotect =
259 { .addr = 261 { .addr =
260 (unsigned long) addr, 262 (unsigned long) addr,
261 .len = len, 263 .len = len,
262 .prot = prot } } } ); 264 .prot = prot } } } );
263 265
264 ret = os_write_file(fd, &protect, sizeof(protect)); 266 ret = os_write_file(fd, &protect, sizeof(protect));
265 if(ret != sizeof(protect)) 267 if(ret != sizeof(protect))
266 printk("protect failed, err = %d", -ret); 268 printk("protect failed, err = %d", -ret);
267 else ret = 0; 269 else ret = 0;
268 } 270 }
269 else { 271 else {
270 unsigned long args[] = { addr, len, prot, 0, 0, 0 }; 272 unsigned long args[] = { addr, len, prot, 0, 0, 0 };
271 273
272 ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0, 274 ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
273 data, done); 275 data, done);
274 } 276 }
275 277
276 return ret; 278 return ret;
277} 279}
278 280
279void before_mem_skas(unsigned long unused) 281void before_mem_skas(unsigned long unused)
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
new file mode 100644
index 00000000000..120a21c5883
--- /dev/null
+++ b/arch/um/os-Linux/skas/process.c
@@ -0,0 +1,566 @@
1/*
2 * Copyright (C) 2002- 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#include <stdlib.h>
7#include <string.h>
8#include <unistd.h>
9#include <errno.h>
10#include <signal.h>
11#include <setjmp.h>
12#include <sched.h>
13#include "ptrace_user.h"
14#include <sys/wait.h>
15#include <sys/mman.h>
16#include <sys/user.h>
17#include <sys/time.h>
18#include <asm/unistd.h>
19#include <asm/types.h>
20#include "user.h"
21#include "sysdep/ptrace.h"
22#include "user_util.h"
23#include "kern_util.h"
24#include "skas.h"
25#include "stub-data.h"
26#include "mm_id.h"
27#include "sysdep/sigcontext.h"
28#include "sysdep/stub.h"
29#include "os.h"
30#include "proc_mm.h"
31#include "skas_ptrace.h"
32#include "chan_user.h"
33#include "registers.h"
34#include "mem.h"
35#include "uml-config.h"
36#include "process.h"
37#include "longjmp.h"
38
/* Decide whether a SIGIO on fd is really a window-size change coming from
 * our own process group; if so, register it as a winch IRQ and claim it.
 * Returns 1 when the event was consumed here, 0 otherwise.
 */
int is_skas_winch(int pid, int fd, void *data)
{
	int from_our_pgrp = (pid == os_getpgrp());

	if(from_our_pgrp)
		register_winch_irq(-1, fd, -1, data);

	return from_our_pgrp;
}
47
/*
 * wait_stub_done - continue a traced stub process and wait for it to stop
 * with SIGUSR1 or SIGTRAP.
 *
 * pid   - pid of the traced stub process.
 * sig   - signal to deliver with the first PTRACE_CONT, or -1 to skip the
 *         initial continue and only wait.
 * fname - caller's name, used in the panic/printk messages.
 *
 * Stops caused by SIGVTALRM (the virtual interval timer) or SIGWINCH are
 * not what we are waiting for; the child is simply continued again with no
 * signal. Any other unexpected outcome dumps the stub's registers and
 * panics, since the stub must be assumed broken at that point.
 */
48void wait_stub_done(int pid, int sig, char * fname)
49{
50	int n, status, err;
51
52	do {
53		if ( sig != -1 ) {
54			err = ptrace(PTRACE_CONT, pid, 0, sig);
55			if(err)
56				panic("%s : continue failed, errno = %d\n",
57				      fname, errno);
58		}
		/* Only the first continue delivers the caller's signal;
		 * later iterations continue with no signal.
		 */
59		sig = 0;
60
61		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
62	} while((n >= 0) && WIFSTOPPED(status) &&
63		((WSTOPSIG(status) == SIGVTALRM) ||
64		 /* running UML inside a detached screen can cause
65		  * SIGWINCHes
66		  */
67		 (WSTOPSIG(status) == SIGWINCH)));
68
	/* Anything other than a SIGUSR1/SIGTRAP stop is fatal - dump the
	 * stub's registers to aid debugging, then panic.
	 */
69	if((n < 0) || !WIFSTOPPED(status) ||
70	   (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
71		unsigned long regs[HOST_FRAME_SIZE];
72
73		if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
74			printk("Failed to get registers from stub, "
75			       "errno = %d\n", errno);
76		else {
77			int i;
78
79			printk("Stub registers -\n");
80			for(i = 0; i < HOST_FRAME_SIZE; i++)
81				printk("\t%d - %lx\n", i, regs[i]);
82		}
83		panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
84		      "pid = %d, n = %d, errno = %d, status = 0x%x\n",
85		      fname, pid, n, errno, status);
86	}
87}
88
89extern unsigned long current_stub_stack(void);
90
/*
 * get_skas_faultinfo - fetch fault information for a faulting child into *fi.
 *
 * Two mechanisms, depending on host support:
 *  - PTRACE_FAULTINFO available (ptrace_faultinfo nonzero): ask the host
 *    kernel directly.
 *  - otherwise: deliver SIGSEGV to the child so its stub SEGV handler runs
 *    (wait_stub_done), then copy the faultinfo the stub left at the start
 *    of the stub stack page.
 */
91void get_skas_faultinfo(int pid, struct faultinfo * fi)
92{
93	int err;
94
95	if(ptrace_faultinfo){
96		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
97		if(err)
98			panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
99			      "errno = %d\n", errno);
100
101		/* Special handling for i386, which has different structs */
		/* ptrace only fills sizeof(struct ptrace_faultinfo) bytes;
		 * zero the remainder of *fi so no stale data survives.
		 */
102		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
103			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
104			       sizeof(struct faultinfo) -
105			       sizeof(struct ptrace_faultinfo));
106	}
107	else {
108		wait_stub_done(pid, SIGSEGV, "get_skas_faultinfo");
109
110		/* faultinfo is prepared by the stub-segv-handler at start of
111		 * the stub stack page. We just have to copy it.
112		 */
113		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
114	}
115}
116
/* Handle a SIGSEGV stop of the child: retrieve the fault information into
 * regs->skas.faultinfo, then hand the fault to the generic segv() path.
 */
117static void handle_segv(int pid, union uml_pt_regs * regs)
118{
119	get_skas_faultinfo(pid, &regs->skas.faultinfo);
120	segv(regs->skas.faultinfo, 0, 1, NULL);
121}
122
123/*To use the same value of using_sysemu as the caller, ask it that value (in local_using_sysemu)*/
/*
 * handle_trap - process a syscall stop (SIGTRAP | 0x80) of the child.
 *
 * With sysemu the host never runs the syscall, so we go straight to
 * handle_syscall(). Without sysemu we must first nullify the syscall on
 * the host (rewrite its number to the harmless __NR_getpid), let it run
 * to completion under PTRACE_SYSCALL, and wait for the syscall-exit stop
 * before emulating the real syscall ourselves.
 */
124static void handle_trap(int pid, union uml_pt_regs *regs, int local_using_sysemu)
125{
126	int err, status;
127
128	/* Mark this as a syscall */
129	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->skas.regs);
130
131	if (!local_using_sysemu)
132	{
		/* Replace the syscall number with __NR_getpid so the host
		 * executes a harmless syscall instead of the real one.
		 */
133		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
134			     __NR_getpid);
135		if(err < 0)
136			panic("handle_trap - nullifying syscall failed errno = %d\n",
137			      errno);
138
139		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
140		if(err < 0)
141			panic("handle_trap - continuing to end of syscall failed, "
142			      "errno = %d\n", errno);
143
		/* SIGTRAP + 0x80 is the syscall stop when
		 * PTRACE_O_TRACESYSGOOD is set.
		 */
144		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
145		if((err < 0) || !WIFSTOPPED(status) ||
146		   (WSTOPSIG(status) != SIGTRAP + 0x80))
147			panic("handle_trap - failed to wait at end of syscall, "
148			      "errno = %d, status = %d\n", errno, status);
149	}
150
151	handle_syscall(regs);
152}
153
154extern int __syscall_stub_start;
155
/*
 * userspace_tramp - entry point of the cloned child that will run userspace.
 *
 * stack - the stub stack page (physical-memory backed), or NULL.
 *
 * Arranges to be traced by the parent (PTRACE_TRACEME), sets up signals
 * and the timer, and - when /proc/mm is not available - maps the syscall
 * stub code and stub data pages at their fixed addresses. When
 * PTRACE_FAULTINFO is unavailable, it also installs the stub SEGV handler
 * on an alternate stack in the stub data page. Finally it stops itself
 * (SIGSTOP) so the parent can take control; the return is never normally
 * reached while traced.
 */
156static int userspace_tramp(void *stack)
157{
158	void *addr;
159
160	ptrace(PTRACE_TRACEME, 0, 0, 0);
161
162	init_new_thread_signals(1);
163	enable_timer();
164
165	if(!proc_mm){
166		/* This has a pte, but it can't be mapped in with the usual
167		 * tlb_flush mechanism because this is part of that mechanism
168		 */
169		int fd;
170		__u64 offset;
171		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
172		addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(),
173			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
174		if(addr == MAP_FAILED){
175			printk("mapping mmap stub failed, errno = %d\n",
176			       errno);
177			exit(1);
178		}
179
180		if(stack != NULL){
181			fd = phys_mapping(to_phys(stack), &offset);
182			addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(),
183				    PROT_READ | PROT_WRITE,
184				    MAP_FIXED | MAP_SHARED, fd, offset);
185			if(addr == MAP_FAILED){
186				printk("mapping segfault stack failed, "
187				       "errno = %d\n", errno);
188				exit(1);
189			}
190		}
191	}
192	if(!ptrace_faultinfo && (stack != NULL)){
		/* v is the stub SEGV handler's address as seen inside the
		 * child's mapping of the stub code page.
		 */
193		unsigned long v = UML_CONFIG_STUB_CODE +
194			(unsigned long) stub_segv_handler -
195			(unsigned long) &__syscall_stub_start;
196
197		set_sigstack((void *) UML_CONFIG_STUB_DATA, page_size());
198		set_handler(SIGSEGV, (void *) v, SA_ONSTACK,
199			    SIGIO, SIGWINCH, SIGALRM, SIGVTALRM,
200			    SIGUSR1, -1);
201	}
202
	/* Stop so the tracing parent can take over. */
203	os_stop_process(os_getpid());
204	return(0);
205}
206
207/* Each element set once, and only accessed by a single processor anyway */
208#undef NR_CPUS
209#define NR_CPUS 1
210int userspace_pid[NR_CPUS];
211
/*
 * start_userspace - clone the userspace process and prepare it for tracing.
 *
 * stub_stack - address of the stub stack page, passed to userspace_tramp.
 *
 * Allocates a temporary clone stack, clones userspace_tramp (sharing the
 * address space only when /proc/mm is in use), waits until the child has
 * stopped itself with SIGSTOP (ignoring intervening SIGVTALRM stops),
 * enables syscall-stop marking (PTRACE_O_TRACESYSGOOD), frees the
 * temporary stack and returns the child's pid. Panics on any failure.
 */
212int start_userspace(unsigned long stub_stack)
213{
214	void *stack;
215	unsigned long sp;
216	int pid, status, n, flags;
217
218	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
219		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
220	if(stack == MAP_FAILED)
221		panic("start_userspace : mmap failed, errno = %d", errno);
	/* Stack grows down - point sp at the top word of the new page. */
222	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
223
224	flags = CLONE_FILES | SIGCHLD;
225	if(proc_mm) flags |= CLONE_VM;
226	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
227	if(pid < 0)
228		panic("start_userspace : clone failed, errno = %d", errno);
229
	/* Skip timer-signal stops until the child's self-SIGSTOP arrives. */
230	do {
231		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
232		if(n < 0)
233			panic("start_userspace : wait failed, errno = %d",
234			      errno);
235	} while(WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));
236
237	if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
238		panic("start_userspace : expected SIGSTOP, got status = %d",
239		      status);
240
	/* TRACESYSGOOD makes syscall stops report SIGTRAP | 0x80, which
	 * handle_trap/userspace rely on to tell them apart from real traps.
	 */
241	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD) < 0)
242		panic("start_userspace : PTRACE_OLDSETOPTIONS failed, errno=%d\n",
243		      errno);
244
	/* The clone stack was only needed to get the child started. */
245	if(munmap(stack, PAGE_SIZE) < 0)
246		panic("start_userspace : munmap failed, errno = %d\n", errno);
247
248	return(pid);
249}
250
251void userspace(union uml_pt_regs *regs)
252{
253 int err, status, op, pid = userspace_pid[0];
254 int local_using_sysemu; /*To prevent races if using_sysemu changes under us.*/
255
256 while(1){
257 restore_registers(pid, regs);
258
259 /* Now we set local_using_sysemu to be used for one loop */
260 local_using_sysemu = get_using_sysemu();
261
262 op = SELECT_PTRACE_OPERATION(local_using_sysemu, singlestepping(NULL));
263
264 err = ptrace(op, pid, 0, 0);
265 if(err)
266 panic("userspace - could not resume userspace process, "
267 "pid=%d, ptrace operation = %d, errno = %d\n",
268 op, errno);
269
270 CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
271 if(err < 0)
272 panic("userspace - waitpid failed, errno = %d\n",
273 errno);
274
275 regs->skas.is_user = 1;
276 save_registers(pid, regs);
277 UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
278
279 if(WIFSTOPPED(status)){
280 switch(WSTOPSIG(status)){
281 case SIGSEGV:
282 if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo)
283 user_signal(SIGSEGV, regs, pid);
284 else handle_segv(pid, regs);
285 break;
286 case SIGTRAP + 0x80:
287 handle_trap(pid, regs, local_using_sysemu);
288 break;
289 case SIGTRAP:
290 relay_signal(SIGTRAP, regs);
291 break;
292 case SIGIO:
293 case SIGVTALRM:
294 case SIGILL:
295 case SIGBUS:
296 case SIGFPE:
297 case SIGWINCH:
298 user_signal(WSTOPSIG(status), regs, pid);
299 break;
300 default:
301 printk("userspace - child stopped with signal "
302 "%d\n", WSTOPSIG(status));
303 }
304 pid = userspace_pid[0];
305 interrupt_end();
306
307 /* Avoid -ERESTARTSYS handling in host */
308 if(PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
309 PT_SYSCALL_NR(regs->skas.regs) = -1;
310 }
311 }
312}
313#define INIT_JMP_NEW_THREAD 0
314#define INIT_JMP_REMOVE_SIGSTACK 1
315#define INIT_JMP_CALLBACK 2
316#define INIT_JMP_HALT 3
317#define INIT_JMP_REBOOT 4
318
319int copy_context_skas0(unsigned long new_stack, int pid)
320{
321 int err;
322 unsigned long regs[MAX_REG_NR];
323 unsigned long current_stack = current_stub_stack();
324 struct stub_data *data = (struct stub_data *) current_stack;
325 struct stub_data *child_data = (struct stub_data *) new_stack;
326 __u64 new_offset;
327 int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);
328
329 /* prepare offset and fd of child's stack as argument for parent's
330 * and child's mmap2 calls
331 */
332 *data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
333 .fd = new_fd,
334 .timer = ((struct itimerval)
335 { { 0, 1000000 / hz() },
336 { 0, 1000000 / hz() }})});
337 get_safe_registers(regs);
338
339 /* Set parent's instruction pointer to start of clone-stub */
340 regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
341 (unsigned long) stub_clone_handler -
342 (unsigned long) &__syscall_stub_start;
343 regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + PAGE_SIZE -
344 sizeof(void *);
345#ifdef __SIGNAL_FRAMESIZE
346 regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
347#endif
348 err = ptrace_setregs(pid, regs);
349 if(err < 0)
350 panic("copy_context_skas0 : PTRACE_SETREGS failed, "
351 "pid = %d, errno = %d\n", pid, errno);
352
353 /* set a well known return code for detection of child write failure */
354 child_data->err = 12345678;
355
356 /* Wait, until parent has finished its work: read child's pid from
357 * parent's stack, and check, if bad result.
358 */
359 wait_stub_done(pid, 0, "copy_context_skas0");
360
361 pid = data->err;
362 if(pid < 0)
363 panic("copy_context_skas0 - stub-parent reports error %d\n",
364 pid);
365
366 /* Wait, until child has finished too: read child's result from
367 * child's stack and check it.
368 */
369 wait_stub_done(pid, -1, "copy_context_skas0");
370 if (child_data->err != UML_CONFIG_STUB_DATA)
371 panic("copy_context_skas0 - stub-child reports error %d\n",
372 child_data->err);
373
374 if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
375 (void *)PTRACE_O_TRACESYSGOOD) < 0)
376 panic("copy_context_skas0 : PTRACE_OLDSETOPTIONS failed, "
377 "errno = %d\n", errno);
378
379 return pid;
380}
381
382/*
383 * This is used only, if stub pages are needed, while proc_mm is
384 * availabl. Opening /proc/mm creates a new mm_context, which lacks
385 * the stub-pages. Thus, we map them using /proc/mm-fd
386 */
387void map_stub_pages(int fd, unsigned long code,
388 unsigned long data, unsigned long stack)
389{
390 struct proc_mm_op mmop;
391 int n;
392 __u64 code_offset;
393 int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
394 &code_offset);
395
396 mmop = ((struct proc_mm_op) { .op = MM_MMAP,
397 .u =
398 { .mmap =
399 { .addr = code,
400 .len = PAGE_SIZE,
401 .prot = PROT_EXEC,
402 .flags = MAP_FIXED | MAP_PRIVATE,
403 .fd = code_fd,
404 .offset = code_offset
405 } } });
406 n = os_write_file(fd, &mmop, sizeof(mmop));
407 if(n != sizeof(mmop))
408 panic("map_stub_pages : /proc/mm map for code failed, "
409 "err = %d\n", -n);
410
411 if ( stack ) {
412 __u64 map_offset;
413 int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
414 mmop = ((struct proc_mm_op)
415 { .op = MM_MMAP,
416 .u =
417 { .mmap =
418 { .addr = data,
419 .len = PAGE_SIZE,
420 .prot = PROT_READ | PROT_WRITE,
421 .flags = MAP_FIXED | MAP_SHARED,
422 .fd = map_fd,
423 .offset = map_offset
424 } } });
425 n = os_write_file(fd, &mmop, sizeof(mmop));
426 if(n != sizeof(mmop))
427 panic("map_stub_pages : /proc/mm map for data failed, "
428 "err = %d\n", -n);
429 }
430}
431
432void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
433 void (*handler)(int))
434{
435 unsigned long flags;
436 sigjmp_buf switch_buf, fork_buf;
437 int enable;
438
439 *switch_buf_ptr = &switch_buf;
440 *fork_buf_ptr = &fork_buf;
441
442 /* Somewhat subtle - siglongjmp restores the signal mask before doing
443 * the longjmp. This means that when jumping from one stack to another
444 * when the target stack has interrupts enabled, an interrupt may occur
445 * on the source stack. This is bad when starting up a process because
446 * it's not supposed to get timer ticks until it has been scheduled.
447 * So, we disable interrupts around the sigsetjmp to ensure that
448 * they can't happen until we get back here where they are safe.
449 */
450 flags = get_signals();
451 block_signals();
452 if(UML_SIGSETJMP(&fork_buf, enable) == 0)
453 new_thread_proc(stack, handler);
454
455 remove_sigstack();
456
457 set_signals(flags);
458}
459
460void thread_wait(void *sw, void *fb)
461{
462 sigjmp_buf buf, **switch_buf = sw, *fork_buf;
463 int enable;
464
465 *switch_buf = &buf;
466 fork_buf = fb;
467 if(UML_SIGSETJMP(&buf, enable) == 0)
468 siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK);
469}
470
471void switch_threads(void *me, void *next)
472{
473 sigjmp_buf my_buf, **me_ptr = me, *next_buf = next;
474 int enable;
475
476 *me_ptr = &my_buf;
477 if(UML_SIGSETJMP(&my_buf, enable) == 0)
478 UML_SIGLONGJMP(next_buf, 1);
479}
480
481static sigjmp_buf initial_jmpbuf;
482
483/* XXX Make these percpu */
484static void (*cb_proc)(void *arg);
485static void *cb_arg;
486static sigjmp_buf *cb_back;
487
488int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
489{
490 sigjmp_buf **switch_buf = switch_buf_ptr;
491 int n, enable;
492
493 set_handler(SIGWINCH, (__sighandler_t) sig_handler,
494 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM,
495 SIGVTALRM, -1);
496
497 *fork_buf_ptr = &initial_jmpbuf;
498 n = UML_SIGSETJMP(&initial_jmpbuf, enable);
499 switch(n){
500 case INIT_JMP_NEW_THREAD:
501 new_thread_proc((void *) stack, new_thread_handler);
502 break;
503 case INIT_JMP_REMOVE_SIGSTACK:
504 remove_sigstack();
505 break;
506 case INIT_JMP_CALLBACK:
507 (*cb_proc)(cb_arg);
508 UML_SIGLONGJMP(cb_back, 1);
509 break;
510 case INIT_JMP_HALT:
511 kmalloc_ok = 0;
512 return(0);
513 case INIT_JMP_REBOOT:
514 kmalloc_ok = 0;
515 return(1);
516 default:
517 panic("Bad sigsetjmp return in start_idle_thread - %d\n", n);
518 }
519 UML_SIGLONGJMP(*switch_buf, 1);
520}
521
522void initial_thread_cb_skas(void (*proc)(void *), void *arg)
523{
524 sigjmp_buf here;
525 int enable;
526
527 cb_proc = proc;
528 cb_arg = arg;
529 cb_back = &here;
530
531 block_signals();
532 if(UML_SIGSETJMP(&here, enable) == 0)
533 UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
534 unblock_signals();
535
536 cb_proc = NULL;
537 cb_arg = NULL;
538 cb_back = NULL;
539}
540
541void halt_skas(void)
542{
543 block_signals();
544 UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
545}
546
547void reboot_skas(void)
548{
549 block_signals();
550 UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
551}
552
553void switch_mm_skas(struct mm_id *mm_idp)
554{
555 int err;
556
557#warning need cpu pid in switch_mm_skas
558 if(proc_mm){
559 err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
560 mm_idp->u.mm_fd);
561 if(err)
562 panic("switch_mm_skas - PTRACE_SWITCH_MM failed, "
563 "errno = %d\n", errno);
564 }
565 else userspace_pid[0] = mm_idp->u.pid;
566}
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index b47e5e71d1a..6c5b17ed59e 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -29,7 +29,6 @@
29#include "irq_user.h" 29#include "irq_user.h"
30#include "ptrace_user.h" 30#include "ptrace_user.h"
31#include "mem_user.h" 31#include "mem_user.h"
32#include "time_user.h"
33#include "init.h" 32#include "init.h"
34#include "os.h" 33#include "os.h"
35#include "uml-config.h" 34#include "uml-config.h"
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index cf30a39bc48..6f7626775ac 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -1,21 +1,128 @@
1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#include <stdio.h>
1#include <stdlib.h> 7#include <stdlib.h>
8#include <unistd.h>
9#include <time.h>
2#include <sys/time.h> 10#include <sys/time.h>
11#include <signal.h>
12#include <errno.h>
13#include "user_util.h"
14#include "kern_util.h"
15#include "user.h"
16#include "process.h"
17#include "kern_constants.h"
18#include "os.h"
19
20/* XXX This really needs to be declared and initialized in a kernel file since
21 * it's in <linux/time.h>
22 */
23extern struct timespec wall_to_monotonic;
24
25static void set_interval(int timer_type)
26{
27 int usec = 1000000/hz();
28 struct itimerval interval = ((struct itimerval) { { 0, usec },
29 { 0, usec } });
30
31 if(setitimer(timer_type, &interval, NULL) == -1)
32 panic("setitimer failed - errno = %d\n", errno);
33}
34
35void enable_timer(void)
36{
37 set_interval(ITIMER_VIRTUAL);
38}
39
40void disable_timer(void)
41{
42 struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
43 if((setitimer(ITIMER_VIRTUAL, &disable, NULL) < 0) ||
44 (setitimer(ITIMER_REAL, &disable, NULL) < 0))
45 printk("disnable_timer - setitimer failed, errno = %d\n",
46 errno);
47 /* If there are signals already queued, after unblocking ignore them */
48 set_handler(SIGALRM, SIG_IGN, 0, -1);
49 set_handler(SIGVTALRM, SIG_IGN, 0, -1);
50}
51
52void switch_timers(int to_real)
53{
54 struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
55 struct itimerval enable = ((struct itimerval) { { 0, 1000000/hz() },
56 { 0, 1000000/hz() }});
57 int old, new;
58
59 if(to_real){
60 old = ITIMER_VIRTUAL;
61 new = ITIMER_REAL;
62 }
63 else {
64 old = ITIMER_REAL;
65 new = ITIMER_VIRTUAL;
66 }
67
68 if((setitimer(old, &disable, NULL) < 0) ||
69 (setitimer(new, &enable, NULL)))
70 printk("switch_timers - setitimer failed, errno = %d\n",
71 errno);
72}
3 73
4unsigned long long os_usecs(void) 74void uml_idle_timer(void)
75{
76 if(signal(SIGVTALRM, SIG_IGN) == SIG_ERR)
77 panic("Couldn't unset SIGVTALRM handler");
78
79 set_handler(SIGALRM, (__sighandler_t) alarm_handler,
80 SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGVTALRM, -1);
81 set_interval(ITIMER_REAL);
82}
83
84extern void ktime_get_ts(struct timespec *ts);
85#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
86
87void time_init(void)
88{
89 struct timespec now;
90
91 if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR)
92 panic("Couldn't set SIGVTALRM handler");
93 set_interval(ITIMER_VIRTUAL);
94
95 do_posix_clock_monotonic_gettime(&now);
96 wall_to_monotonic.tv_sec = -now.tv_sec;
97 wall_to_monotonic.tv_nsec = -now.tv_nsec;
98}
99
100unsigned long long os_nsecs(void)
5{ 101{
6 struct timeval tv; 102 struct timeval tv;
7 103
8 gettimeofday(&tv, NULL); 104 gettimeofday(&tv, NULL);
9 return((unsigned long long) tv.tv_sec * 1000000 + tv.tv_usec); 105 return((unsigned long long) tv.tv_sec * BILLION + tv.tv_usec * 1000);
10} 106}
11 107
12/* 108void idle_sleep(int secs)
13 * Overrides for Emacs so that we follow Linus's tabbing style. 109{
14 * Emacs will notice this stuff at the end of the file and automatically 110 struct timespec ts;
15 * adjust the settings for this buffer only. This must remain at the end 111
16 * of the file. 112 ts.tv_sec = secs;
17 * --------------------------------------------------------------------------- 113 ts.tv_nsec = 0;
18 * Local variables: 114 nanosleep(&ts, NULL);
19 * c-file-style: "linux" 115}
20 * End: 116
21 */ 117/* XXX This partly duplicates init_irq_signals */
118
119void user_time_init(void)
120{
121 set_handler(SIGVTALRM, (__sighandler_t) alarm_handler,
122 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH,
123 SIGALRM, SIGUSR2, -1);
124 set_handler(SIGALRM, (__sighandler_t) alarm_handler,
125 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH,
126 SIGVTALRM, SIGUSR2, -1);
127 set_interval(ITIMER_VIRTUAL);
128}
diff --git a/arch/um/os-Linux/trap.c b/arch/um/os-Linux/trap.c
index 321e1c8e227..a9f6b26f982 100644
--- a/arch/um/os-Linux/trap.c
+++ b/arch/um/os-Linux/trap.c
@@ -10,6 +10,7 @@
10#include "user_util.h" 10#include "user_util.h"
11#include "os.h" 11#include "os.h"
12#include "mode.h" 12#include "mode.h"
13#include "longjmp.h"
13 14
14void usr2_handler(int sig, union uml_pt_regs *regs) 15void usr2_handler(int sig, union uml_pt_regs *regs)
15{ 16{
@@ -36,5 +37,5 @@ void do_longjmp(void *b, int val)
36{ 37{
37 sigjmp_buf *buf = b; 38 sigjmp_buf *buf = b;
38 39
39 siglongjmp(*buf, val); 40 UML_SIGLONGJMP(buf, val);
40} 41}
diff --git a/arch/um/os-Linux/tt.c b/arch/um/os-Linux/tt.c
index cb2648b79d0..919d19f1153 100644
--- a/arch/um/os-Linux/tt.c
+++ b/arch/um/os-Linux/tt.c
@@ -27,7 +27,6 @@
27#include "sysdep/sigcontext.h" 27#include "sysdep/sigcontext.h"
28#include "irq_user.h" 28#include "irq_user.h"
29#include "ptrace_user.h" 29#include "ptrace_user.h"
30#include "time_user.h"
31#include "init.h" 30#include "init.h"
32#include "os.h" 31#include "os.h"
33#include "uml-config.h" 32#include "uml-config.h"
@@ -63,6 +62,54 @@ void kill_child_dead(int pid)
63 } while(1); 62 } while(1);
64} 63}
65 64
65void stop(void)
66{
67 while(1) sleep(1000000);
68}
69
70int wait_for_stop(int pid, int sig, int cont_type, void *relay)
71{
72 sigset_t *relay_signals = relay;
73 int status, ret;
74
75 while(1){
76 CATCH_EINTR(ret = waitpid(pid, &status, WUNTRACED));
77 if((ret < 0) ||
78 !WIFSTOPPED(status) || (WSTOPSIG(status) != sig)){
79 if(ret < 0){
80 printk("wait failed, errno = %d\n",
81 errno);
82 }
83 else if(WIFEXITED(status))
84 printk("process %d exited with status %d\n",
85 pid, WEXITSTATUS(status));
86 else if(WIFSIGNALED(status))
87 printk("process %d exited with signal %d\n",
88 pid, WTERMSIG(status));
89 else if((WSTOPSIG(status) == SIGVTALRM) ||
90 (WSTOPSIG(status) == SIGALRM) ||
91 (WSTOPSIG(status) == SIGIO) ||
92 (WSTOPSIG(status) == SIGPROF) ||
93 (WSTOPSIG(status) == SIGCHLD) ||
94 (WSTOPSIG(status) == SIGWINCH) ||
95 (WSTOPSIG(status) == SIGINT)){
96 ptrace(cont_type, pid, 0, WSTOPSIG(status));
97 continue;
98 }
99 else if((relay_signals != NULL) &&
100 sigismember(relay_signals, WSTOPSIG(status))){
101 ptrace(cont_type, pid, 0, WSTOPSIG(status));
102 continue;
103 }
104 else printk("process %d stopped with signal %d\n",
105 pid, WSTOPSIG(status));
106 panic("wait_for_stop failed to wait for %d to stop "
107 "with %d\n", pid, sig);
108 }
109 return(status);
110 }
111}
112
66/* 113/*
67 *------------------------- 114 *-------------------------
68 * only for tt mode (will be deleted in future...) 115 * only for tt mode (will be deleted in future...)
diff --git a/arch/um/os-Linux/uaccess.c b/arch/um/os-Linux/uaccess.c
index 38d710158c3..166fb66995d 100644
--- a/arch/um/os-Linux/uaccess.c
+++ b/arch/um/os-Linux/uaccess.c
@@ -6,6 +6,7 @@
6 6
7#include <setjmp.h> 7#include <setjmp.h>
8#include <string.h> 8#include <string.h>
9#include "longjmp.h"
9 10
10unsigned long __do_user_copy(void *to, const void *from, int n, 11unsigned long __do_user_copy(void *to, const void *from, int n,
11 void **fault_addr, void **fault_catcher, 12 void **fault_addr, void **fault_catcher,
@@ -13,10 +14,11 @@ unsigned long __do_user_copy(void *to, const void *from, int n,
13 int n), int *faulted_out) 14 int n), int *faulted_out)
14{ 15{
15 unsigned long *faddrp = (unsigned long *) fault_addr, ret; 16 unsigned long *faddrp = (unsigned long *) fault_addr, ret;
17 int enable;
16 18
17 sigjmp_buf jbuf; 19 sigjmp_buf jbuf;
18 *fault_catcher = &jbuf; 20 *fault_catcher = &jbuf;
19 if(sigsetjmp(jbuf, 1) == 0){ 21 if(UML_SIGSETJMP(&jbuf, enable) == 0){
20 (*op)(to, from, n); 22 (*op)(to, from, n);
21 ret = 0; 23 ret = 0;
22 *faulted_out = 0; 24 *faulted_out = 0;
diff --git a/arch/um/kernel/user_util.c b/arch/um/os-Linux/util.c
index 4c231161f25..e32065e2fdc 100644
--- a/arch/um/kernel/user_util.c
+++ b/arch/um/os-Linux/util.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -29,17 +29,14 @@
29#include "init.h" 29#include "init.h"
30#include "ptrace_user.h" 30#include "ptrace_user.h"
31#include "uml-config.h" 31#include "uml-config.h"
32 32#include "os.h"
33void stop(void) 33#include "longjmp.h"
34{
35 while(1) sleep(1000000);
36}
37 34
38void stack_protections(unsigned long address) 35void stack_protections(unsigned long address)
39{ 36{
40 int prot = PROT_READ | PROT_WRITE | PROT_EXEC; 37 int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
41 38
42 if(mprotect((void *) address, page_size(), prot) < 0) 39 if(mprotect((void *) address, page_size(), prot) < 0)
43 panic("protecting stack failed, errno = %d", errno); 40 panic("protecting stack failed, errno = %d", errno);
44} 41}
45 42
@@ -59,49 +56,6 @@ void task_protections(unsigned long address)
59 panic("protecting stack failed, errno = %d", errno); 56 panic("protecting stack failed, errno = %d", errno);
60} 57}
61 58
62int wait_for_stop(int pid, int sig, int cont_type, void *relay)
63{
64 sigset_t *relay_signals = relay;
65 int status, ret;
66
67 while(1){
68 CATCH_EINTR(ret = waitpid(pid, &status, WUNTRACED));
69 if((ret < 0) ||
70 !WIFSTOPPED(status) || (WSTOPSIG(status) != sig)){
71 if(ret < 0){
72 printk("wait failed, errno = %d\n",
73 errno);
74 }
75 else if(WIFEXITED(status))
76 printk("process %d exited with status %d\n",
77 pid, WEXITSTATUS(status));
78 else if(WIFSIGNALED(status))
79 printk("process %d exited with signal %d\n",
80 pid, WTERMSIG(status));
81 else if((WSTOPSIG(status) == SIGVTALRM) ||
82 (WSTOPSIG(status) == SIGALRM) ||
83 (WSTOPSIG(status) == SIGIO) ||
84 (WSTOPSIG(status) == SIGPROF) ||
85 (WSTOPSIG(status) == SIGCHLD) ||
86 (WSTOPSIG(status) == SIGWINCH) ||
87 (WSTOPSIG(status) == SIGINT)){
88 ptrace(cont_type, pid, 0, WSTOPSIG(status));
89 continue;
90 }
91 else if((relay_signals != NULL) &&
92 sigismember(relay_signals, WSTOPSIG(status))){
93 ptrace(cont_type, pid, 0, WSTOPSIG(status));
94 continue;
95 }
96 else printk("process %d stopped with signal %d\n",
97 pid, WSTOPSIG(status));
98 panic("wait_for_stop failed to wait for %d to stop "
99 "with %d\n", pid, sig);
100 }
101 return(status);
102 }
103}
104
105int raw(int fd) 59int raw(int fd)
106{ 60{
107 struct termios tt; 61 struct termios tt;
@@ -113,7 +67,7 @@ int raw(int fd)
113 67
114 cfmakeraw(&tt); 68 cfmakeraw(&tt);
115 69
116 CATCH_EINTR(err = tcsetattr(fd, TCSADRAIN, &tt)); 70 CATCH_EINTR(err = tcsetattr(fd, TCSADRAIN, &tt));
117 if(err < 0) 71 if(err < 0)
118 return -errno; 72 return -errno;
119 73
@@ -149,7 +103,7 @@ void setup_hostinfo(void)
149 103
150int setjmp_wrapper(void (*proc)(void *, void *), ...) 104int setjmp_wrapper(void (*proc)(void *, void *), ...)
151{ 105{
152 va_list args; 106 va_list args;
153 sigjmp_buf buf; 107 sigjmp_buf buf;
154 int n; 108 int n;
155 109
@@ -161,14 +115,3 @@ int setjmp_wrapper(void (*proc)(void *, void *), ...)
161 va_end(args); 115 va_end(args);
162 return(n); 116 return(n);
163} 117}
164
165/*
166 * Overrides for Emacs so that we follow Linus's tabbing style.
167 * Emacs will notice this stuff at the end of the file and automatically
168 * adjust the settings for this buffer only. This must remain at the end
169 * of the file.
170 * ---------------------------------------------------------------------------
171 * Local variables:
172 * c-file-style: "linux"
173 * End:
174 */
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index 17746b4c08f..0cdfd4481d5 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -16,6 +16,8 @@
16#include "choose-mode.h" 16#include "choose-mode.h"
17#include "kern.h" 17#include "kern.h"
18#include "mode_kern.h" 18#include "mode_kern.h"
19#include "proc_mm.h"
20#include "os.h"
19 21
20extern int modify_ldt(int func, void *ptr, unsigned long bytecount); 22extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
21 23
@@ -456,13 +458,14 @@ long init_new_ldt(struct mmu_context_skas * new_mm,
456 int i; 458 int i;
457 long page, err=0; 459 long page, err=0;
458 void *addr = NULL; 460 void *addr = NULL;
461 struct proc_mm_op copy;
459 462
460 memset(&desc, 0, sizeof(desc));
461 463
462 if(!ptrace_ldt) 464 if(!ptrace_ldt)
463 init_MUTEX(&new_mm->ldt.semaphore); 465 init_MUTEX(&new_mm->ldt.semaphore);
464 466
465 if(!from_mm){ 467 if(!from_mm){
468 memset(&desc, 0, sizeof(desc));
466 /* 469 /*
467 * We have to initialize a clean ldt. 470 * We have to initialize a clean ldt.
468 */ 471 */
@@ -494,8 +497,26 @@ long init_new_ldt(struct mmu_context_skas * new_mm,
494 } 497 }
495 } 498 }
496 new_mm->ldt.entry_count = 0; 499 new_mm->ldt.entry_count = 0;
500
501 goto out;
497 } 502 }
498 else if (!ptrace_ldt) { 503
504 if(proc_mm){
505 /* We have a valid from_mm, so we now have to copy the LDT of
506 * from_mm to new_mm, because using proc_mm an new mm with
507 * an empty/default LDT was created in new_mm()
508 */
509 copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
510 .u =
511 { .copy_segments =
512 from_mm->id.u.mm_fd } } );
513 i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
514 if(i != sizeof(copy))
515 printk("new_mm : /proc/mm copy_segments failed, "
516 "err = %d\n", -i);
517 }
518
519 if(!ptrace_ldt) {
499 /* Our local LDT is used to supply the data for 520 /* Our local LDT is used to supply the data for
500 * modify_ldt(READLDT), if PTRACE_LDT isn't available, 521 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
501 * i.e., we have to use the stub for modify_ldt, which 522 * i.e., we have to use the stub for modify_ldt, which
@@ -524,6 +545,7 @@ long init_new_ldt(struct mmu_context_skas * new_mm,
524 up(&from_mm->ldt.semaphore); 545 up(&from_mm->ldt.semaphore);
525 } 546 }
526 547
548 out:
527 return err; 549 return err;
528} 550}
529 551
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 5231fe83ea4..09a3eb74331 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -646,6 +646,7 @@ CONFIG_8139TOO=y
646# CONFIG_DL2K is not set 646# CONFIG_DL2K is not set
647CONFIG_E1000=y 647CONFIG_E1000=y
648# CONFIG_E1000_NAPI is not set 648# CONFIG_E1000_NAPI is not set
649# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
649# CONFIG_NS83820 is not set 650# CONFIG_NS83820 is not set
650# CONFIG_HAMACHI is not set 651# CONFIG_HAMACHI is not set
651# CONFIG_YELLOWFIN is not set 652# CONFIG_YELLOWFIN is not set
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 58f5bfb52c6..f05c2a80248 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -672,6 +672,19 @@ ia32_sys_call_table:
672 .quad sys_inotify_add_watch 672 .quad sys_inotify_add_watch
673 .quad sys_inotify_rm_watch 673 .quad sys_inotify_rm_watch
674 .quad sys_migrate_pages 674 .quad sys_migrate_pages
675 .quad compat_sys_openat /* 295 */
676 .quad sys_mkdirat
677 .quad sys_mknodat
678 .quad sys_fchownat
679 .quad sys_futimesat
680 .quad compat_sys_newfstatat /* 300 */
681 .quad sys_unlinkat
682 .quad sys_renameat
683 .quad sys_linkat
684 .quad sys_symlinkat
685 .quad sys_readlinkat /* 305 */
686 .quad sys_fchmodat
687 .quad sys_faccessat
675ia32_syscall_end: 688ia32_syscall_end:
676 .rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8 689 .rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
677 .quad ni_syscall 690 .quad ni_syscall
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 8ac4db09610..70f1bb808a2 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -146,7 +146,7 @@ void pda_init(int cpu)
146 pda->irqstackptr += IRQSTACKSIZE-64; 146 pda->irqstackptr += IRQSTACKSIZE-64;
147} 147}
148 148
149char boot_exception_stacks[(N_EXCEPTION_STACKS - 2) * EXCEPTION_STKSZ + DEBUG_STKSZ] 149char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
150__attribute__((section(".bss.page_aligned"))); 150__attribute__((section(".bss.page_aligned")));
151 151
152/* May not be marked __init: used by software suspend */ 152/* May not be marked __init: used by software suspend */
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 283c089537b..bddf431bbb7 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -68,4 +68,6 @@ source "drivers/infiniband/Kconfig"
68 68
69source "drivers/sn/Kconfig" 69source "drivers/sn/Kconfig"
70 70
71source "drivers/edac/Kconfig"
72
71endmenu 73endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7c45050ecd0..619dd964c51 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_PHONE) += telephony/
63obj-$(CONFIG_MD) += md/ 63obj-$(CONFIG_MD) += md/
64obj-$(CONFIG_BT) += bluetooth/ 64obj-$(CONFIG_BT) += bluetooth/
65obj-$(CONFIG_ISDN) += isdn/ 65obj-$(CONFIG_ISDN) += isdn/
66obj-$(CONFIG_EDAC) += edac/
66obj-$(CONFIG_MCA) += mca/ 67obj-$(CONFIG_MCA) += mca/
67obj-$(CONFIG_EISA) += eisa/ 68obj-$(CONFIG_EISA) += eisa/
68obj-$(CONFIG_CPU_FREQ) += cpufreq/ 69obj-$(CONFIG_CPU_FREQ) += cpufreq/
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 07c9be6a6bb..a85a60a93de 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -2630,7 +2630,7 @@ static int get_interface(struct slgt_info *info, int __user *if_mode)
2630static int set_interface(struct slgt_info *info, int if_mode) 2630static int set_interface(struct slgt_info *info, int if_mode)
2631{ 2631{
2632 unsigned long flags; 2632 unsigned long flags;
2633 unsigned char val; 2633 unsigned short val;
2634 2634
2635 DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode)); 2635 DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
2636 spin_lock_irqsave(&info->lock,flags); 2636 spin_lock_irqsave(&info->lock,flags);
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index bc56df8a347..4c272189cd4 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -34,7 +34,6 @@
34#include <linux/kernel.h> /* printk() */ 34#include <linux/kernel.h> /* printk() */
35#include <linux/fs.h> /* everything... */ 35#include <linux/fs.h> /* everything... */
36#include <linux/errno.h> /* error codes */ 36#include <linux/errno.h> /* error codes */
37#include <linux/delay.h> /* udelay */
38#include <linux/slab.h> 37#include <linux/slab.h>
39#include <linux/ioport.h> 38#include <linux/ioport.h>
40#include <linux/interrupt.h> 39#include <linux/interrupt.h>
@@ -156,6 +155,8 @@ This directory exports the following interfaces. There operation is
156documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4. 155documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4.
157alarms : 156alarms :
158current_ref : 157current_ref :
158received_ref_clk3a :
159received_ref_clk3b :
159enable_clk3a_output : 160enable_clk3a_output :
160enable_clk3b_output : 161enable_clk3b_output :
161enable_clka0_output : 162enable_clka0_output :
@@ -165,7 +166,7 @@ enable_clkb1_output :
165filter_select : 166filter_select :
166hardware_switching : 167hardware_switching :
167hardware_switching_mode : 168hardware_switching_mode :
168interrupt_switch : 169telclock_version :
169mode_select : 170mode_select :
170refalign : 171refalign :
171reset : 172reset :
@@ -173,7 +174,6 @@ select_amcb1_transmit_clock :
173select_amcb2_transmit_clock : 174select_amcb2_transmit_clock :
174select_redundant_clock : 175select_redundant_clock :
175select_ref_frequency : 176select_ref_frequency :
176test_mode :
177 177
178All sysfs interfaces are integers in hex format, i.e echo 99 > refalign 178All sysfs interfaces are integers in hex format, i.e echo 99 > refalign
179has the same effect as echo 0x99 > refalign. 179has the same effect as echo 0x99 > refalign.
@@ -226,7 +226,7 @@ static int tlclk_release(struct inode *inode, struct file *filp)
226 return 0; 226 return 0;
227} 227}
228 228
229ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count, 229static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
230 loff_t *f_pos) 230 loff_t *f_pos)
231{ 231{
232 if (count < sizeof(struct tlclk_alarms)) 232 if (count < sizeof(struct tlclk_alarms))
@@ -242,7 +242,7 @@ ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
242 return sizeof(struct tlclk_alarms); 242 return sizeof(struct tlclk_alarms);
243} 243}
244 244
245ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count, 245static ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
246 loff_t *f_pos) 246 loff_t *f_pos)
247{ 247{
248 return 0; 248 return 0;
@@ -278,21 +278,21 @@ static ssize_t show_current_ref(struct device *d,
278static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL); 278static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
279 279
280 280
281static ssize_t show_interrupt_switch(struct device *d, 281static ssize_t show_telclock_version(struct device *d,
282 struct device_attribute *attr, char *buf) 282 struct device_attribute *attr, char *buf)
283{ 283{
284 unsigned long ret_val; 284 unsigned long ret_val;
285 unsigned long flags; 285 unsigned long flags;
286 286
287 spin_lock_irqsave(&event_lock, flags); 287 spin_lock_irqsave(&event_lock, flags);
288 ret_val = inb(TLCLK_REG6); 288 ret_val = inb(TLCLK_REG5);
289 spin_unlock_irqrestore(&event_lock, flags); 289 spin_unlock_irqrestore(&event_lock, flags);
290 290
291 return sprintf(buf, "0x%lX\n", ret_val); 291 return sprintf(buf, "0x%lX\n", ret_val);
292} 292}
293 293
294static DEVICE_ATTR(interrupt_switch, S_IRUGO, 294static DEVICE_ATTR(telclock_version, S_IRUGO,
295 show_interrupt_switch, NULL); 295 show_telclock_version, NULL);
296 296
297static ssize_t show_alarms(struct device *d, 297static ssize_t show_alarms(struct device *d,
298 struct device_attribute *attr, char *buf) 298 struct device_attribute *attr, char *buf)
@@ -309,6 +309,50 @@ static ssize_t show_alarms(struct device *d,
309 309
310static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); 310static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
311 311
312static ssize_t store_received_ref_clk3a(struct device *d,
313 struct device_attribute *attr, const char *buf, size_t count)
314{
315 unsigned long tmp;
316 unsigned char val;
317 unsigned long flags;
318
319 sscanf(buf, "%lX", &tmp);
320 dev_dbg(d, ": tmp = 0x%lX\n", tmp);
321
322 val = (unsigned char)tmp;
323 spin_lock_irqsave(&event_lock, flags);
324 SET_PORT_BITS(TLCLK_REG1, 0xef, val);
325 spin_unlock_irqrestore(&event_lock, flags);
326
327 return strnlen(buf, count);
328}
329
330static DEVICE_ATTR(received_ref_clk3a, S_IWUGO, NULL,
331 store_received_ref_clk3a);
332
333
334static ssize_t store_received_ref_clk3b(struct device *d,
335 struct device_attribute *attr, const char *buf, size_t count)
336{
337 unsigned long tmp;
338 unsigned char val;
339 unsigned long flags;
340
341 sscanf(buf, "%lX", &tmp);
342 dev_dbg(d, ": tmp = 0x%lX\n", tmp);
343
344 val = (unsigned char)tmp;
345 spin_lock_irqsave(&event_lock, flags);
346 SET_PORT_BITS(TLCLK_REG1, 0xef, val << 1);
347 spin_unlock_irqrestore(&event_lock, flags);
348
349 return strnlen(buf, count);
350}
351
352static DEVICE_ATTR(received_ref_clk3b, S_IWUGO, NULL,
353 store_received_ref_clk3b);
354
355
312static ssize_t store_enable_clk3b_output(struct device *d, 356static ssize_t store_enable_clk3b_output(struct device *d,
313 struct device_attribute *attr, const char *buf, size_t count) 357 struct device_attribute *attr, const char *buf, size_t count)
314{ 358{
@@ -436,26 +480,6 @@ static ssize_t store_enable_clka0_output(struct device *d,
436static DEVICE_ATTR(enable_clka0_output, S_IWUGO, NULL, 480static DEVICE_ATTR(enable_clka0_output, S_IWUGO, NULL,
437 store_enable_clka0_output); 481 store_enable_clka0_output);
438 482
439static ssize_t store_test_mode(struct device *d,
440 struct device_attribute *attr, const char *buf, size_t count)
441{
442 unsigned long flags;
443 unsigned long tmp;
444 unsigned char val;
445
446 sscanf(buf, "%lX", &tmp);
447 dev_dbg(d, "tmp = 0x%lX\n", tmp);
448
449 val = (unsigned char)tmp;
450 spin_lock_irqsave(&event_lock, flags);
451 SET_PORT_BITS(TLCLK_REG4, 0xfd, 2);
452 spin_unlock_irqrestore(&event_lock, flags);
453
454 return strnlen(buf, count);
455}
456
457static DEVICE_ATTR(test_mode, S_IWUGO, NULL, store_test_mode);
458
459static ssize_t store_select_amcb2_transmit_clock(struct device *d, 483static ssize_t store_select_amcb2_transmit_clock(struct device *d,
460 struct device_attribute *attr, const char *buf, size_t count) 484 struct device_attribute *attr, const char *buf, size_t count)
461{ 485{
@@ -475,7 +499,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
475 SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38); 499 SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
476 switch (val) { 500 switch (val) {
477 case CLK_8_592MHz: 501 case CLK_8_592MHz:
478 SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); 502 SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
479 break; 503 break;
480 case CLK_11_184MHz: 504 case CLK_11_184MHz:
481 SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); 505 SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
@@ -484,7 +508,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
484 SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); 508 SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
485 break; 509 break;
486 case CLK_44_736MHz: 510 case CLK_44_736MHz:
487 SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); 511 SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
488 break; 512 break;
489 } 513 }
490 } else 514 } else
@@ -653,9 +677,7 @@ static ssize_t store_refalign (struct device *d,
653 dev_dbg(d, "tmp = 0x%lX\n", tmp); 677 dev_dbg(d, "tmp = 0x%lX\n", tmp);
654 spin_lock_irqsave(&event_lock, flags); 678 spin_lock_irqsave(&event_lock, flags);
655 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0); 679 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
656 udelay(2);
657 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08); 680 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
658 udelay(2);
659 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0); 681 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
660 spin_unlock_irqrestore(&event_lock, flags); 682 spin_unlock_irqrestore(&event_lock, flags);
661 683
@@ -706,15 +728,16 @@ static DEVICE_ATTR(reset, S_IWUGO, NULL, store_reset);
706 728
707static struct attribute *tlclk_sysfs_entries[] = { 729static struct attribute *tlclk_sysfs_entries[] = {
708 &dev_attr_current_ref.attr, 730 &dev_attr_current_ref.attr,
709 &dev_attr_interrupt_switch.attr, 731 &dev_attr_telclock_version.attr,
710 &dev_attr_alarms.attr, 732 &dev_attr_alarms.attr,
733 &dev_attr_received_ref_clk3a.attr,
734 &dev_attr_received_ref_clk3b.attr,
711 &dev_attr_enable_clk3a_output.attr, 735 &dev_attr_enable_clk3a_output.attr,
712 &dev_attr_enable_clk3b_output.attr, 736 &dev_attr_enable_clk3b_output.attr,
713 &dev_attr_enable_clkb1_output.attr, 737 &dev_attr_enable_clkb1_output.attr,
714 &dev_attr_enable_clka1_output.attr, 738 &dev_attr_enable_clka1_output.attr,
715 &dev_attr_enable_clkb0_output.attr, 739 &dev_attr_enable_clkb0_output.attr,
716 &dev_attr_enable_clka0_output.attr, 740 &dev_attr_enable_clka0_output.attr,
717 &dev_attr_test_mode.attr,
718 &dev_attr_select_amcb1_transmit_clock.attr, 741 &dev_attr_select_amcb1_transmit_clock.attr,
719 &dev_attr_select_amcb2_transmit_clock.attr, 742 &dev_attr_select_amcb2_transmit_clock.attr,
720 &dev_attr_select_redundant_clock.attr, 743 &dev_attr_select_redundant_clock.attr,
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
new file mode 100644
index 00000000000..4819e7fc00d
--- /dev/null
+++ b/drivers/edac/Kconfig
@@ -0,0 +1,102 @@
1#
2# EDAC Kconfig
3# Copyright (c) 2003 Linux Networx
4# Licensed and distributed under the GPL
5#
6# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $
7#
8
9menu 'EDAC - error detection and reporting (RAS)'
10
11config EDAC
12 tristate "EDAC core system error reporting"
13 depends on X86
14 default y
15 help
16 EDAC is designed to report errors in the core system.
17 These are low-level errors that are reported in the CPU or
18 supporting chipset: memory errors, cache errors, PCI errors,
19 thermal throttling, etc.. If unsure, select 'Y'.
20
21
22comment "Reporting subsystems"
23 depends on EDAC
24
25config EDAC_DEBUG
26 bool "Debugging"
27 depends on EDAC
28 help
29 This turns on debugging information for the entire EDAC
30 sub-system. You can insert module with "debug_level=x", current
31 there're four debug levels (x=0,1,2,3 from low to high).
32 Usually you should select 'N'.
33
34config EDAC_MM_EDAC
35 tristate "Main Memory EDAC (Error Detection And Correction) reporting"
36 depends on EDAC
37 default y
38 help
39 Some systems are able to detect and correct errors in main
40 memory. EDAC can report statistics on memory error
41 detection and correction (EDAC - or commonly referred to ECC
42 errors). EDAC will also try to decode where these errors
43 occurred so that a particular failing memory module can be
44 replaced. If unsure, select 'Y'.
45
46
47config EDAC_AMD76X
48 tristate "AMD 76x (760, 762, 768)"
49 depends on EDAC_MM_EDAC && PCI
50 help
51 Support for error detection and correction on the AMD 76x
52 series of chipsets used with the Athlon processor.
53
54config EDAC_E7XXX
55 tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
56 depends on EDAC_MM_EDAC && PCI
57 help
58 Support for error detection and correction on the Intel
59 E7205, E7500, E7501 and E7505 server chipsets.
60
61config EDAC_E752X
62 tristate "Intel e752x (e7520, e7525, e7320)"
63 depends on EDAC_MM_EDAC && PCI
64 help
65 Support for error detection and correction on the Intel
66 E7520, E7525, E7320 server chipsets.
67
68config EDAC_I82875P
69 tristate "Intel 82875p (D82875P, E7210)"
70 depends on EDAC_MM_EDAC && PCI
71 help
72 Support for error detection and correction on the Intel
73 DP82785P and E7210 server chipsets.
74
75config EDAC_I82860
76 tristate "Intel 82860"
77 depends on EDAC_MM_EDAC && PCI
78 help
79 Support for error detection and correction on the Intel
80 82860 chipset.
81
82config EDAC_R82600
83 tristate "Radisys 82600 embedded chipset"
84 depends on EDAC_MM_EDAC
85 help
86 Support for error detection and correction on the Radisys
87 82600 embedded chipset.
88
89choice
90 prompt "Error detecting method"
91 depends on EDAC
92 default EDAC_POLL
93
94config EDAC_POLL
95 bool "Poll for errors"
96 depends on EDAC
97 help
98 Poll the chipset periodically to detect errors.
99
100endchoice
101
102endmenu
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
new file mode 100644
index 00000000000..93137fdab4b
--- /dev/null
+++ b/drivers/edac/Makefile
@@ -0,0 +1,18 @@
1#
2# Makefile for the Linux kernel EDAC drivers.
3#
4# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
5# This file may be distributed under the terms of the
6# GNU General Public License.
7#
8# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $
9
10
11obj-$(CONFIG_EDAC_MM_EDAC) += edac_mc.o
12obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
13obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
14obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
15obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
16obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
17obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
18
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
new file mode 100644
index 00000000000..2fcc8120b53
--- /dev/null
+++ b/drivers/edac/amd76x_edac.c
@@ -0,0 +1,356 @@
1/*
2 * AMD 76x Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
12 *
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/init.h>
19
20#include <linux/pci.h>
21#include <linux/pci_ids.h>
22
23#include <linux/slab.h>
24
25#include "edac_mc.h"
26
27
28#define AMD76X_NR_CSROWS 8
29#define AMD76X_NR_CHANS 1
30#define AMD76X_NR_DIMMS 4
31
32
33/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
34#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
35 *
36 * 31:16 reserved
37 * 15:14 SERR enabled: x1=ue 1x=ce
38 * 13 reserved
39 * 12 diag: disabled, enabled
40 * 11:10 mode: dis, EC, ECC, ECC+scrub
41 * 9:8 status: x1=ue 1x=ce
42 * 7:4 UE cs row
43 * 3:0 CE cs row
44 */
45#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
46 *
47 * 31:26 clock disable 5 - 0
48 * 25 SDRAM init
49 * 24 reserved
50 * 23 mode register service
51 * 22:21 suspend to RAM
52 * 20 burst refresh enable
53 * 19 refresh disable
54 * 18 reserved
55 * 17:16 cycles-per-refresh
56 * 15:8 reserved
57 * 7:0 x4 mode enable 7 - 0
58 */
59#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
60 *
61 * 31:23 chip-select base
62 * 22:16 reserved
63 * 15:7 chip-select mask
64 * 6:3 reserved
65 * 2:1 address mode
66 * 0 chip-select enable
67 */
68
69
70struct amd76x_error_info {
71 u32 ecc_mode_status;
72};
73
74
75enum amd76x_chips {
76 AMD761 = 0,
77 AMD762
78};
79
80
81struct amd76x_dev_info {
82 const char *ctl_name;
83};
84
85
86static const struct amd76x_dev_info amd76x_devs[] = {
87 [AMD761] = {.ctl_name = "AMD761"},
88 [AMD762] = {.ctl_name = "AMD762"},
89};
90
91
92/**
93 * amd76x_get_error_info - fetch error information
94 * @mci: Memory controller
95 * @info: Info to fill in
96 *
97 * Fetch and store the AMD76x ECC status. Clear pending status
98 * on the chip so that further errors will be reported
99 */
100
101static void amd76x_get_error_info (struct mem_ctl_info *mci,
102 struct amd76x_error_info *info)
103{
104 pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
105 &info->ecc_mode_status);
106
107 if (info->ecc_mode_status & BIT(8))
108 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
109 (u32) BIT(8), (u32) BIT(8));
110
111 if (info->ecc_mode_status & BIT(9))
112 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
113 (u32) BIT(9), (u32) BIT(9));
114}
115
116
117/**
118 * amd76x_process_error_info - Error check
119 * @mci: Memory controller
120 * @info: Previously fetched information from chip
121 * @handle_errors: 1 if we should do recovery
122 *
123 * Process the chip state and decide if an error has occurred.
124 * A return of 1 indicates an error. Also if handle_errors is true
125 * then attempt to handle and clean up after the error
126 */
127
128static int amd76x_process_error_info (struct mem_ctl_info *mci,
129 struct amd76x_error_info *info, int handle_errors)
130{
131 int error_found;
132 u32 row;
133
134 error_found = 0;
135
136 /*
137 * Check for an uncorrectable error
138 */
139 if (info->ecc_mode_status & BIT(8)) {
140 error_found = 1;
141
142 if (handle_errors) {
143 row = (info->ecc_mode_status >> 4) & 0xf;
144 edac_mc_handle_ue(mci,
145 mci->csrows[row].first_page, 0, row,
146 mci->ctl_name);
147 }
148 }
149
150 /*
151 * Check for a correctable error
152 */
153 if (info->ecc_mode_status & BIT(9)) {
154 error_found = 1;
155
156 if (handle_errors) {
157 row = info->ecc_mode_status & 0xf;
158 edac_mc_handle_ce(mci,
159 mci->csrows[row].first_page, 0, 0, row, 0,
160 mci->ctl_name);
161 }
162 }
163 return error_found;
164}
165
166/**
167 * amd76x_check - Poll the controller
168 * @mci: Memory controller
169 *
170 * Called by the poll handlers this function reads the status
171 * from the controller and checks for errors.
172 */
173
174static void amd76x_check(struct mem_ctl_info *mci)
175{
176 struct amd76x_error_info info;
177 debugf3("MC: " __FILE__ ": %s()\n", __func__);
178 amd76x_get_error_info(mci, &info);
179 amd76x_process_error_info(mci, &info, 1);
180}
181
182
183/**
184 * amd76x_probe1 - Perform set up for detected device
185 * @pdev; PCI device detected
186 * @dev_idx: Device type index
187 *
188 * We have found an AMD76x and now need to set up the memory
189 * controller status reporting. We configure and set up the
190 * memory controller reporting and claim the device.
191 */
192
193static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
194{
195 int rc = -ENODEV;
196 int index;
197 struct mem_ctl_info *mci = NULL;
198 enum edac_type ems_modes[] = {
199 EDAC_NONE,
200 EDAC_EC,
201 EDAC_SECDED,
202 EDAC_SECDED
203 };
204 u32 ems;
205 u32 ems_mode;
206
207 debugf0("MC: " __FILE__ ": %s()\n", __func__);
208
209 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
210 ems_mode = (ems >> 10) & 0x3;
211
212 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
213
214 if (mci == NULL) {
215 rc = -ENOMEM;
216 goto fail;
217 }
218
219 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
220
221 mci->pdev = pci_dev_get(pdev);
222 mci->mtype_cap = MEM_FLAG_RDDR;
223
224 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
225 mci->edac_cap = ems_mode ?
226 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
227
228 mci->mod_name = BS_MOD_STR;
229 mci->mod_ver = "$Revision: 1.4.2.5 $";
230 mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
231 mci->edac_check = amd76x_check;
232 mci->ctl_page_to_phys = NULL;
233
234 for (index = 0; index < mci->nr_csrows; index++) {
235 struct csrow_info *csrow = &mci->csrows[index];
236 u32 mba;
237 u32 mba_base;
238 u32 mba_mask;
239 u32 dms;
240
241 /* find the DRAM Chip Select Base address and mask */
242 pci_read_config_dword(mci->pdev,
243 AMD76X_MEM_BASE_ADDR + (index * 4),
244 &mba);
245
246 if (!(mba & BIT(0)))
247 continue;
248
249 mba_base = mba & 0xff800000UL;
250 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
251
252 pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
253 &dms);
254
255 csrow->first_page = mba_base >> PAGE_SHIFT;
256 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
257 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
258 csrow->page_mask = mba_mask >> PAGE_SHIFT;
259 csrow->grain = csrow->nr_pages << PAGE_SHIFT;
260 csrow->mtype = MEM_RDDR;
261 csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
262 csrow->edac_mode = ems_modes[ems_mode];
263 }
264
265 /* clear counters */
266 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
267 (u32) (0x3 << 8));
268
269 if (edac_mc_add_mc(mci)) {
270 debugf3("MC: " __FILE__
271 ": %s(): failed edac_mc_add_mc()\n", __func__);
272 goto fail;
273 }
274
275 /* get this far and it's successful */
276 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
277 return 0;
278
279fail:
280 if (mci) {
281 if(mci->pdev)
282 pci_dev_put(mci->pdev);
283 edac_mc_free(mci);
284 }
285 return rc;
286}
287
288/* returns count (>= 0), or negative on error */
289static int __devinit amd76x_init_one(struct pci_dev *pdev,
290 const struct pci_device_id *ent)
291{
292 debugf0("MC: " __FILE__ ": %s()\n", __func__);
293
294 /* don't need to call pci_device_enable() */
295 return amd76x_probe1(pdev, ent->driver_data);
296}
297
298
299/**
300 * amd76x_remove_one - driver shutdown
301 * @pdev: PCI device being handed back
302 *
303 * Called when the driver is unloaded. Find the matching mci
304 * structure for the device then delete the mci and free the
305 * resources.
306 */
307
308static void __devexit amd76x_remove_one(struct pci_dev *pdev)
309{
310 struct mem_ctl_info *mci;
311
312 debugf0(__FILE__ ": %s()\n", __func__);
313
314 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
315 return;
316 if (edac_mc_del_mc(mci))
317 return;
318 pci_dev_put(mci->pdev);
319 edac_mc_free(mci);
320}
321
322
323static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
324 {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
325 AMD762},
326 {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
327 AMD761},
328 {0,} /* 0 terminated list. */
329};
330
331MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
332
333
334static struct pci_driver amd76x_driver = {
335 .name = BS_MOD_STR,
336 .probe = amd76x_init_one,
337 .remove = __devexit_p(amd76x_remove_one),
338 .id_table = amd76x_pci_tbl,
339};
340
341static int __init amd76x_init(void)
342{
343 return pci_register_driver(&amd76x_driver);
344}
345
346static void __exit amd76x_exit(void)
347{
348 pci_unregister_driver(&amd76x_driver);
349}
350
351module_init(amd76x_init);
352module_exit(amd76x_exit);
353
354MODULE_LICENSE("GPL");
355MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
356MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
new file mode 100644
index 00000000000..770a5a63307
--- /dev/null
+++ b/drivers/edac/e752x_edac.c
@@ -0,0 +1,1071 @@
1/*
2 * Intel e752x Memory Controller kernel module
3 * (C) 2004 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * See "enum e752x_chips" below for supported chipsets
8 *
9 * Written by Tom Zimmerman
10 *
11 * Contributors:
12 * Thayne Harbaugh at realmsys.com (?)
13 * Wang Zhenyu at intel.com
14 * Dave Jiang at mvista.com
15 *
16 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
17 *
18 */
19
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/init.h>
24
25#include <linux/pci.h>
26#include <linux/pci_ids.h>
27
28#include <linux/slab.h>
29
30#include "edac_mc.h"
31
32
33#ifndef PCI_DEVICE_ID_INTEL_7520_0
34#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
35#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
36
37#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
38#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
39#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
40
41#ifndef PCI_DEVICE_ID_INTEL_7525_0
42#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
43#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
44
45#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
46#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
47#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
48
49#ifndef PCI_DEVICE_ID_INTEL_7320_0
50#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
51#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
52
53#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
54#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
55#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
56
57#define E752X_NR_CSROWS 8 /* number of csrows */
58
59
60/* E752X register addresses - device 0 function 0 */
61#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
62#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
63 /*
64 * 31:30 Device width row 7
65 * 01=x8 10=x4 11=x8 DDR2
66 * 27:26 Device width row 6
67 * 23:22 Device width row 5
68 * 19:20 Device width row 4
69 * 15:14 Device width row 3
70 * 11:10 Device width row 2
71 * 7:6 Device width row 1
72 * 3:2 Device width row 0
73 */
74#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
75 /* FIXME:IS THIS RIGHT? */
76 /*
77 * 22 Number channels 0=1,1=2
78 * 19:18 DRB Granularity 32/64MB
79 */
80#define E752X_DRM 0x80 /* Dimm mapping register */
81#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
82 /*
83 * 14:12 1 single A, 2 single B, 3 dual
84 */
85#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
86#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
87#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
88#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
89
90/* E752X register addresses - device 0 function 1 */
91#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
92#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
93#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
94#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
95#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
96#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
97#define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */
98#define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */
99#define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */
100#define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */
101#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
102#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
103#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
104#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI command reg (8b) */
105#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
106#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
107#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
108#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
109#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
110#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
111 /* error address register (32b) */
112 /*
113 * 31 Reserved
114 * 30:2 CE address (64 byte block 34:6)
115 * 1 Reserved
116 * 0 HiLoCS
117 */
118#define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
119 /* error address register (32b) */
120 /*
121 * 31 Reserved
122 * 30:2 CE address (64 byte block 34:6)
123 * 1 Reserved
124 * 0 HiLoCS
125 */
126#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
127 /* error address register (32b) */
128 /*
129 * 31 Reserved
130 * 30:2 CE address (64 byte block 34:6)
131 * 1 Reserved
132 * 0 HiLoCS
133 */
134#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM first uncorrectable scrub memory */
135 /* error address register (32b) */
136 /*
137 * 31 Reserved
138 * 30:2 CE address (64 byte block 34:6)
139 * 1 Reserved
140 * 0 HiLoCS
141 */
142#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
143 /* error syndrome register (16b) */
144#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
145 /* error syndrome register (16b) */
146#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
147
148/* ICH5R register addresses - device 30 function 0 */
149#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
150#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
151#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
152
153enum e752x_chips {
154 E7520 = 0,
155 E7525 = 1,
156 E7320 = 2
157};
158
159
160struct e752x_pvt {
161 struct pci_dev *bridge_ck;
162 struct pci_dev *dev_d0f0;
163 struct pci_dev *dev_d0f1;
164 u32 tolm;
165 u32 remapbase;
166 u32 remaplimit;
167 int mc_symmetric;
168 u8 map[8];
169 int map_type;
170 const struct e752x_dev_info *dev_info;
171};
172
173
174struct e752x_dev_info {
175 u16 err_dev;
176 const char *ctl_name;
177};
178
179struct e752x_error_info {
180 u32 ferr_global;
181 u32 nerr_global;
182 u8 hi_ferr;
183 u8 hi_nerr;
184 u16 sysbus_ferr;
185 u16 sysbus_nerr;
186 u8 buf_ferr;
187 u8 buf_nerr;
188 u16 dram_ferr;
189 u16 dram_nerr;
190 u32 dram_sec1_add;
191 u32 dram_sec2_add;
192 u16 dram_sec1_syndrome;
193 u16 dram_sec2_syndrome;
194 u32 dram_ded_add;
195 u32 dram_scrb_add;
196 u32 dram_retr_add;
197};
198
199static const struct e752x_dev_info e752x_devs[] = {
200 [E7520] = {
201 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
202 .ctl_name = "E7520"},
203 [E7525] = {
204 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
205 .ctl_name = "E7525"},
206 [E7320] = {
207 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
208 .ctl_name = "E7320"},
209};
210
211
212static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
213 unsigned long page)
214{
215 u32 remap;
216 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
217
218 debugf3("MC: " __FILE__ ": %s()\n", __func__);
219
220 if (page < pvt->tolm)
221 return page;
222 if ((page >= 0x100000) && (page < pvt->remapbase))
223 return page;
224 remap = (page - pvt->tolm) + pvt->remapbase;
225 if (remap < pvt->remaplimit)
226 return remap;
227 printk(KERN_ERR "Invalid page %lx - out of range\n", page);
228 return pvt->tolm - 1;
229}
230
231static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
232 u32 sec1_add, u16 sec1_syndrome)
233{
234 u32 page;
235 int row;
236 int channel;
237 int i;
238 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
239
240 debugf3("MC: " __FILE__ ": %s()\n", __func__);
241
242 /* convert the addr to 4k page */
243 page = sec1_add >> (PAGE_SHIFT - 4);
244
245 /* FIXME - check for -1 */
246 if (pvt->mc_symmetric) {
247 /* chip select are bits 14 & 13 */
248 row = ((page >> 1) & 3);
249 printk(KERN_WARNING
250 "Test row %d Table %d %d %d %d %d %d %d %d\n",
251 row, pvt->map[0], pvt->map[1], pvt->map[2],
252 pvt->map[3], pvt->map[4], pvt->map[5],
253 pvt->map[6], pvt->map[7]);
254
255 /* test for channel remapping */
256 for (i = 0; i < 8; i++) {
257 if (pvt->map[i] == row)
258 break;
259 }
260 printk(KERN_WARNING "Test computed row %d\n", i);
261 if (i < 8)
262 row = i;
263 else
264 printk(KERN_WARNING
265 "MC%d: row %d not found in remap table\n",
266 mci->mc_idx, row);
267 } else
268 row = edac_mc_find_csrow_by_page(mci, page);
269 /* 0 = channel A, 1 = channel B */
270 channel = !(error_one & 1);
271
272 if (!pvt->map_type)
273 row = 7 - row;
274 edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
275 "e752x CE");
276}
277
278
279static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
280 u32 sec1_add, u16 sec1_syndrome, int *error_found,
281 int handle_error)
282{
283 *error_found = 1;
284
285 if (handle_error)
286 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
287}
288
289static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add,
290 u32 scrb_add)
291{
292 u32 error_2b, block_page;
293 int row;
294 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
295
296 debugf3("MC: " __FILE__ ": %s()\n", __func__);
297
298 if (error_one & 0x0202) {
299 error_2b = ded_add;
300 /* convert to 4k address */
301 block_page = error_2b >> (PAGE_SHIFT - 4);
302 row = pvt->mc_symmetric ?
303 /* chip select are bits 14 & 13 */
304 ((block_page >> 1) & 3) :
305 edac_mc_find_csrow_by_page(mci, block_page);
306 edac_mc_handle_ue(mci, block_page, 0, row,
307 "e752x UE from Read");
308 }
309 if (error_one & 0x0404) {
310 error_2b = scrb_add;
311 /* convert to 4k address */
312 block_page = error_2b >> (PAGE_SHIFT - 4);
313 row = pvt->mc_symmetric ?
314 /* chip select are bits 14 & 13 */
315 ((block_page >> 1) & 3) :
316 edac_mc_find_csrow_by_page(mci, block_page);
317 edac_mc_handle_ue(mci, block_page, 0, row,
318 "e752x UE from Scruber");
319 }
320}
321
322static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
323 u32 ded_add, u32 scrb_add, int *error_found, int handle_error)
324{
325 *error_found = 1;
326
327 if (handle_error)
328 do_process_ue(mci, error_one, ded_add, scrb_add);
329}
330
331static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
332 int *error_found, int handle_error)
333{
334 *error_found = 1;
335
336 if (!handle_error)
337 return;
338
339 debugf3("MC: " __FILE__ ": %s()\n", __func__);
340 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
341}
342
343static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
344 u32 retry_add)
345{
346 u32 error_1b, page;
347 int row;
348 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
349
350 error_1b = retry_add;
351 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
352 row = pvt->mc_symmetric ?
353 ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
354 edac_mc_find_csrow_by_page(mci, page);
355 printk(KERN_WARNING
356 "MC%d: CE page 0x%lx, row %d : Memory read retry\n",
357 mci->mc_idx, (long unsigned int) page, row);
358}
359
360static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
361 u32 retry_add, int *error_found, int handle_error)
362{
363 *error_found = 1;
364
365 if (handle_error)
366 do_process_ded_retry(mci, error, retry_add);
367}
368
369static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
370 int *error_found, int handle_error)
371{
372 *error_found = 1;
373
374 if (handle_error)
375 printk(KERN_WARNING "MC%d: Memory threshold CE\n",
376 mci->mc_idx);
377}
378
379static char *global_message[11] = {
380 "PCI Express C1", "PCI Express C", "PCI Express B1",
381 "PCI Express B", "PCI Express A1", "PCI Express A",
382 "DMA Controler", "HUB Interface", "System Bus",
383 "DRAM Controler", "Internal Buffer"
384};
385
386static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
387
388static void do_global_error(int fatal, u32 errors)
389{
390 int i;
391
392 for (i = 0; i < 11; i++) {
393 if (errors & (1 << i))
394 printk(KERN_WARNING "%sError %s\n",
395 fatal_message[fatal], global_message[i]);
396 }
397}
398
399static inline void global_error(int fatal, u32 errors, int *error_found,
400 int handle_error)
401{
402 *error_found = 1;
403
404 if (handle_error)
405 do_global_error(fatal, errors);
406}
407
408static char *hub_message[7] = {
409 "HI Address or Command Parity", "HI Illegal Access",
410 "HI Internal Parity", "Out of Range Access",
411 "HI Data Parity", "Enhanced Config Access",
412 "Hub Interface Target Abort"
413};
414
415static void do_hub_error(int fatal, u8 errors)
416{
417 int i;
418
419 for (i = 0; i < 7; i++) {
420 if (errors & (1 << i))
421 printk(KERN_WARNING "%sError %s\n",
422 fatal_message[fatal], hub_message[i]);
423 }
424}
425
426static inline void hub_error(int fatal, u8 errors, int *error_found,
427 int handle_error)
428{
429 *error_found = 1;
430
431 if (handle_error)
432 do_hub_error(fatal, errors);
433}
434
435static char *membuf_message[4] = {
436 "Internal PMWB to DRAM parity",
437 "Internal PMWB to System Bus Parity",
438 "Internal System Bus or IO to PMWB Parity",
439 "Internal DRAM to PMWB Parity"
440};
441
442static void do_membuf_error(u8 errors)
443{
444 int i;
445
446 for (i = 0; i < 4; i++) {
447 if (errors & (1 << i))
448 printk(KERN_WARNING "Non-Fatal Error %s\n",
449 membuf_message[i]);
450 }
451}
452
453static inline void membuf_error(u8 errors, int *error_found, int handle_error)
454{
455 *error_found = 1;
456
457 if (handle_error)
458 do_membuf_error(errors);
459}
460
461#if 0
462char *sysbus_message[10] = {
463 "Addr or Request Parity",
464 "Data Strobe Glitch",
465 "Addr Strobe Glitch",
466 "Data Parity",
467 "Addr Above TOM",
468 "Non DRAM Lock Error",
469 "MCERR", "BINIT",
470 "Memory Parity",
471 "IO Subsystem Parity"
472};
473#endif /* 0 */
474
475static void do_sysbus_error(int fatal, u32 errors)
476{
477 int i;
478
479 for (i = 0; i < 10; i++) {
480 if (errors & (1 << i))
481 printk(KERN_WARNING "%sError System Bus %s\n",
482 fatal_message[fatal], global_message[i]);
483 }
484}
485
486static inline void sysbus_error(int fatal, u32 errors, int *error_found,
487 int handle_error)
488{
489 *error_found = 1;
490
491 if (handle_error)
492 do_sysbus_error(fatal, errors);
493}
494
495static void e752x_check_hub_interface (struct e752x_error_info *info,
496 int *error_found, int handle_error)
497{
498 u8 stat8;
499
500 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
501 stat8 = info->hi_ferr;
502 if(stat8 & 0x7f) { /* Error, so process */
503 stat8 &= 0x7f;
504 if(stat8 & 0x2b)
505 hub_error(1, stat8 & 0x2b, error_found, handle_error);
506 if(stat8 & 0x54)
507 hub_error(0, stat8 & 0x54, error_found, handle_error);
508 }
509 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
510 stat8 = info->hi_nerr;
511 if(stat8 & 0x7f) { /* Error, so process */
512 stat8 &= 0x7f;
513 if (stat8 & 0x2b)
514 hub_error(1, stat8 & 0x2b, error_found, handle_error);
515 if(stat8 & 0x54)
516 hub_error(0, stat8 & 0x54, error_found, handle_error);
517 }
518}
519
520static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
521 int handle_error)
522{
523 u32 stat32, error32;
524
525 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
526 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
527
528 if (stat32 == 0)
529 return; /* no errors */
530
531 error32 = (stat32 >> 16) & 0x3ff;
532 stat32 = stat32 & 0x3ff;
533 if(stat32 & 0x083)
534 sysbus_error(1, stat32 & 0x083, error_found, handle_error);
535 if(stat32 & 0x37c)
536 sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
537 if(error32 & 0x083)
538 sysbus_error(1, error32 & 0x083, error_found, handle_error);
539 if(error32 & 0x37c)
540 sysbus_error(0, error32 & 0x37c, error_found, handle_error);
541}
542
543static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
544 int handle_error)
545{
546 u8 stat8;
547
548 stat8 = info->buf_ferr;
549 if (stat8 & 0x0f) { /* Error, so process */
550 stat8 &= 0x0f;
551 membuf_error(stat8, error_found, handle_error);
552 }
553 stat8 = info->buf_nerr;
554 if (stat8 & 0x0f) { /* Error, so process */
555 stat8 &= 0x0f;
556 membuf_error(stat8, error_found, handle_error);
557 }
558}
559
560static void e752x_check_dram (struct mem_ctl_info *mci,
561 struct e752x_error_info *info, int *error_found, int handle_error)
562{
563 u16 error_one, error_next;
564
565 error_one = info->dram_ferr;
566 error_next = info->dram_nerr;
567
568 /* decode and report errors */
569 if(error_one & 0x0101) /* check first error correctable */
570 process_ce(mci, error_one, info->dram_sec1_add,
571 info->dram_sec1_syndrome, error_found,
572 handle_error);
573
574 if(error_next & 0x0101) /* check next error correctable */
575 process_ce(mci, error_next, info->dram_sec2_add,
576 info->dram_sec2_syndrome, error_found,
577 handle_error);
578
579 if(error_one & 0x4040)
580 process_ue_no_info_wr(mci, error_found, handle_error);
581
582 if(error_next & 0x4040)
583 process_ue_no_info_wr(mci, error_found, handle_error);
584
585 if(error_one & 0x2020)
586 process_ded_retry(mci, error_one, info->dram_retr_add,
587 error_found, handle_error);
588
589 if(error_next & 0x2020)
590 process_ded_retry(mci, error_next, info->dram_retr_add,
591 error_found, handle_error);
592
593 if(error_one & 0x0808)
594 process_threshold_ce(mci, error_one, error_found,
595 handle_error);
596
597 if(error_next & 0x0808)
598 process_threshold_ce(mci, error_next, error_found,
599 handle_error);
600
601 if(error_one & 0x0606)
602 process_ue(mci, error_one, info->dram_ded_add,
603 info->dram_scrb_add, error_found, handle_error);
604
605 if(error_next & 0x0606)
606 process_ue(mci, error_next, info->dram_ded_add,
607 info->dram_scrb_add, error_found, handle_error);
608}
609
/*
 * e752x_get_error_info - snapshot and acknowledge the chip error registers.
 * @mci:  the memory controller this device belongs to
 * @info: output; zeroed first, then filled from the hardware
 *
 * Reads the global first-error (FERR) and next-error (NERR) registers from
 * device 0 function 1.  Only when a global register is non-zero are the
 * per-category status/address/syndrome registers read into *info, and each
 * latched register is then written back with the value just read.
 * NOTE(review): the write-back presumably clears the latched bits
 * (write-one-to-clear) — confirm against the E752x datasheet.  The
 * read-then-write ordering here is significant; do not reorder.
 */
static void e752x_get_error_info (struct mem_ctl_info *mci,
		struct e752x_error_info *info)
{
	struct pci_dev *dev;
	struct e752x_pvt *pvt;

	/* start from a clean snapshot so stale fields are never decoded */
	memset(info, 0, sizeof(*info));
	pvt = (struct e752x_pvt *) mci->pvt_info;
	dev = pvt->dev_d0f1;

	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);

	if (info->ferr_global) {
		/* capture all first-error detail registers */
		pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
		pci_read_config_word(dev, E752X_SYSBUS_FERR,
				&info->sysbus_ferr);
		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
		pci_read_config_word(dev, E752X_DRAM_FERR,
				&info->dram_ferr);
		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
				&info->dram_sec1_add);
		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
				&info->dram_sec1_syndrome);
		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
				&info->dram_ded_add);
		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
				&info->dram_scrb_add);
		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
				&info->dram_retr_add);

		/* write back each register that latched an error */
		if (info->hi_ferr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_FERR,
					info->hi_ferr);

		if (info->sysbus_ferr)
			pci_write_config_word(dev, E752X_SYSBUS_FERR,
					info->sysbus_ferr);

		if (info->buf_ferr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_FERR,
					info->buf_ferr);

		/* DRAM error register lives on the bridge device and is
		 * cleared through the masked-write helper instead */
		if (info->dram_ferr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
					info->dram_ferr, info->dram_ferr);

		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
				info->ferr_global);
	}

	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);

	if (info->nerr_global) {
		/* same procedure for the next-error bank */
		pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
		pci_read_config_word(dev, E752X_SYSBUS_NERR,
				&info->sysbus_nerr);
		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
		pci_read_config_word(dev, E752X_DRAM_NERR,
				&info->dram_nerr);
		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
				&info->dram_sec2_add);
		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
				&info->dram_sec2_syndrome);

		if (info->hi_nerr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_NERR,
					info->hi_nerr);

		if (info->sysbus_nerr)
			pci_write_config_word(dev, E752X_SYSBUS_NERR,
					info->sysbus_nerr);

		if (info->buf_nerr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_NERR,
					info->buf_nerr);

		if (info->dram_nerr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
					info->dram_nerr, info->dram_nerr);

		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
				info->nerr_global);
	}
}
694
695static int e752x_process_error_info (struct mem_ctl_info *mci,
696 struct e752x_error_info *info, int handle_errors)
697{
698 u32 error32, stat32;
699 int error_found;
700
701 error_found = 0;
702 error32 = (info->ferr_global >> 18) & 0x3ff;
703 stat32 = (info->ferr_global >> 4) & 0x7ff;
704
705 if (error32)
706 global_error(1, error32, &error_found, handle_errors);
707
708 if (stat32)
709 global_error(0, stat32, &error_found, handle_errors);
710
711 error32 = (info->nerr_global >> 18) & 0x3ff;
712 stat32 = (info->nerr_global >> 4) & 0x7ff;
713
714 if (error32)
715 global_error(1, error32, &error_found, handle_errors);
716
717 if (stat32)
718 global_error(0, stat32, &error_found, handle_errors);
719
720 e752x_check_hub_interface(info, &error_found, handle_errors);
721 e752x_check_sysbus(info, &error_found, handle_errors);
722 e752x_check_membuf(info, &error_found, handle_errors);
723 e752x_check_dram(mci, info, &error_found, handle_errors);
724 return error_found;
725}
726
727static void e752x_check(struct mem_ctl_info *mci)
728{
729 struct e752x_error_info info;
730 debugf3("MC: " __FILE__ ": %s()\n", __func__);
731 e752x_get_error_info(mci, &info);
732 e752x_process_error_info(mci, &info, 1);
733}
734
/*
 * e752x_probe1 - second-stage probe for an E7520/E7525/E7320 host bridge.
 * @pdev:    device 0 function 0 of the memory controller hub
 * @dev_idx: index into e752x_devs[] identifying the chip variant
 *
 * Enables device 0 function 1 (the error-reporting function), sizes the
 * csrows from the DRB registers, builds the dimm-to-row map, clears any
 * error state the BIOS may have left latched, and registers the
 * controller with the EDAC core.
 *
 * Returns 0 on success, -ENOMEM/-ENODEV on failure.  On failure every
 * pci_dev reference taken here is dropped before edac_mc_free().
 */
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc = -ENODEV;
	int index;
	u16 pci_data, stat;
	u32 stat32;
	u16 stat16;
	u8 stat8;
	struct mem_ctl_info *mci = NULL;
	struct e752x_pvt *pvt = NULL;
	u16 ddrcsr;
	u32 drc;
	int drc_chan;	/* Number of channels 0=1chan,1=2chan */
	int drc_drbg;	/* DRB granularity 0=64mb,1=128mb */
	int drc_ddim;	/* DRAM Data Integrity Mode 0=none,2=edac */
	u32 dra;
	unsigned long last_cumul_size;
	struct pci_dev *pres_dev;
	struct pci_dev *dev = NULL;

	debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
	debugf0("Starting Probe1\n");

	/* enable device 0 function 1 */
	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
	stat8 |= (1 << 5);
	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);

	/* need to find out the number of channels */
	pci_read_config_dword(pdev, E752X_DRC, &drc);
	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
	/* FIXME: should check >>12 or 0xf, true for all? */
	/* Dual channel = 1, Single channel = 0 */
	drc_chan = (((ddrcsr >> 12) & 3) == 3);
	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
	drc_ddim = (drc >> 20) & 0x3;

	/* allocate the controller descriptor plus our private state */
	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);

	if (mci == NULL) {
		rc = -ENOMEM;
		goto fail;
	}

	debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);

	mci->mtype_cap = MEM_FLAG_RDDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
	    EDAC_FLAG_S4ECD4ED;
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = BS_MOD_STR;
	mci->mod_ver = "$Revision: 1.5.2.11 $";
	mci->pdev = pdev;

	debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
	pvt = (struct e752x_pvt *) mci->pvt_info;
	pvt->dev_info = &e752x_devs[dev_idx];
	/* locate the error-reporting function; fall back to scanning
	 * dev 0 fn 1 in case the BIOS hid it */
	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
					pvt->dev_info->err_dev,
					pvt->bridge_ck);
	if (pvt->bridge_ck == NULL)
		pvt->bridge_ck = pci_scan_single_device(pdev->bus,
							PCI_DEVFN(0, 1));
	if (pvt->bridge_ck == NULL) {
		printk(KERN_ERR "MC: error reporting device not found:"
		       "vendor %x device 0x%x (broken BIOS?)\n",
		       PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
		goto fail;
	}
	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);

	debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->edac_check = e752x_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;

	/* find out the device types */
	pci_read_config_dword(pdev, E752X_DRA, &dra);

	/*
	 * The dram row boundary (DRB) reg values are boundary address for
	 * each DRAM row with a granularity of 64 or 128MB (single/dual
	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		u8 value;
		u32 cumul_size;
		/* mem_dev 0=x8, 1=x4 */
		int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
		struct csrow_info *csrow = &mci->csrows[index];

		mem_dev = (mem_dev == 2);
		pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
		/* convert a 128 or 64 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
			__func__, index, cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		csrow->nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;
		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
		csrow->mtype = MEM_RDDR;	/* only one type supported */
		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;

		/*
		 * if single channel or x8 devices then SECDED
		 * if dual channel and x4 then S4ECD4ED
		 */
		if (drc_ddim) {
			if (drc_chan && mem_dev) {
				csrow->edac_mode = EDAC_S4ECD4ED;
				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
			} else {
				csrow->edac_mode = EDAC_SECDED;
				mci->edac_cap |= EDAC_FLAG_SECDED;
			}
		} else
			csrow->edac_mode = EDAC_NONE;
	}

	/* Fill in the memory map table */
	{
		u8 value;
		u8 last = 0;
		u8 row = 0;
		for (index = 0; index < 8; index += 2) {

			pci_read_config_byte(mci->pdev, E752X_DRB + index,
					     &value);
			/* test if there is a dimm in this slot */
			if (value == last) {
				/* no dimm in the slot, so flag it as empty */
				pvt->map[index] = 0xff;
				pvt->map[index + 1] = 0xff;
			} else {	/* there is a dimm in the slot */
				pvt->map[index] = row;
				row++;
				last = value;
				/* test the next value to see if the dimm is
				   double sided */
				pci_read_config_byte(mci->pdev,
						     E752X_DRB + index + 1,
						     &value);
				pvt->map[index + 1] = (value == last) ?
				    0xff :	/* the dimm is single sided,
						   so flag as empty */
				    row;	/* this is a double sided dimm
						   to save the next row # */
				row++;
				last = value;
			}
		}
	}

	/* set the map type.  1 = normal, 0 = reversed */
	pci_read_config_byte(mci->pdev, E752X_DRM, &stat8);
	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));

	mci->edac_cap |= EDAC_FLAG_NONE;

	debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
		__func__);
	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(mci->pdev, E752X_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
	       pvt->remapbase, pvt->remaplimit);

	/* register with the EDAC core; from here on e752x_check() may run */
	if (edac_mc_add_mc(mci)) {
		debugf3("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n",
			__func__);
		goto fail;
	}

	/* Walk through the PCI table and clear errors */
	switch (dev_idx) {
	case E7520:
		dev = pci_get_device(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_7520_0, NULL);
		break;
	case E7525:
		dev = pci_get_device(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_7525_0, NULL);
		break;
	case E7320:
		dev = pci_get_device(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_7320_0, NULL);
		break;
	}


	pvt->dev_d0f0 = dev;
	/*
	 * NOTE(review): this walks the raw pci_dev global_list starting at
	 * dev, clearing PCI status error bits on every device until the
	 * list wraps back to dev.  It assumes dev is non-NULL (dev_idx is
	 * always one of the three cases above) and that the global device
	 * list is stable here — confirm both.
	 */
	for (pres_dev = dev;
	     ((struct pci_dev *) pres_dev->global_list.next != dev);
	     pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
		pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
		stat = (u16) (stat32 >> 16);
		/* clear any error bits */
		if (stat32 & ((1 << 6) + (1 << 8)))
			pci_write_config_word(pres_dev, PCI_STATUS, stat);
	}
	/* find the error reporting device and clear errors */
	dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
	/* Turn off error disable & SMI in case the BIOS turned it on */
	pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
	pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
	/* clear other MCH errors: read each status register and write the
	 * value straight back (NOTE(review): presumably write-one-to-clear
	 * semantics — confirm against the datasheet) */
	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32);
	pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
	pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
	pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
	pci_write_config_byte(dev, E752X_HI_FERR, stat8);
	pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
	pci_write_config_byte(dev, E752X_HI_NERR, stat8);
	pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
	pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
	pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
	pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
	pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
	pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
	pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
	pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
	pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
	pci_write_config_word(dev, E752X_DRAM_NERR, stat16);

	/* get this far and it's successful */
	debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
	return 0;

fail:
	/* drop every reference taken above, then free the descriptor */
	if (mci) {
		if (pvt->dev_d0f0)
			pci_dev_put(pvt->dev_d0f0);
		if (pvt->dev_d0f1)
			pci_dev_put(pvt->dev_d0f1);
		if (pvt->bridge_ck)
			pci_dev_put(pvt->bridge_ck);
		edac_mc_free(mci);
	}
	return rc;
}
993
994/* returns count (>= 0), or negative on error */
995static int __devinit e752x_init_one(struct pci_dev *pdev,
996 const struct pci_device_id *ent)
997{
998 debugf0("MC: " __FILE__ ": %s()\n", __func__);
999
1000 /* wake up and enable device */
1001 if(pci_enable_device(pdev) < 0)
1002 return -EIO;
1003 return e752x_probe1(pdev, ent->driver_data);
1004}
1005
1006
1007static void __devexit e752x_remove_one(struct pci_dev *pdev)
1008{
1009 struct mem_ctl_info *mci;
1010 struct e752x_pvt *pvt;
1011
1012 debugf0(__FILE__ ": %s()\n", __func__);
1013
1014 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
1015 return;
1016
1017 if (edac_mc_del_mc(mci))
1018 return;
1019
1020 pvt = (struct e752x_pvt *) mci->pvt_info;
1021 pci_dev_put(pvt->dev_d0f0);
1022 pci_dev_put(pvt->dev_d0f1);
1023 pci_dev_put(pvt->bridge_ck);
1024 edac_mc_free(mci);
1025}
1026
1027
/*
 * PCI id table: each supported host bridge maps to its enum index,
 * carried in driver_data and handed to e752x_probe1() as dev_idx.
 */
static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
	{PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7520},
	{PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7525},
	{PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7320},
	{0,}			/* 0 terminated list. */
};
1037
1038MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1039
1040
1041static struct pci_driver e752x_driver = {
1042 name: BS_MOD_STR,
1043 probe: e752x_init_one,
1044 remove: __devexit_p(e752x_remove_one),
1045 id_table: e752x_pci_tbl,
1046};
1047
1048
1049static int __init e752x_init(void)
1050{
1051 int pci_rc;
1052
1053 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1054 pci_rc = pci_register_driver(&e752x_driver);
1055 return (pci_rc < 0) ? pci_rc : 0;
1056}
1057
1058
1059static void __exit e752x_exit(void)
1060{
1061 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1062 pci_unregister_driver(&e752x_driver);
1063}
1064
1065
1066module_init(e752x_init);
1067module_exit(e752x_exit);
1068
1069MODULE_LICENSE("GPL");
1070MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1071MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
new file mode 100644
index 00000000000..d5e320dfc66
--- /dev/null
+++ b/drivers/edac/e7xxx_edac.c
@@ -0,0 +1,558 @@
1/*
2 * Intel e7xxx Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * See "enum e7xxx_chips" below for supported chipsets
8 *
9 * Written by Thayne Harbaugh
10 * Based on work by Dan Hollis <goemon at anime dot net> and others.
11 * http://www.anime.net/~goemon/linux-ecc/
12 *
13 * Contributors:
14 * Eric Biederman (Linux Networx)
15 * Tom Zimmerman (Linux Networx)
16 * Jim Garlick (Lawrence Livermore National Labs)
17 * Dave Peterson (Lawrence Livermore National Labs)
18 * That One Guy (Some other place)
19 * Wang Zhenyu (intel.com)
20 *
21 * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
22 *
23 */
24
25
26#include <linux/config.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include "edac_mc.h"
33
34
35#ifndef PCI_DEVICE_ID_INTEL_7205_0
36#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
37#endif /* PCI_DEVICE_ID_INTEL_7205_0 */
38
39#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
40#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
41#endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
42
43#ifndef PCI_DEVICE_ID_INTEL_7500_0
44#define PCI_DEVICE_ID_INTEL_7500_0 0x2540
45#endif /* PCI_DEVICE_ID_INTEL_7500_0 */
46
47#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
48#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
49#endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
50
51#ifndef PCI_DEVICE_ID_INTEL_7501_0
52#define PCI_DEVICE_ID_INTEL_7501_0 0x254c
53#endif /* PCI_DEVICE_ID_INTEL_7501_0 */
54
55#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
56#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
57#endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
58
59#ifndef PCI_DEVICE_ID_INTEL_7505_0
60#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
61#endif /* PCI_DEVICE_ID_INTEL_7505_0 */
62
63#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
64#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
65#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
66
67
68#define E7XXX_NR_CSROWS 8 /* number of csrows */
69#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
70
71
72/* E7XXX register addresses - device 0 function 0 */
73#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
74#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
75 /*
76 * 31 Device width row 7 0=x8 1=x4
77 * 27 Device width row 6
78 * 23 Device width row 5
79 * 19 Device width row 4
80 * 15 Device width row 3
81 * 11 Device width row 2
82 * 7 Device width row 1
83 * 3 Device width row 0
84 */
85#define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
86 /*
87 * 22 Number channels 0=1,1=2
88 * 19:18 DRB Granularity 32/64MB
89 */
90#define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
91#define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
92#define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
93
94/* E7XXX register addresses - device 0 function 1 */
95#define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
96#define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
97#define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
98 /* error address register (32b) */
99 /*
100 * 31:28 Reserved
101 * 27:6 CE address (4k block 33:12)
102 * 5:0 Reserved
103 */
104#define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
105 /* error address register (32b) */
106 /*
107 * 31:28 Reserved
108 * 27:6 CE address (4k block 33:12)
109 * 5:0 Reserved
110 */
111#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
112 /* error syndrome register (16b) */
113
/* Supported chipset variants; values index e7xxx_devs[] below. */
enum e7xxx_chips {
	E7500 = 0,
	E7501,
	E7505,
	E7205,
};
120
121
/* Per-controller private state, stored in mem_ctl_info->pvt_info. */
struct e7xxx_pvt {
	struct pci_dev *bridge_ck;	/* the chip's error-reporting PCI function */
	u32 tolm;			/* top of low memory (from E7XXX_TOLM) */
	u32 remapbase;			/* remap window base (from E7XXX_REMAPBASE) */
	u32 remaplimit;			/* remap window limit (from E7XXX_REMAPLIMIT) */
	const struct e7xxx_dev_info *dev_info;	/* entry in e7xxx_devs[] */
};
129
130
/* Static identification data for one chip variant. */
struct e7xxx_dev_info {
	u16 err_dev;		/* PCI device id of the error-reporting function */
	const char *ctl_name;	/* human-readable controller name */
};
135
136
/* Snapshot of the DRAM error registers, filled by e7xxx_get_error_info(). */
struct e7xxx_error_info {
	u8 dram_ferr;			/* first-error status register */
	u8 dram_nerr;			/* next-error status register */
	u32 dram_celog_add;		/* correctable-error address log */
	u16 dram_celog_syndrome;	/* correctable-error syndrome */
	u32 dram_uelog_add;		/* uncorrectable-error address log */
};
144
/* Per-variant data, indexed by enum e7xxx_chips. */
static const struct e7xxx_dev_info e7xxx_devs[] = {
	[E7500] = {
		   .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
		   .ctl_name = "E7500"},
	[E7501] = {
		   .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
		   .ctl_name = "E7501"},
	[E7505] = {
		   .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
		   .ctl_name = "E7505"},
	[E7205] = {
		   .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
		   .ctl_name = "E7205"},
};
159
160
161/* FIXME - is this valid for both SECDED and S4ECD4ED? */
162static inline int e7xxx_find_channel(u16 syndrome)
163{
164 debugf3("MC: " __FILE__ ": %s()\n", __func__);
165
166 if ((syndrome & 0xff00) == 0)
167 return 0;
168 if ((syndrome & 0x00ff) == 0)
169 return 1;
170 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
171 return 0;
172 return 1;
173}
174
175
176static unsigned long
177ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
178{
179 u32 remap;
180 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
181
182 debugf3("MC: " __FILE__ ": %s()\n", __func__);
183
184 if ((page < pvt->tolm) ||
185 ((page >= 0x100000) && (page < pvt->remapbase)))
186 return page;
187 remap = (page - pvt->tolm) + pvt->remapbase;
188 if (remap < pvt->remaplimit)
189 return remap;
190 printk(KERN_ERR "Invalid page %lx - out of range\n", page);
191 return pvt->tolm - 1;
192}
193
194
195static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
196{
197 u32 error_1b, page;
198 u16 syndrome;
199 int row;
200 int channel;
201
202 debugf3("MC: " __FILE__ ": %s()\n", __func__);
203
204 /* read the error address */
205 error_1b = info->dram_celog_add;
206 /* FIXME - should use PAGE_SHIFT */
207 page = error_1b >> 6; /* convert the address to 4k page */
208 /* read the syndrome */
209 syndrome = info->dram_celog_syndrome;
210 /* FIXME - check for -1 */
211 row = edac_mc_find_csrow_by_page(mci, page);
212 /* convert syndrome to channel */
213 channel = e7xxx_find_channel(syndrome);
214 edac_mc_handle_ce(mci, page, 0, syndrome, row, channel,
215 "e7xxx CE");
216}
217
218
/* Report a correctable error whose log registers were already overwritten. */
static void process_ce_no_info(struct mem_ctl_info *mci)
{
	debugf3("MC: " __FILE__ ": %s()\n", __func__);

	edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
}
224
225
226static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
227{
228 u32 error_2b, block_page;
229 int row;
230
231 debugf3("MC: " __FILE__ ": %s()\n", __func__);
232
233 /* read the error address */
234 error_2b = info->dram_uelog_add;
235 /* FIXME - should use PAGE_SHIFT */
236 block_page = error_2b >> 6; /* convert to 4k address */
237 row = edac_mc_find_csrow_by_page(mci, block_page);
238 edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
239}
240
241
/* Report an uncorrectable error whose log registers were already overwritten. */
static void process_ue_no_info(struct mem_ctl_info *mci)
{
	debugf3("MC: " __FILE__ ": %s()\n", __func__);

	edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
}
247
248
/*
 * e7xxx_get_error_info - snapshot the DRAM error registers into *info.
 *
 * Reads FERR/NERR first; the CE log registers (address + syndrome) are
 * fetched only when bit 0 is set in either status register, and the UE
 * log address only when bit 1 is set.  Latched status bits are then
 * acknowledged through pci_write_bits8() — NOTE(review): presumably
 * write-one-to-clear semantics; confirm against the chipset datasheet.
 * The read-before-acknowledge ordering is significant.
 */
static void e7xxx_get_error_info (struct mem_ctl_info *mci,
		struct e7xxx_error_info *info)
{
	struct e7xxx_pvt *pvt;

	pvt = (struct e7xxx_pvt *) mci->pvt_info;
	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
			     &info->dram_ferr);
	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
			     &info->dram_nerr);

	/* bit 0 set in either register: a correctable error was logged */
	if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
		pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
				      &info->dram_celog_add);
		pci_read_config_word(pvt->bridge_ck,
		    E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome);
	}

	/* bit 1 set in either register: an uncorrectable error was logged */
	if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
		pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
				      &info->dram_uelog_add);

	/* acknowledge whatever was latched so the next error is captured */
	if (info->dram_ferr & 3)
		pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03,
				0x03);

	if (info->dram_nerr & 3)
		pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03,
				0x03);
}
279
280
281static int e7xxx_process_error_info (struct mem_ctl_info *mci,
282 struct e7xxx_error_info *info, int handle_errors)
283{
284 int error_found;
285
286 error_found = 0;
287
288 /* decode and report errors */
289 if (info->dram_ferr & 1) { /* check first error correctable */
290 error_found = 1;
291
292 if (handle_errors)
293 process_ce(mci, info);
294 }
295
296 if (info->dram_ferr & 2) { /* check first error uncorrectable */
297 error_found = 1;
298
299 if (handle_errors)
300 process_ue(mci, info);
301 }
302
303 if (info->dram_nerr & 1) { /* check next error correctable */
304 error_found = 1;
305
306 if (handle_errors) {
307 if (info->dram_ferr & 1)
308 process_ce_no_info(mci);
309 else
310 process_ce(mci, info);
311 }
312 }
313
314 if (info->dram_nerr & 2) { /* check next error uncorrectable */
315 error_found = 1;
316
317 if (handle_errors) {
318 if (info->dram_ferr & 2)
319 process_ue_no_info(mci);
320 else
321 process_ue(mci, info);
322 }
323 }
324
325 return error_found;
326}
327
328
329static void e7xxx_check(struct mem_ctl_info *mci)
330{
331 struct e7xxx_error_info info;
332
333 debugf3("MC: " __FILE__ ": %s()\n", __func__);
334 e7xxx_get_error_info(mci, &info);
335 e7xxx_process_error_info(mci, &info, 1);
336}
337
338
/*
 * e7xxx_probe1 - second-stage probe for an E7500/E7501/E7505/E7205 bridge.
 * @pdev:    the host bridge (device 0 function 0)
 * @dev_idx: index into e7xxx_devs[] identifying the chip variant
 *
 * Sizes the csrows from the DRB registers, locates the error-reporting
 * PCI function, clears any latched error state, and registers the
 * controller with the EDAC core.
 *
 * Returns 0 on success, -ENOMEM/-ENODEV on failure; on failure the
 * bridge_ck reference is dropped and the mci descriptor freed.
 */
static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc = -ENODEV;
	int index;
	u16 pci_data;
	struct mem_ctl_info *mci = NULL;
	struct e7xxx_pvt *pvt = NULL;
	u32 drc;
	int drc_chan = 1;	/* Number of channels 0=1chan,1=2chan */
	int drc_drbg = 1;	/* DRB granularity 0=32mb,1=64mb */
	int drc_ddim;		/* DRAM Data Integrity Mode 0=none,2=edac */
	u32 dra;
	unsigned long last_cumul_size;


	debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);

	/* need to find out the number of channels */
	pci_read_config_dword(pdev, E7XXX_DRC, &drc);
	/* only e7501 can be single channel */
	if (dev_idx == E7501) {
		drc_chan = ((drc >> 22) & 0x1);
		drc_drbg = (drc >> 18) & 0x3;
	}
	drc_ddim = (drc >> 20) & 0x3;

	/* allocate the controller descriptor plus our private state */
	mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);

	if (mci == NULL) {
		rc = -ENOMEM;
		goto fail;
	}

	debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);

	mci->mtype_cap = MEM_FLAG_RDDR;
	mci->edac_ctl_cap =
	    EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = BS_MOD_STR;
	mci->mod_ver = "$Revision: 1.5.2.9 $";
	mci->pdev = pdev;

	debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
	pvt = (struct e7xxx_pvt *) mci->pvt_info;
	pvt->dev_info = &e7xxx_devs[dev_idx];
	/* take a reference on the chip's error-reporting function */
	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
					pvt->dev_info->err_dev,
					pvt->bridge_ck);
	if (!pvt->bridge_ck) {
		printk(KERN_ERR
		       "MC: error reporting device not found:"
		       "vendor %x device 0x%x (broken BIOS?)\n",
		       PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
		goto fail;
	}

	debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
	mci->ctl_name = pvt->dev_info->ctl_name;

	mci->edac_check = e7xxx_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;

	/* find out the device types */
	pci_read_config_dword(pdev, E7XXX_DRA, &dra);

	/*
	 * The dram row boundary (DRB) reg values are boundary address
	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		u8 value;
		u32 cumul_size;
		/* mem_dev 0=x8, 1=x4 */
		int mem_dev = (dra >> (index * 4 + 3)) & 0x1;
		struct csrow_info *csrow = &mci->csrows[index];

		pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
		/* convert a 64 or 32 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
			__func__, index, cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		csrow->nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;
		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
		csrow->mtype = MEM_RDDR;	/* only one type supported */
		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;

		/*
		 * if single channel or x8 devices then SECDED
		 * if dual channel and x4 then S4ECD4ED
		 */
		if (drc_ddim) {
			if (drc_chan && mem_dev) {
				csrow->edac_mode = EDAC_S4ECD4ED;
				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
			} else {
				csrow->edac_mode = EDAC_SECDED;
				mci->edac_cap |= EDAC_FLAG_SECDED;
			}
		} else
			csrow->edac_mode = EDAC_NONE;
	}

	mci->edac_cap |= EDAC_FLAG_NONE;

	debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
		__func__);
	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(mci->pdev, E7XXX_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
	       pvt->remapbase, pvt->remaplimit);

	/* clear any pending errors, or initial state bits */
	pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
	pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);

	/* register with the EDAC core; from here on e7xxx_check() may run */
	if (edac_mc_add_mc(mci) != 0) {
		debugf3("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n",
			__func__);
		goto fail;
	}

	/* get this far and it's successful */
	debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
	return 0;

fail:
	/* drop the bridge reference (if taken) and free the descriptor */
	if (mci != NULL) {
		if(pvt != NULL && pvt->bridge_ck)
			pci_dev_put(pvt->bridge_ck);
		edac_mc_free(mci);
	}

	return rc;
}
488
489/* returns count (>= 0), or negative on error */
490static int __devinit
491e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
492{
493 debugf0("MC: " __FILE__ ": %s()\n", __func__);
494
495 /* wake up and enable device */
496 return pci_enable_device(pdev) ?
497 -EIO : e7xxx_probe1(pdev, ent->driver_data);
498}
499
500
/*
 * e7xxx_remove_one - tear down the controller when the PCI device goes away.
 *
 * NOTE(review): the teardown runs when edac_mc_del_mc() returns NON-zero,
 * whereas e752x_remove_one() in the sibling driver frees its state only
 * when edac_mc_del_mc() returns zero.  One of the two is misreading the
 * return convention — verify against edac_mc_del_mc() in edac_mc.c before
 * touching this.
 */
static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct e7xxx_pvt *pvt;

	debugf0(__FILE__ ": %s()\n", __func__);

	if (((mci = edac_mc_find_mci_by_pdev(pdev)) != 0) &&
	    edac_mc_del_mc(mci)) {
		/* drop the bridge reference taken in probe, then free */
		pvt = (struct e7xxx_pvt *) mci->pvt_info;
		pci_dev_put(pvt->bridge_ck);
		edac_mc_free(mci);
	}
}
515
516
/*
 * PCI id table: each supported host bridge maps to its enum index,
 * carried in driver_data and handed to e7xxx_probe1() as dev_idx.
 */
static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
	{PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7205},
	{PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7500},
	{PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7501},
	{PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7505},
	{0,}			/* 0 terminated list. */
};
528
529MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
530
531
/* PCI driver glue; probe/remove hook into the EDAC lifecycle above. */
static struct pci_driver e7xxx_driver = {
	.name = BS_MOD_STR,
	.probe = e7xxx_init_one,
	.remove = __devexit_p(e7xxx_remove_one),
	.id_table = e7xxx_pci_tbl,
};
538
539
540static int __init e7xxx_init(void)
541{
542 return pci_register_driver(&e7xxx_driver);
543}
544
545
546static void __exit e7xxx_exit(void)
547{
548 pci_unregister_driver(&e7xxx_driver);
549}
550
551module_init(e7xxx_init);
552module_exit(e7xxx_exit);
553
554
555MODULE_LICENSE("GPL");
556MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
557 "Based on.work by Dan Hollis et al");
558MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
new file mode 100644
index 00000000000..4be9bd0a126
--- /dev/null
+++ b/drivers/edac/edac_mc.c
@@ -0,0 +1,2209 @@
1/*
2 * edac_mc kernel module
3 * (C) 2005 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * Modified by Dave Peterson and Doug Thompson
12 *
13 */
14
15
16#include <linux/config.h>
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/proc_fs.h>
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/smp.h>
23#include <linux/init.h>
24#include <linux/sysctl.h>
25#include <linux/highmem.h>
26#include <linux/timer.h>
27#include <linux/slab.h>
28#include <linux/jiffies.h>
29#include <linux/spinlock.h>
30#include <linux/list.h>
31#include <linux/sysdev.h>
32#include <linux/ctype.h>
33
34#include <asm/uaccess.h>
35#include <asm/page.h>
36#include <asm/edac.h>
37
38#include "edac_mc.h"
39
#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__

#ifdef CONFIG_EDAC_DEBUG
/* Values of 0 to 4 will generate output */
int edac_debug_level = 1;
EXPORT_SYMBOL(edac_debug_level);
#endif

/* EDAC Controls, settable by module parameter, and sysfs */
static int log_ue = 1;
static int log_ce = 1;
static int panic_on_ue = 1;
static int poll_msec = 1000;

/* NOTE(review): initialized to 0 (off) although the old comment said
 * "default YES" — confirm the intended default before relying on it */
static int check_pci_parity = 0;
static int panic_on_pci_parity;	/* default no panic on PCI Parity */
static atomic_t pci_parity_count = ATOMIC_INIT(0);

/* lock to memory controller's control array */
static DECLARE_MUTEX(mem_ctls_mutex);
static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);

/* Structure of the whitelist and blacklist arrays */
struct edac_pci_device_list {
	unsigned int vendor;	/* Vendor ID */
	unsigned int device;	/* Device ID */
};


#define MAX_LISTED_PCI_DEVICES 32

/* List of PCI devices (vendor-id:device-id) that should be skipped */
static struct edac_pci_device_list pci_blacklist[MAX_LISTED_PCI_DEVICES];
static int pci_blacklist_count;

/* List of PCI devices (vendor-id:device-id) that should be scanned */
static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES];
static int pci_whitelist_count ;
78
/* START sysfs data and methods */

/* Human-readable names indexed by the MEM_* memory-type enum */
static const char *mem_types[] = {
	[MEM_EMPTY] = "Empty",
	[MEM_RESERVED] = "Reserved",
	[MEM_UNKNOWN] = "Unknown",
	[MEM_FPM] = "FPM",
	[MEM_EDO] = "EDO",
	[MEM_BEDO] = "BEDO",
	[MEM_SDR] = "Unbuffered-SDR",
	[MEM_RDR] = "Registered-SDR",
	[MEM_DDR] = "Unbuffered-DDR",
	[MEM_RDDR] = "Registered-DDR",
	[MEM_RMBS] = "RMBS"
};

/* Device-width names indexed by the DEV_* enum */
static const char *dev_types[] = {
	[DEV_UNKNOWN] = "Unknown",
	[DEV_X1] = "x1",
	[DEV_X2] = "x2",
	[DEV_X4] = "x4",
	[DEV_X8] = "x8",
	[DEV_X16] = "x16",
	[DEV_X32] = "x32",
	[DEV_X64] = "x64"
};

/* ECC capability names indexed by the EDAC_* enum */
static const char *edac_caps[] = {
	[EDAC_UNKNOWN] = "Unknown",
	[EDAC_NONE] = "None",
	[EDAC_RESERVED] = "Reserved",
	[EDAC_PARITY] = "PARITY",
	[EDAC_EC] = "EC",
	[EDAC_SECDED] = "SECDED",
	[EDAC_S2ECD2ED] = "S2ECD2ED",
	[EDAC_S4ECD4ED] = "S4ECD4ED",
	[EDAC_S8ECD8ED] = "S8ECD8ED",
	[EDAC_S16ECD16ED] = "S16ECD16ED"
};


/* sysfs object: /sys/devices/system/edac */
static struct sysdev_class edac_class = {
	set_kset_name("edac"),
};

/* sysfs objects:
 *	/sys/devices/system/edac/mc
 *	/sys/devices/system/edac/pci
 */
static struct kobject edac_memctrl_kobj;
static struct kobject edac_pci_kobj;
131
132/*
133 * /sys/devices/system/edac/mc;
134 * data structures and methods
135 */
/* Render a NUL-terminated string value plus a trailing newline into the
 * sysfs output buffer; returns the byte count written. */
static ssize_t memctrl_string_show(void *ptr, char *buffer)
{
	const char *text = ptr;

	return sprintf(buffer, "%s\n", text);
}
141
/* Render an integer control value as "<n>\n"; returns bytes written. */
static ssize_t memctrl_int_show(void *ptr, char *buffer)
{
	int *ival = ptr;

	return sprintf(buffer, "%d\n", *ival);
}
147
/* Parse a user write into an integer control; input not starting with a
 * digit is silently ignored.  Always reports the whole write consumed. */
static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
{
	int *ival = ptr;

	if (isdigit(*buffer))
		*ival = simple_strtoul(buffer, NULL, 0);

	return count;
}
157
/* A sysfs attribute bound to an arbitrary value pointer plus typed
 * show/store handlers; embedded 'attr' lets kobject code dispatch here. */
struct memctrl_dev_attribute {
	struct attribute attr;
	void *value;			/* the control variable exposed */
	ssize_t (*show)(void *,char *);
	ssize_t (*store)(void *, const char *, size_t);
};
164
165/* Set of show/store abstract level functions for memory control object */
166static ssize_t
167memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
168{
169 struct memctrl_dev_attribute *memctrl_dev;
170 memctrl_dev = (struct memctrl_dev_attribute*)attr;
171
172 if (memctrl_dev->show)
173 return memctrl_dev->show(memctrl_dev->value, buffer);
174 return -EIO;
175}
176
177static ssize_t
178memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
179 const char *buffer, size_t count)
180{
181 struct memctrl_dev_attribute *memctrl_dev;
182 memctrl_dev = (struct memctrl_dev_attribute*)attr;
183
184 if (memctrl_dev->store)
185 return memctrl_dev->store(memctrl_dev->value, buffer, count);
186 return -EIO;
187}
188
static struct sysfs_ops memctrlfs_ops = {
	.show = memctrl_dev_show,
	.store = memctrl_dev_store
};

/* Declare an attribute whose value pointer is &_name itself */
#define MEMCTRL_ATTR(_name,_mode,_show,_store)			\
struct memctrl_dev_attribute attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.value = &_name,					\
	.show = _show,						\
	.store = _store,					\
};

/* Declare an attribute bound to an explicit data pointer */
#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store)	\
struct memctrl_dev_attribute attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.value = _data,						\
	.show = _show,						\
	.store = _store,					\
};

/* read-only version string attribute */
MEMCTRL_STRING_ATTR(mc_version,EDAC_MC_VERSION,S_IRUGO,memctrl_string_show,NULL);

/* module-level control files exposed under ..../edac/mc */
MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
218
219
/* Base Attributes of the memory ECC object */
static struct memctrl_dev_attribute *memctrl_attr[] = {
	&attr_panic_on_ue,
	&attr_log_ue,
	&attr_log_ce,
	&attr_poll_msec,
	&attr_mc_version,
	NULL,
};

/* Main MC kobject release() function */
static void edac_memctrl_master_release(struct kobject *kobj)
{
	/* kobject is statically allocated: nothing to free, just trace */
	debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
}

/* kobj_type for the '..../edac/mc' directory */
static struct kobj_type ktype_memctrl = {
	.release = edac_memctrl_master_release,
	.sysfs_ops = &memctrlfs_ops,
	.default_attrs = (struct attribute **) memctrl_attr,
};
241
242
243/* Initialize the main sysfs entries for edac:
244 * /sys/devices/system/edac
245 *
246 * and children
247 *
248 * Return: 0 SUCCESS
249 * !0 FAILURE
250 */
251static int edac_sysfs_memctrl_setup(void)
252{
253 int err=0;
254
255 debugf1("MC: " __FILE__ ": %s()\n", __func__);
256
257 /* create the /sys/devices/system/edac directory */
258 err = sysdev_class_register(&edac_class);
259 if (!err) {
260 /* Init the MC's kobject */
261 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
262 kobject_init(&edac_memctrl_kobj);
263
264 edac_memctrl_kobj.parent = &edac_class.kset.kobj;
265 edac_memctrl_kobj.ktype = &ktype_memctrl;
266
267 /* generate sysfs "..../edac/mc" */
268 err = kobject_set_name(&edac_memctrl_kobj,"mc");
269 if (!err) {
270 /* FIXME: maybe new sysdev_create_subdir() */
271 err = kobject_register(&edac_memctrl_kobj);
272 if (err) {
273 debugf1("Failed to register '.../edac/mc'\n");
274 } else {
275 debugf1("Registered '.../edac/mc' kobject\n");
276 }
277 }
278 } else {
279 debugf1(KERN_WARNING "__FILE__ %s() error=%d\n", __func__,err);
280 }
281
282 return err;
283}
284
285/*
286 * MC teardown:
287 * the '..../edac/mc' kobject followed by '..../edac' itself
288 */
289static void edac_sysfs_memctrl_teardown(void)
290{
291 debugf0("MC: " __FILE__ ": %s()\n", __func__);
292
293 /* Unregister the MC's kobject */
294 kobject_unregister(&edac_memctrl_kobj);
295
296 /* release the master edac mc kobject */
297 kobject_put(&edac_memctrl_kobj);
298
299 /* Unregister the 'edac' object */
300 sysdev_class_unregister(&edac_class);
301}
302
303/*
304 * /sys/devices/system/edac/pci;
305 * data structures and methods
306 */
307
308struct list_control {
309 struct edac_pci_device_list *list;
310 int *count;
311};
312
313/* Output the list as: vendor_id:device:id<,vendor_id:device_id> */
314static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
315{
316 struct list_control *listctl;
317 struct edac_pci_device_list *list;
318 char *p = buffer;
319 int len=0;
320 int i;
321
322 listctl = ptr;
323 list = listctl->list;
324
325 for (i = 0; i < *(listctl->count); i++, list++ ) {
326 if (len > 0)
327 len += snprintf(p + len, (PAGE_SIZE-len), ",");
328
329 len += snprintf(p + len,
330 (PAGE_SIZE-len),
331 "%x:%x",
332 list->vendor,list->device);
333 }
334
335 len += snprintf(p + len,(PAGE_SIZE-len), "\n");
336
337 return (ssize_t) len;
338}
339
340/**
341 *
342 * Scan string from **s to **e looking for one 'vendor:device' tuple
343 * where each field is a hex value
344 *
345 * return 0 if an entry is NOT found
346 * return 1 if an entry is found
347 * fill in *vendor_id and *device_id with values found
348 *
349 * In both cases, make sure *s has been moved forward toward *e
350 */
351static int parse_one_device(const char **s,const char **e,
352 unsigned int *vendor_id, unsigned int *device_id)
353{
354 const char *runner, *p;
355
356 /* if null byte, we are done */
357 if (!**s) {
358 (*s)++; /* keep *s moving */
359 return 0;
360 }
361
362 /* skip over newlines & whitespace */
363 if ((**s == '\n') || isspace(**s)) {
364 (*s)++;
365 return 0;
366 }
367
368 if (!isxdigit(**s)) {
369 (*s)++;
370 return 0;
371 }
372
373 /* parse vendor_id */
374 runner = *s;
375 while (runner < *e) {
376 /* scan for vendor:device delimiter */
377 if (*runner == ':') {
378 *vendor_id = simple_strtol((char*) *s, (char**) &p, 16);
379 runner = p + 1;
380 break;
381 }
382 runner++;
383 }
384
385 if (!isxdigit(*runner)) {
386 *s = ++runner;
387 return 0;
388 }
389
390 /* parse device_id */
391 if (runner < *e) {
392 *device_id = simple_strtol((char*)runner, (char**)&p, 16);
393 runner = p;
394 }
395
396 *s = runner;
397
398 return 1;
399}
400
401static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
402 size_t count)
403{
404 struct list_control *listctl;
405 struct edac_pci_device_list *list;
406 unsigned int vendor_id, device_id;
407 const char *s, *e;
408 int *index;
409
410 s = (char*)buffer;
411 e = s + count;
412
413 listctl = ptr;
414 list = listctl->list;
415 index = listctl->count;
416
417 *index = 0;
418 while (*index < MAX_LISTED_PCI_DEVICES) {
419
420 if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
421 list[ *index ].vendor = vendor_id;
422 list[ *index ].device = device_id;
423 (*index)++;
424 }
425
426 /* check for all data consume */
427 if (s >= e)
428 break;
429 }
430
431 return count;
432}
433
/* Render an integer PCI-parity control as "<n>\n". */
static ssize_t edac_pci_int_show(void *ptr, char *buffer)
{
	return sprintf(buffer, "%d\n", *(int *) ptr);
}
439
/* Parse a user write into an integer PCI-parity control; non-numeric
 * input is silently ignored.  Always reports the whole write consumed. */
static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
{
	int *ival = ptr;

	if (isdigit(*buffer))
		*ival = simple_strtoul(buffer, NULL, 0);

	return count;
}
449
/* A PCI-parity sysfs attribute: value pointer plus show/store handlers,
 * dispatched via the embedded 'attr'. */
struct edac_pci_dev_attribute {
	struct attribute attr;
	void *value;			/* control variable or list_control */
	ssize_t (*show)(void *,char *);
	ssize_t (*store)(void *, const char *,size_t);
};
456
457/* Set of show/store abstract level functions for PCI Parity object */
458static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
459 char *buffer)
460{
461 struct edac_pci_dev_attribute *edac_pci_dev;
462 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
463
464 if (edac_pci_dev->show)
465 return edac_pci_dev->show(edac_pci_dev->value, buffer);
466 return -EIO;
467}
468
469static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr,
470 const char *buffer, size_t count)
471{
472 struct edac_pci_dev_attribute *edac_pci_dev;
473 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
474
475 if (edac_pci_dev->show)
476 return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
477 return -EIO;
478}
479
static struct sysfs_ops edac_pci_sysfs_ops = {
	.show = edac_pci_dev_show,
	.store = edac_pci_dev_store
};


/* Declare a PCI attribute whose value pointer is &_name itself */
#define EDAC_PCI_ATTR(_name,_mode,_show,_store)			\
struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.value = &_name,					\
	.show = _show,						\
	.store = _store,					\
};

/* Declare a PCI attribute bound to an explicit data pointer */
#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store)	\
struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.value = _data,						\
	.show = _show,						\
	.store = _store,					\
};

/* glue objects handed to the list show/store handlers */
static struct list_control pci_whitelist_control = {
	.list = pci_whitelist,
	.count = &pci_whitelist_count
};

static struct list_control pci_blacklist_control = {
	.list = pci_blacklist,
	.count = &pci_blacklist_count
};
511
/* whitelist attribute */
EDAC_PCI_STRING_ATTR(pci_parity_whitelist,
		&pci_whitelist_control,
		S_IRUGO|S_IWUSR,
		edac_pci_list_string_show,
		edac_pci_list_string_store);

/* blacklist attribute */
EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
		&pci_blacklist_control,
		S_IRUGO|S_IWUSR,
		edac_pci_list_string_show,
		edac_pci_list_string_store);

/* PCI Parity control files */
EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL);

/* Attributes exposed under the '..../edac/pci' kobject */
static struct edac_pci_dev_attribute *edac_pci_attr[] = {
	&edac_pci_attr_check_pci_parity,
	&edac_pci_attr_panic_on_pci_parity,
	&edac_pci_attr_pci_parity_count,
	&edac_pci_attr_pci_parity_whitelist,
	&edac_pci_attr_pci_parity_blacklist,
	NULL,
};
539
/* No memory to release: the pci kobject is statically allocated */
static void edac_pci_release(struct kobject *kobj)
{
	debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__);
}

/* kobj_type for the '..../edac/pci' directory */
static struct kobj_type ktype_edac_pci = {
	.release = edac_pci_release,
	.sysfs_ops = &edac_pci_sysfs_ops,
	.default_attrs = (struct attribute **) edac_pci_attr,
};
551
552/**
553 * edac_sysfs_pci_setup()
554 *
555 */
556static int edac_sysfs_pci_setup(void)
557{
558 int err;
559
560 debugf1("MC: " __FILE__ ": %s()\n", __func__);
561
562 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
563
564 kobject_init(&edac_pci_kobj);
565 edac_pci_kobj.parent = &edac_class.kset.kobj;
566 edac_pci_kobj.ktype = &ktype_edac_pci;
567
568 err = kobject_set_name(&edac_pci_kobj, "pci");
569 if (!err) {
570 /* Instanstiate the csrow object */
571 /* FIXME: maybe new sysdev_create_subdir() */
572 err = kobject_register(&edac_pci_kobj);
573 if (err)
574 debugf1("Failed to register '.../edac/pci'\n");
575 else
576 debugf1("Registered '.../edac/pci' kobject\n");
577 }
578 return err;
579}
580
581
/* Remove the '..../edac/pci' kobject and drop the setup reference.
 * NOTE(review): unregister+put mirrors the mc teardown; verify the
 * double reference drop is intended. */
static void edac_sysfs_pci_teardown(void)
{
	debugf0("MC: " __FILE__ ": %s()\n", __func__);

	kobject_unregister(&edac_pci_kobj);
	kobject_put(&edac_pci_kobj);
}
589
590/* EDAC sysfs CSROW data structures and methods */
591
592/* Set of more detailed csrow<id> attribute show/store functions */
593static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
594{
595 ssize_t size = 0;
596
597 if (csrow->nr_channels > 0) {
598 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
599 csrow->channels[0].label);
600 }
601 return size;
602}
603
604static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
605{
606 ssize_t size = 0;
607
608 if (csrow->nr_channels > 0) {
609 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
610 csrow->channels[1].label);
611 }
612 return size;
613}
614
615static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
616 const char *data, size_t size)
617{
618 ssize_t max_size = 0;
619
620 if (csrow->nr_channels > 0) {
621 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
622 strncpy(csrow->channels[0].label, data, max_size);
623 csrow->channels[0].label[max_size] = '\0';
624 }
625 return size;
626}
627
628static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
629 const char *data, size_t size)
630{
631 ssize_t max_size = 0;
632
633 if (csrow->nr_channels > 1) {
634 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
635 strncpy(csrow->channels[1].label, data, max_size);
636 csrow->channels[1].label[max_size] = '\0';
637 }
638 return max_size;
639}
640
/* Uncorrectable-error count for this row */
static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data)
{
	return sprintf(data,"%u\n", csrow->ue_count);
}

/* Correctable-error count for this row */
static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data)
{
	return sprintf(data,"%u\n", csrow->ce_count);
}

/* Channel-0 correctable-error count (empty output if no channel 0) */
static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
{
	ssize_t size = 0;

	if (csrow->nr_channels > 0) {
		size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
	}
	return size;
}

/* Channel-1 correctable-error count (empty output if no channel 1) */
static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
{
	ssize_t size = 0;

	if (csrow->nr_channels > 1) {
		size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
	}
	return size;
}
670
/* Row size in MiB, derived from its page count */
static ssize_t csrow_size_show(struct csrow_info *csrow, char *data)
{
	return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
}

/* Memory type name (see mem_types[]) */
static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data)
{
	return sprintf(data,"%s\n", mem_types[csrow->mtype]);
}

/* Device width name (see dev_types[]) */
static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data)
{
	return sprintf(data,"%s\n", dev_types[csrow->dtype]);
}

/* Active EDAC mode name (see edac_caps[]) */
static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
{
	return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
}
690
/* A csrow sysfs attribute with typed show/store handlers */
struct csrowdev_attribute {
	struct attribute attr;
	ssize_t (*show)(struct csrow_info *,char *);
	ssize_t (*store)(struct csrow_info *, const char *,size_t);
};

/* recover the csrow / attribute wrapper from the embedded members */
#define to_csrow(k) container_of(k, struct csrow_info, kobj)
#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)

/* Set of show/store higher level functions for csrow objects */
static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
		char *buffer)
{
	struct csrow_info *csrow = to_csrow(kobj);
	struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);

	if (csrowdev_attr->show)
		return csrowdev_attr->show(csrow, buffer);
	return -EIO;
}

static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
		const char *buffer, size_t count)
{
	struct csrow_info *csrow = to_csrow(kobj);
	struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);

	if (csrowdev_attr->store)
		return csrowdev_attr->store(csrow, buffer, count);
	return -EIO;
}

static struct sysfs_ops csrowfs_ops = {
	.show = csrowdev_show,
	.store = csrowdev_store
};

/* Declare one csrow<id>/<_name> attribute */
#define CSROWDEV_ATTR(_name,_mode,_show,_store)		\
struct csrowdev_attribute attr_##_name = {		\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show = _show,					\
	.store = _store,				\
};
734
/* csrow<id>/ read-only attribute files */
CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL);
CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL);
CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL);
CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL);
CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL);
CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL);
CSROWDEV_ATTR(ch0_ce_count,S_IRUGO,csrow_ch0_ce_count_show,NULL);
CSROWDEV_ATTR(ch1_ce_count,S_IRUGO,csrow_ch1_ce_count_show,NULL);

/* read-write DIMM label files */
CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
		csrow_ch0_dimm_label_show,
		csrow_ch0_dimm_label_store);
CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
		csrow_ch1_dimm_label_show,
		csrow_ch1_dimm_label_store);


/* Attributes of the CSROW<id> object */
static struct csrowdev_attribute *csrow_attr[] = {
	&attr_dev_type,
	&attr_mem_type,
	&attr_edac_mode,
	&attr_size_mb,
	&attr_ue_count,
	&attr_ce_count,
	&attr_ch0_ce_count,
	&attr_ch1_ce_count,
	&attr_ch0_dimm_label,
	&attr_ch1_dimm_label,
	NULL,
};
768
769
/* No memory to release: csrow kobjects live inside the mci allocation */
static void edac_csrow_instance_release(struct kobject *kobj)
{
	debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
}

/* kobj_type for each csrow<id> directory */
static struct kobj_type ktype_csrow = {
	.release = edac_csrow_instance_release,
	.sysfs_ops = &csrowfs_ops,
	.default_attrs = (struct attribute **) csrow_attr,
};
781
782/* Create a CSROW object under specifed edac_mc_device */
783static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
784 struct csrow_info *csrow, int index )
785{
786 int err = 0;
787
788 debugf0("MC: " __FILE__ ": %s()\n", __func__);
789
790 memset(&csrow->kobj, 0, sizeof(csrow->kobj));
791
792 /* generate ..../edac/mc/mc<id>/csrow<index> */
793
794 kobject_init(&csrow->kobj);
795 csrow->kobj.parent = edac_mci_kobj;
796 csrow->kobj.ktype = &ktype_csrow;
797
798 /* name this instance of csrow<id> */
799 err = kobject_set_name(&csrow->kobj,"csrow%d",index);
800 if (!err) {
801 /* Instanstiate the csrow object */
802 err = kobject_register(&csrow->kobj);
803 if (err)
804 debugf0("Failed to register CSROW%d\n",index);
805 else
806 debugf0("Registered CSROW%d\n",index);
807 }
808
809 return err;
810}
811
/* sysfs data structures and methods for the MCI kobjects */

/* Zero every error counter on the controller (global, per-row, and
 * per-channel) and restart the uptime clock; any write triggers it. */
static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
		const char *data, size_t count )
{
	int row, chan;

	mci->ue_noinfo_count = 0;
	mci->ce_noinfo_count = 0;
	mci->ue_count = 0;
	mci->ce_count = 0;
	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *ri = &mci->csrows[row];

		ri->ue_count = 0;
		ri->ce_count = 0;
		for (chan = 0; chan < ri->nr_channels; chan++)
			ri->channels[chan].ce_count = 0;
	}
	/* "seconds_since_reset" is measured from here */
	mci->start_time = jiffies;

	return count;
}
835
/* total uncorrectable errors seen by this controller */
static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data,"%d\n", mci->ue_count);
}

/* total correctable errors seen by this controller */
static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data,"%d\n", mci->ce_count);
}

/* correctable errors that could not be attributed to a row */
static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data,"%d\n", mci->ce_noinfo_count);
}

/* uncorrectable errors that could not be attributed to a row */
static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data,"%d\n", mci->ue_noinfo_count);
}

/* seconds elapsed since the counters were last reset */
static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
}

/* driver module name and version */
static ssize_t mci_mod_name_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data,"%s %s\n", mci->mod_name, mci->mod_ver);
}

/* controller name as reported by the low-level driver */
static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data,"%s\n", mci->ctl_name);
}
870
871static int mci_output_edac_cap(char *buf, unsigned long edac_cap)
872{
873 char *p = buf;
874 int bit_idx;
875
876 for (bit_idx = 0; bit_idx < 8 * sizeof(edac_cap); bit_idx++) {
877 if ((edac_cap >> bit_idx) & 0x1)
878 p += sprintf(p, "%s ", edac_caps[bit_idx]);
879 }
880
881 return p - buf;
882}
883
/* capabilities the hardware/driver could support */
static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
{
	char *p = data;

	p += mci_output_edac_cap(p,mci->edac_ctl_cap);
	p += sprintf(p, "\n");

	return p - data;
}

/* capabilities currently in effect */
static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
		char *data)
{
	char *p = data;

	p += mci_output_edac_cap(p,mci->edac_cap);
	p += sprintf(p, "\n");

	return p - data;
}
904
905static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
906{
907 char *p = buf;
908 int bit_idx;
909
910 for (bit_idx = 0; bit_idx < 8 * sizeof(mtype_cap); bit_idx++) {
911 if ((mtype_cap >> bit_idx) & 0x1)
912 p += sprintf(p, "%s ", mem_types[bit_idx]);
913 }
914
915 return p - buf;
916}
917
/* names of all memory types this controller supports */
static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data)
{
	char *p = data;

	p += mci_output_mtype_cap(p,mci->mtype_cap);
	p += sprintf(p, "\n");

	return p - data;
}
927
928static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
929{
930 int total_pages, csrow_idx;
931
932 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
933 csrow_idx++) {
934 struct csrow_info *csrow = &mci->csrows[csrow_idx];
935
936 if (!csrow->nr_pages)
937 continue;
938 total_pages += csrow->nr_pages;
939 }
940
941 return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
942}
943
/* An mc<id> sysfs attribute with typed show/store handlers */
struct mcidev_attribute {
	struct attribute attr;
	ssize_t (*show)(struct mem_ctl_info *,char *);
	ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
};

/* recover the mci / attribute wrapper from the embedded members */
#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)

/* Generic read dispatcher for mc<id> attributes */
static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
		char *buffer)
{
	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
	struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);

	if (mcidev_attr->show)
		return mcidev_attr->show(mem_ctl_info, buffer);
	return -EIO;
}

/* Generic write dispatcher for mc<id> attributes */
static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
		const char *buffer, size_t count)
{
	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
	struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);

	if (mcidev_attr->store)
		return mcidev_attr->store(mem_ctl_info, buffer, count);
	return -EIO;
}

static struct sysfs_ops mci_ops = {
	.show = mcidev_show,
	.store = mcidev_store
};

/* Declare one mc<id>/<_name> attribute */
#define MCIDEV_ATTR(_name,_mode,_show,_store)		\
struct mcidev_attribute mci_attr_##_name = {		\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show = _show,					\
	.store = _store,				\
};
986
/* Control file */
MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);

/* Attribute files */
MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
MCIDEV_ATTR(module_name,S_IRUGO,mci_mod_name_show,NULL);
MCIDEV_ATTR(edac_capability,S_IRUGO,mci_edac_capability_show,NULL);
MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
MCIDEV_ATTR(edac_current_capability,S_IRUGO,
	mci_edac_current_capability_show,NULL);
MCIDEV_ATTR(supported_mem_type,S_IRUGO,
	mci_supported_mem_type_show,NULL);


/* Attributes exposed under each mc<id> kobject */
static struct mcidev_attribute *mci_attr[] = {
	&mci_attr_reset_counters,
	&mci_attr_module_name,
	&mci_attr_mc_name,
	&mci_attr_edac_capability,
	&mci_attr_edac_current_capability,
	&mci_attr_supported_mem_type,
	&mci_attr_size_mb,
	&mci_attr_seconds_since_reset,
	&mci_attr_ue_noinfo_count,
	&mci_attr_ce_noinfo_count,
	&mci_attr_ue_count,
	&mci_attr_ce_count,
	NULL
};
1021
1022
1023/*
1024 * Release of a MC controlling instance
1025 */
1026static void edac_mci_instance_release(struct kobject *kobj)
1027{
1028 struct mem_ctl_info *mci;
1029 mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
1030
1031 debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n",
1032 __func__, mci->mc_idx);
1033
1034 kfree(mci);
1035}
1036
1037static struct kobj_type ktype_mci = {
1038 .release = edac_mci_instance_release,
1039 .sysfs_ops = &mci_ops,
1040 .default_attrs = (struct attribute **) mci_attr,
1041};
1042
1043#define EDAC_DEVICE_SYMLINK "device"
1044
1045/*
1046 * Create a new Memory Controller kobject instance,
1047 * mc<id> under the 'mc' directory
1048 *
1049 * Return:
1050 * 0 Success
1051 * !0 Failure
1052 */
1053static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1054{
1055 int i;
1056 int err;
1057 struct csrow_info *csrow;
1058 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
1059
1060 debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx);
1061
1062 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
1063 kobject_init(edac_mci_kobj);
1064
1065 /* set the name of the mc<id> object */
1066 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
1067 if (err)
1068 return err;
1069
1070 /* link to our parent the '..../edac/mc' object */
1071 edac_mci_kobj->parent = &edac_memctrl_kobj;
1072 edac_mci_kobj->ktype = &ktype_mci;
1073
1074 /* register the mc<id> kobject */
1075 err = kobject_register(edac_mci_kobj);
1076 if (err)
1077 return err;
1078
1079 /* create a symlink for the device */
1080 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
1081 EDAC_DEVICE_SYMLINK);
1082 if (err) {
1083 kobject_unregister(edac_mci_kobj);
1084 return err;
1085 }
1086
1087 /* Make directories for each CSROW object
1088 * under the mc<id> kobject
1089 */
1090 for (i = 0; i < mci->nr_csrows; i++) {
1091
1092 csrow = &mci->csrows[i];
1093
1094 /* Only expose populated CSROWs */
1095 if (csrow->nr_pages > 0) {
1096 err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
1097 if (err)
1098 goto fail;
1099 }
1100 }
1101
1102 /* Mark this MCI instance as having sysfs entries */
1103 mci->sysfs_active = MCI_SYSFS_ACTIVE;
1104
1105 return 0;
1106
1107
1108 /* CSROW error: backout what has already been registered, */
1109fail:
1110 for ( i--; i >= 0; i--) {
1111 if (csrow->nr_pages > 0) {
1112 kobject_unregister(&mci->csrows[i].kobj);
1113 kobject_put(&mci->csrows[i].kobj);
1114 }
1115 }
1116
1117 kobject_unregister(edac_mci_kobj);
1118 kobject_put(edac_mci_kobj);
1119
1120 return err;
1121}
1122
1123/*
1124 * remove a Memory Controller instance
1125 */
1126static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1127{
1128 int i;
1129
1130 debugf0("MC: " __FILE__ ": %s()\n", __func__);
1131
1132 /* remove all csrow kobjects */
1133 for (i = 0; i < mci->nr_csrows; i++) {
1134 if (mci->csrows[i].nr_pages > 0) {
1135 kobject_unregister(&mci->csrows[i].kobj);
1136 kobject_put(&mci->csrows[i].kobj);
1137 }
1138 }
1139
1140 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1141
1142 kobject_unregister(&mci->edac_mci_kobj);
1143 kobject_put(&mci->edac_mci_kobj);
1144}
1145
1146/* END OF sysfs data and methods */
1147
1148#ifdef CONFIG_EDAC_DEBUG
1149
EXPORT_SYMBOL(edac_mc_dump_channel);

/* Debug helper: dump one channel_info at debug level 4 */
void edac_mc_dump_channel(struct channel_info *chan)
{
	debugf4("\tchannel = %p\n", chan);
	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
	debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
	debugf4("\tchannel->label = '%s'\n", chan->label);
	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
}
1160
1161
EXPORT_SYMBOL(edac_mc_dump_csrow);

/* Debug helper: dump one csrow_info at debug level 4 */
void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	debugf4("\tcsrow = %p\n", csrow);
	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
	debugf4("\tcsrow->first_page = 0x%lx\n",
		csrow->first_page);
	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
	debugf4("\tcsrow->nr_channels = %d\n",
		csrow->nr_channels);
	debugf4("\tcsrow->channels = %p\n", csrow->channels);
	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}
1178
1179
1180EXPORT_SYMBOL(edac_mc_dump_mci);
1181
/* Dump the top-level fields of a mem_ctl_info to the debug log (level 3/4) */
void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	debugf3("\tmci = %p\n", mci);
	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
		mci->nr_csrows, mci->csrows);
	debugf3("\tpdev = %p\n", mci->pdev);
	debugf3("\tmod_name:ctl_name = %s:%s\n",
		mci->mod_name, mci->ctl_name);
	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}
1196
1197
1198#endif /* CONFIG_EDAC_DEBUG */
1199
/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
 * Adjust 'ptr' so that its alignment is at least as stringent as what the
 * compiler would provide for X and return the aligned result.
 *
 * The required alignment is derived from 'size'; the adjustment itself must
 * be derived from the address held in 'ptr'.  An already suitably aligned
 * pointer is returned unchanged.
 */
static inline char * align_ptr (void *ptr, unsigned size)
{
	unsigned align, r;

	/* Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return (char *) ptr;

	/* The adjustment must be computed from the address, not from 'size':
	 * a struct's size is always a multiple of its alignment, so
	 * 'size % align' would always be 0 and a misaligned pointer would
	 * never be corrected.
	 */
	r = (unsigned long) ptr % align;

	if (r == 0)
		return (char *) ptr;

	return (char *) (((unsigned long) ptr) + align - r);
}
1233
1234
1235EXPORT_SYMBOL(edac_mc_alloc);
1236
/**
 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
 * @size_pvt: size of private storage needed
 * @nr_csrows: Number of CSROWS needed for this MC
 * @nr_chans: Number of channels for the MC
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * Only can be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * Returns:
 *	NULL	allocation failed
 *	struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
					unsigned nr_chans)
{
	struct mem_ctl_info *mci;
	struct csrow_info *csi, *csrow;
	struct channel_info *chi, *chp, *chan;
	void *pvt;
	unsigned size;
	int row, chn;

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 *
	 * The layout is computed relative to address 0, so each pointer below
	 * is really a byte offset until the kmalloc'ed base is added in.
	 */
	mci = (struct mem_ctl_info *) 0;
	csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
	chi = (struct channel_info *)
			align_ptr(&csi[nr_csrows], sizeof(*chi));
	pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
	/* 'pvt' is the offset of the private area, so offset + sz_pvt is the
	 * total size of the whole chunk
	 */
	size = ((unsigned long) pvt) + sz_pvt;

	if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
	chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
	pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;

	memset(mci, 0, size);	/* clear all fields */

	mci->csrows = csi;
	mci->pvt_info = pvt;
	mci->nr_csrows = nr_csrows;

	/* wire up the csrow <-> channel forward and back pointers */
	for (row = 0; row < nr_csrows; row++) {
		csrow = &csi[row];
		csrow->csrow_idx = row;
		csrow->mci = mci;
		csrow->nr_channels = nr_chans;
		chp = &chi[row * nr_chans];
		csrow->channels = chp;

		for (chn = 0; chn < nr_chans; chn++) {
			chan = &chp[chn];
			chan->chan_idx = chn;
			chan->csrow = csrow;
		}
	}

	return mci;
}
1308
1309
1310EXPORT_SYMBOL(edac_mc_free);
1311
1312/**
1313 * edac_mc_free: Free a previously allocated 'mci' structure
1314 * @mci: pointer to a struct mem_ctl_info structure
1315 *
1316 * Free up a previously allocated mci structure
1317 * A MCI structure can be in 2 states after being allocated
1318 * by edac_mc_alloc().
1319 * 1) Allocated in a MC driver's probe, but not yet committed
1320 * 2) Allocated and committed, by a call to edac_mc_add_mc()
1321 * edac_mc_add_mc() is the function that adds the sysfs entries
1322 * thus, this free function must determine which state the 'mci'
1323 * structure is in, then either free it directly or
1324 * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
1325 *
1326 * VOID Return
1327 */
1328void edac_mc_free(struct mem_ctl_info *mci)
1329{
1330 /* only if sysfs entries for this mci instance exist
1331 * do we remove them and defer the actual kfree via
1332 * the kobject 'release()' callback.
1333 *
1334 * Otherwise, do a straight kfree now.
1335 */
1336 if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
1337 edac_remove_sysfs_mci_device(mci);
1338 else
1339 kfree(mci);
1340}
1341
1342
1343
1344EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
1345
1346struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
1347{
1348 struct mem_ctl_info *mci;
1349 struct list_head *item;
1350
1351 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1352
1353 list_for_each(item, &mc_devices) {
1354 mci = list_entry(item, struct mem_ctl_info, link);
1355
1356 if (mci->pdev == pdev)
1357 return mci;
1358 }
1359
1360 return NULL;
1361}
1362
/* Insert 'mci' into the global controller list, assigning it the lowest
 * unused mc_idx.  The list is kept sorted by mc_idx, so the first "hole"
 * in the index sequence is reused.  Caller holds mem_ctls_mutex.
 *
 * Returns 0 on success, 1 if the pci_dev already has an mci assigned.
 */
static int add_mc_to_global_list (struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;
	int i;

	if (list_empty(&mc_devices)) {
		/* first controller: index 0, becomes the whole list */
		mci->mc_idx = 0;
		insert_before = &mc_devices;
	} else {
		/* refuse to register the same pci_dev twice */
		if (edac_mc_find_mci_by_pdev(mci->pdev)) {
			printk(KERN_WARNING
				"EDAC MC: %s (%s) %s %s already assigned %d\n",
				mci->pdev->dev.bus_id, pci_name(mci->pdev),
				mci->mod_name, mci->ctl_name, mci->mc_idx);
			return 1;
		}

		insert_before = NULL;
		i = 0;

		/* scan for the first gap in the mc_idx sequence; inserting
		 * before that element keeps the list sorted
		 */
		list_for_each(item, &mc_devices) {
			p = list_entry(item, struct mem_ctl_info, link);

			if (p->mc_idx != i) {
				insert_before = item;
				break;
			}

			i++;
		}

		mci->mc_idx = i;

		/* no gap found: append at the tail */
		if (insert_before == NULL)
			insert_before = &mc_devices;
	}

	/* RCU insertion - the list is also deleted from via RCU, see
	 * del_mc_from_global_list()
	 */
	list_add_tail_rcu(&mci->link, insert_before);
	return 0;
}
1404
1405
1406
1407EXPORT_SYMBOL(edac_mc_add_mc);
1408
1409/**
1410 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list
1411 * @mci: pointer to the mci structure to be added to the list
1412 *
1413 * Return:
1414 * 0 Success
1415 * !0 Failure
1416 */
1417
1418/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	int rc = 1;	/* assume failure */

	debugf0("MC: " __FILE__ ": %s()\n", __func__);
#ifdef CONFIG_EDAC_DEBUG
	/* at the higher debug levels dump the full mci/csrow/channel state */
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);
	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			int j;
			edac_mc_dump_csrow(&mci->csrows[i]);
			for (j = 0; j < mci->csrows[i].nr_channels; j++)
				edac_mc_dump_channel(&mci->csrows[i].
							channels[j]);
		}
	}
#endif
	/* serialize against other add/del operations */
	down(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto finish;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		printk(KERN_WARNING
			"EDAC MC%d: failed to create sysfs device\n",
			mci->mc_idx);
		/* FIXME - should there be an error code and unwind?
		 * NOTE(review): the mci is left on the global list on this
		 * path even though sysfs setup failed - confirm whether it
		 * should be removed again via del_mc_from_global_list()
		 */
		goto finish;
	}

	/* Report action taken */
	printk(KERN_INFO
		"EDAC MC%d: Giving out device to %s %s: PCI %s\n",
		mci->mc_idx, mci->mod_name, mci->ctl_name,
		pci_name(mci->pdev));


	rc = 0;

finish:
	up(&mem_ctls_mutex);
	return rc;
}
1468
1469
1470
/* RCU callback: runs after a grace period, when no reader can still be
 * traversing the old list.  Re-initializes the (now unlinked) list node
 * and wakes the waiter in del_mc_from_global_list().
 */
static void complete_mc_list_del (struct rcu_head *head)
{
	struct mem_ctl_info *mci;

	mci = container_of(head, struct mem_ctl_info, rcu);
	INIT_LIST_HEAD(&mci->link);
	complete(&mci->complete);
}
1479
/* Unlink 'mci' from the global list and block until an RCU grace period
 * has elapsed, so no lock-free reader can still hold a reference when the
 * caller proceeds to tear the structure down.
 */
static void del_mc_from_global_list (struct mem_ctl_info *mci)
{
	list_del_rcu(&mci->link);
	init_completion(&mci->complete);
	call_rcu(&mci->rcu, complete_mc_list_del);
	wait_for_completion(&mci->complete);
}
1487
1488EXPORT_SYMBOL(edac_mc_del_mc);
1489
1490/**
1491 * edac_mc_del_mc: Remove the specified mci structure from global list
1492 * @mci: Pointer to struct mem_ctl_info structure
1493 *
1494 * Returns:
1495 * 0 Success
1496 * 1 Failure
1497 */
1498int edac_mc_del_mc(struct mem_ctl_info *mci)
1499{
1500 int rc = 1;
1501
1502 debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
1503 down(&mem_ctls_mutex);
1504 del_mc_from_global_list(mci);
1505 printk(KERN_INFO
1506 "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
1507 mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
1508 pci_name(mci->pdev));
1509 rc = 0;
1510 up(&mem_ctls_mutex);
1511
1512 return rc;
1513}
1514
1515
1516EXPORT_SYMBOL(edac_mc_scrub_block);
1517
/* Re-write (scrub) 'size' bytes at 'offset' within page frame 'page'
 * using an architecture-specific atomic read-modify-write.
 */
void edac_mc_scrub_block(unsigned long page, unsigned long offset,
		u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	debugf3("MC: " __FILE__ ": %s()\n", __func__);

	/* ECC error page was not in our memory. Ignore it. */
	if(!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	/* irqs disabled around the atomic kmap for highmem pages -
	 * presumably to keep the kmap_atomic slot safe from interrupt
	 * context (TODO confirm)
	 */
	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr, KM_BOUNCE_READ);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
1548
1549
1550/* FIXME - should return -1 */
1551EXPORT_SYMBOL(edac_mc_find_csrow_by_page);
1552
1553int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1554 unsigned long page)
1555{
1556 struct csrow_info *csrows = mci->csrows;
1557 int row, i;
1558
1559 debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__,
1560 page);
1561 row = -1;
1562
1563 for (i = 0; i < mci->nr_csrows; i++) {
1564 struct csrow_info *csrow = &csrows[i];
1565
1566 if (csrow->nr_pages == 0)
1567 continue;
1568
1569 debugf3("MC%d: " __FILE__
1570 ": %s(): first(0x%lx) page(0x%lx)"
1571 " last(0x%lx) mask(0x%lx)\n", mci->mc_idx,
1572 __func__, csrow->first_page, page,
1573 csrow->last_page, csrow->page_mask);
1574
1575 if ((page >= csrow->first_page) &&
1576 (page <= csrow->last_page) &&
1577 ((page & csrow->page_mask) ==
1578 (csrow->first_page & csrow->page_mask))) {
1579 row = i;
1580 break;
1581 }
1582 }
1583
1584 if (row == -1)
1585 printk(KERN_ERR
1586 "EDAC MC%d: could not look up page error address %lx\n",
1587 mci->mc_idx, (unsigned long) page);
1588
1589 return row;
1590}
1591
1592
1593EXPORT_SYMBOL(edac_mc_handle_ce);
1594
1595/* FIXME - setable log (warning/emerg) levels */
1596/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
/* Log a Correctable Error (CE) against csrow 'row' / channel 'channel',
 * bump the CE counters, and - if software scrubbing is enabled for this
 * controller - re-write the affected block via edac_mc_scrub_block().
 */
void edac_mc_handle_ce(struct mem_ctl_info *mci,
		unsigned long page_frame_number,
		unsigned long offset_in_page,
		unsigned long syndrome, int row, int channel,
		const char *msg)
{
	unsigned long remapped_page;

	debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);

	/* FIXME - maybe make panic on INTERNAL ERROR an option */
	/* reject an out-of-range row reported by a buggy driver */
	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		printk(KERN_ERR
			"EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
			mci->mc_idx, row, mci->nr_csrows);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}
	/* likewise for an out-of-range channel on that row */
	if (channel >= mci->csrows[row].nr_channels || channel < 0) {
		/* something is wrong */
		printk(KERN_ERR
			"EDAC MC%d: INTERNAL ERROR: channel out of range "
			"(%d >= %d)\n",
			mci->mc_idx, channel, mci->csrows[row].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (log_ce)
		/* FIXME - put in DIMM location */
		printk(KERN_WARNING
			"EDAC MC%d: CE page 0x%lx, offset 0x%lx,"
			" grain %d, syndrome 0x%lx, row %d, channel %d,"
			" label \"%s\": %s\n", mci->mc_idx,
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, syndrome, row, channel,
			mci->csrows[row].channels[channel].label, msg);

	/* bump the global, per-csrow and per-channel CE counters */
	mci->ce_count++;
	mci->csrows[row].ce_count++;
	mci->csrows[row].channels[channel].ce_count++;

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some MC's can remap memory so that it is still available
		 * at a different address when PCI devices map into memory.
		 * MC's that can't do this lose the memory where PCI devices
		 * are mapped. This mapping is MC dependant and so we call
		 * back into the MC driver for it to map the MC page to
		 * a physical (CPU) page which can then be mapped to a virtual
		 * page - which can then be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page, offset_in_page,
					mci->csrows[row].grain);
	}
}
1658
1659
1660EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
1661
1662void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
1663 const char *msg)
1664{
1665 if (log_ce)
1666 printk(KERN_WARNING
1667 "EDAC MC%d: CE - no information available: %s\n",
1668 mci->mc_idx, msg);
1669 mci->ce_noinfo_count++;
1670 mci->ce_count++;
1671}
1672
1673
1674EXPORT_SYMBOL(edac_mc_handle_ue);
1675
/* Log an Uncorrectable Error (UE) against csrow 'row', listing every
 * channel label on that row, bump the UE counters, and panic if the
 * 'panic_on_ue' module parameter is set.
 */
void edac_mc_handle_ue(struct mem_ctl_info *mci,
		unsigned long page_frame_number,
		unsigned long offset_in_page, int row,
		const char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;
	char labels[len + 1];	/* ':'-joined channel label string */
	char *pos = labels;
	int chan;
	int chars;

	debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);

	/* FIXME - maybe make panic on INTERNAL ERROR an option */
	/* reject an out-of-range row reported by a buggy driver */
	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		printk(KERN_ERR
			"EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
			mci->mc_idx, row, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	/* build "label0:label1:..." from the row's channels
	 * NOTE(review): snprintf returns the would-be length on truncation,
	 * so 'len'/'pos' can step past the buffer end; harmless while each
	 * label fits in EDAC_MC_LABEL_LEN, but verify if label sizes change
	 */
	chars = snprintf(pos, len + 1, "%s",
			mci->csrows[row].channels[0].label);
	len -= chars;
	pos += chars;
	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
	     chan++) {
		chars = snprintf(pos, len + 1, ":%s",
				mci->csrows[row].channels[chan].label);
		len -= chars;
		pos += chars;
	}

	if (log_ue)
		printk(KERN_EMERG
			"EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
			" labels \"%s\": %s\n", mci->mc_idx,
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, row, labels, msg);

	if (panic_on_ue)
		panic
		    ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
		     " labels \"%s\": %s\n", mci->mc_idx,
		     page_frame_number, offset_in_page,
		     mci->csrows[row].grain, row, labels, msg);

	/* bump the global and per-csrow UE counters */
	mci->ue_count++;
	mci->csrows[row].ue_count++;
}
1728
1729
1730EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
1731
1732void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
1733 const char *msg)
1734{
1735 if (panic_on_ue)
1736 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
1737
1738 if (log_ue)
1739 printk(KERN_WARNING
1740 "EDAC MC%d: UE - no information available: %s\n",
1741 mci->mc_idx, msg);
1742 mci->ue_noinfo_count++;
1743 mci->ue_count++;
1744}
1745
1746
1747#ifdef CONFIG_PCI
1748
/* Read, mask and reset the parity/SERR bits of a device's PCI status
 * register.  'secondary' selects a bridge's secondary-side status register
 * instead of the primary one.
 *
 * Returns the masked error bits that were set, or 0 if none were set or
 * the device appears to have been hot-removed.
 */
static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
{
	int where;
	u16 status;

	where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
	pci_read_config_word(dev, where, &status);

	/* If we get back 0xFFFF then we must suspect that the card has been pulled but
	   the Linux PCI layer has not yet finished cleaning up. We don't want to report
	   on such devices */

	if (status == 0xFFFF) {
		u32 sanity;
		pci_read_config_dword(dev, 0, &sanity);
		if (sanity == 0xFFFFFFFF)
			return 0;
	}
	/* keep only the parity/system-error bits we report on */
	status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
		PCI_STATUS_PARITY;

	if (status)
		/* reset only the bits we are interested in */
		pci_write_config_word(dev, where, status);

	return status;
}
1776
1777typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
1778
1779/* Clear any PCI parity errors logged by this device. */
1780static void edac_pci_dev_parity_clear( struct pci_dev *dev )
1781{
1782 u8 header_type;
1783
1784 get_pci_parity_status(dev, 0);
1785
1786 /* read the device TYPE, looking for bridges */
1787 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1788
1789 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
1790 get_pci_parity_status(dev, 1);
1791}
1792
1793/*
1794 * PCI Parity polling
1795 *
1796 */
1797static void edac_pci_dev_parity_test(struct pci_dev *dev)
1798{
1799 u16 status;
1800 u8 header_type;
1801
1802 /* read the STATUS register on this device
1803 */
1804 status = get_pci_parity_status(dev, 0);
1805
1806 debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
1807
1808 /* check the status reg for errors */
1809 if (status) {
1810 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1811 printk(KERN_CRIT
1812 "EDAC PCI- "
1813 "Signaled System Error on %s\n",
1814 pci_name (dev));
1815
1816 if (status & (PCI_STATUS_PARITY)) {
1817 printk(KERN_CRIT
1818 "EDAC PCI- "
1819 "Master Data Parity Error on %s\n",
1820 pci_name (dev));
1821
1822 atomic_inc(&pci_parity_count);
1823 }
1824
1825 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1826 printk(KERN_CRIT
1827 "EDAC PCI- "
1828 "Detected Parity Error on %s\n",
1829 pci_name (dev));
1830
1831 atomic_inc(&pci_parity_count);
1832 }
1833 }
1834
1835 /* read the device TYPE, looking for bridges */
1836 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1837
1838 debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
1839
1840 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
1841 /* On bridges, need to examine secondary status register */
1842 status = get_pci_parity_status(dev, 1);
1843
1844 debugf2("PCI SEC_STATUS= 0x%04x %s\n",
1845 status, dev->dev.bus_id );
1846
1847 /* check the secondary status reg for errors */
1848 if (status) {
1849 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1850 printk(KERN_CRIT
1851 "EDAC PCI-Bridge- "
1852 "Signaled System Error on %s\n",
1853 pci_name (dev));
1854
1855 if (status & (PCI_STATUS_PARITY)) {
1856 printk(KERN_CRIT
1857 "EDAC PCI-Bridge- "
1858 "Master Data Parity Error on %s\n",
1859 pci_name (dev));
1860
1861 atomic_inc(&pci_parity_count);
1862 }
1863
1864 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1865 printk(KERN_CRIT
1866 "EDAC PCI-Bridge- "
1867 "Detected Parity Error on %s\n",
1868 pci_name (dev));
1869
1870 atomic_inc(&pci_parity_count);
1871 }
1872 }
1873 }
1874}
1875
1876/*
1877 * check_dev_on_list: Scan for a PCI device on a white/black list
1878 * @list: an EDAC &edac_pci_device_list white/black list pointer
1879 * @free_index: index of next free entry on the list
1880 * @pci_dev: PCI Device pointer
1881 *
1882 * see if list contains the device.
1883 *
1884 * Returns: 0 not found
1885 * 1 found on list
1886 */
1887static int check_dev_on_list(struct edac_pci_device_list *list, int free_index,
1888 struct pci_dev *dev)
1889{
1890 int i;
1891 int rc = 0; /* Assume not found */
1892 unsigned short vendor=dev->vendor;
1893 unsigned short device=dev->device;
1894
1895 /* Scan the list, looking for a vendor/device match
1896 */
1897 for (i = 0; i < free_index; i++, list++ ) {
1898 if ( (list->vendor == vendor ) &&
1899 (list->device == device )) {
1900 rc = 1;
1901 break;
1902 }
1903 }
1904
1905 return rc;
1906}
1907
/*
 * pci_dev parity list iterator
 * Scan the PCI device list for one iteration, looking for SERRORs
 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices,
 * applying 'fn' to each device that passes the white/black list policy.
 */
static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
{
	struct pci_dev *dev=NULL;

	/* request for kernel access to the next PCI device, if any,
	 * and while we are looking at it have its reference count
	 * bumped until we are done with it
	 */
	while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {

		/* if whitelist exists then it has priority, so only scan those
		 * devices on the whitelist
		 */
		if (pci_whitelist_count > 0 ) {
			if (check_dev_on_list(pci_whitelist,
					pci_whitelist_count, dev))
				fn(dev);
		} else {
			/*
			 * if no whitelist, then check if this devices is
			 * blacklisted
			 */
			if (!check_dev_on_list(pci_blacklist,
					pci_blacklist_count, dev))
				fn(dev);
		}
	}
}
1941
1942static void do_pci_parity_check(void)
1943{
1944 unsigned long flags;
1945 int before_count;
1946
1947 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1948
1949 if (!check_pci_parity)
1950 return;
1951
1952 before_count = atomic_read(&pci_parity_count);
1953
1954 /* scan all PCI devices looking for a Parity Error on devices and
1955 * bridges
1956 */
1957 local_irq_save(flags);
1958 edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
1959 local_irq_restore(flags);
1960
1961 /* Only if operator has selected panic on PCI Error */
1962 if (panic_on_pci_parity) {
1963 /* If the count is different 'after' from 'before' */
1964 if (before_count != atomic_read(&pci_parity_count))
1965 panic("EDAC: PCI Parity Error");
1966 }
1967}
1968
1969
/* One-shot scan applying edac_pci_dev_parity_clear() to every PCI device */
static inline void clear_pci_parity_errors(void)
{
	/* Clear any PCI bus parity errors that devices initially have logged
	 * in their registers.
	 */
	edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
}
1977
1978
1979#else /* CONFIG_PCI */
1980
1981
/* CONFIG_PCI not set: PCI parity checking compiles away to no-ops */
static inline void do_pci_parity_check(void)
{
	/* no-op */
}


static inline void clear_pci_parity_errors(void)
{
	/* no-op */
}
1992
1993
1994#endif /* CONFIG_PCI */
1995
/*
 * Iterate over all MC instances and check for ECC, et al, errors
 * by invoking each driver's registered edac_check() callback.
 */
static inline void check_mc_devices (void)
{
	unsigned long flags;
	struct list_head *item;
	struct mem_ctl_info *mci;

	debugf3("MC: " __FILE__ ": %s()\n", __func__);

	/* during poll, have interrupts off */
	local_irq_save(flags);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		/* a driver may register without a check callback */
		if (mci->edac_check != NULL)
			mci->edac_check(mci);
	}

	local_irq_restore(flags);
}
2019
2020
/*
 * Check MC status every poll_msec.
 * Check PCI status every poll_msec as well.
 *
 * This where the work gets done for edac.
 *
 * SMP safe, doesn't use NMI, and auto-rate-limits.
 */
static void do_edac_check(void)
{

	debugf3("MC: " __FILE__ ": %s()\n", __func__);

	/* memory controllers first, then the PCI bus scan */
	check_mc_devices();

	do_pci_parity_check();
}
2038
2039
/*
 * EDAC thread state information, shared between edac_mc_init/exit and
 * the polling thread itself.
 */
struct bs_thread_info
{
	struct task_struct *task;	/* the polling thread */
	struct completion *event;	/* startup/shutdown handshake */
	char *name;			/* thread name passed to daemonize() */
	void (*run)(void);		/* work function; NULL means stop */
};

static struct bs_thread_info bs_thread;
2052
/*
 * edac_kernel_thread
 * This the kernel thread that processes edac operations
 * in a normal thread environment
 *
 * 'arg' is a struct bs_thread_info; the thread repeatedly invokes
 * ->run() and sleeps poll_msec between passes, until ->run is cleared,
 * then signals ->event and exits.
 */
static int edac_kernel_thread(void *arg)
{
	struct bs_thread_info *thread = (struct bs_thread_info *) arg;

	/* detach thread */
	daemonize(thread->name);

	/* edac_mc_exit() stops us by clearing ->run and sending SIGKILL */
	current->exit_signal = SIGCHLD;
	allow_signal(SIGKILL);
	thread->task = current;

	/* indicate to starting task we have started */
	complete(thread->event);

	/* loop forever, until we are told to stop */
	while(thread->run != NULL) {
		void (*run)(void);

		/* call the function to check the memory controllers */
		run = thread->run;
		if (run)
			run();

		/* a signal here only serves to wake us early; discard it */
		if (signal_pending(current))
			flush_signals(current);

		/* ensure we are interruptable */
		set_current_state(TASK_INTERRUPTIBLE);

		/* goto sleep for the interval */
		schedule_timeout((HZ * poll_msec) / 1000);
		try_to_freeze();
	}

	/* notify waiter that we are exiting */
	complete(thread->event);

	return 0;
}
2097
/*
 * edac_mc_init
 * module initialization entry point: clears stale PCI parity state,
 * creates the sysfs entries, and starts the polling thread.
 */
static int __init edac_mc_init(void)
{
	int ret;
	struct completion event;

	printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");

	/*
	 * Harvest and clear any boot/initialization PCI parity errors
	 *
	 * FIXME: This only clears errors logged by devices present at time of
	 * 	module initialization.  We should also do an initial clear
	 *	of each newly hotplugged device.
	 */
	clear_pci_parity_errors();

	/* perform check for first time to harvest boot leftovers */
	do_edac_check();

	/* Create the MC sysfs entries */
	if (edac_sysfs_memctrl_setup()) {
		printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n");
		return -ENODEV;
	}

	/* Create the PCI parity sysfs entries */
	if (edac_sysfs_pci_setup()) {
		edac_sysfs_memctrl_teardown();
		printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n");
		return -ENODEV;
	}

	/* Set up the thread state before the thread can run */
	init_completion(&event);
	bs_thread.event = &event;
	bs_thread.name = "kedac";
	bs_thread.run = do_edac_check;

	/* create our kernel thread */
	ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL);
	if (ret < 0) {
		/* remove the sysfs entries */
		edac_sysfs_memctrl_teardown();
		edac_sysfs_pci_teardown();
		return -ENOMEM;
	}

	/* wait for our kernel thread's ack that it is up and running */
	wait_for_completion(&event);

	return 0;
}
2154
2155
/*
 * edac_mc_exit()
 * module exit/termination function: stops the polling thread and
 * removes the sysfs entries.
 */
static void __exit edac_mc_exit(void)
{
	struct completion event;

	debugf0("MC: " __FILE__ ": %s()\n", __func__);

	/* handshake used to wait for the polling thread to finish */
	init_completion(&event);
	bs_thread.event = &event;

	/* As soon as ->run is set to NULL, the task could disappear,
	 * so we need to hold tasklist_lock until we have sent the signal
	 */
	read_lock(&tasklist_lock);
	bs_thread.run = NULL;
	send_sig(SIGKILL, bs_thread.task, 1);
	read_unlock(&tasklist_lock);
	wait_for_completion(&event);

	/* tear down the sysfs device */
	edac_sysfs_memctrl_teardown();
	edac_sysfs_pci_teardown();
}
2182
2183
2184
2185
2186module_init(edac_mc_init);
2187module_exit(edac_mc_exit);
2188
2189MODULE_LICENSE("GPL");
2190MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
2191 "Based on.work by Dan Hollis et al");
2192MODULE_DESCRIPTION("Core library routines for MC reporting");
2193
2194module_param(panic_on_ue, int, 0644);
2195MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
2196module_param(check_pci_parity, int, 0644);
2197MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
2198module_param(panic_on_pci_parity, int, 0644);
2199MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
2200module_param(log_ue, int, 0644);
2201MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
2202module_param(log_ce, int, 0644);
2203MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
2204module_param(poll_msec, int, 0644);
2205MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
2206#ifdef CONFIG_EDAC_DEBUG
2207module_param(edac_debug_level, int, 0644);
2208MODULE_PARM_DESC(edac_debug_level, "Debug level");
2209#endif
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644
index 00000000000..75ecf484a43
--- /dev/null
+++ b/drivers/edac/edac_mc.h
@@ -0,0 +1,448 @@
1/*
2 * MC kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * NMI handling support added by
12 * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
13 *
14 * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $
15 *
16 */
17
18
19#ifndef _EDAC_MC_H_
20#define _EDAC_MC_H_
21
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/spinlock.h>
28#include <linux/smp.h>
29#include <linux/pci.h>
30#include <linux/time.h>
31#include <linux/nmi.h>
32#include <linux/rcupdate.h>
33#include <linux/completion.h>
34#include <linux/kobject.h>
35
36
#define EDAC_MC_LABEL_LEN	31	/* max DIMM label length (w/o NUL) */
#define MC_PROC_NAME_MAX_LEN 7		/* max length of an mc's proc name */

/* convert a page count into MiB, whichever way PAGE_SHIFT relates to 2^20 */
#if PAGE_SHIFT < 20
#define PAGES_TO_MiB( pages )	( ( pages ) >> ( 20 - PAGE_SHIFT ) )
#else				/* PAGE_SHIFT >= 20 */
#define PAGES_TO_MiB( pages )	( ( pages ) << ( PAGE_SHIFT - 20 ) )
#endif

/* debugfN() prints at KERN_DEBUG when edac_debug_level >= N;
 * all of them compile to nothing without CONFIG_EDAC_DEBUG
 */
#ifdef CONFIG_EDAC_DEBUG
extern int edac_debug_level;
#define edac_debug_printk(level, fmt, args...) \
do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0)
#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
#else				/* !CONFIG_EDAC_DEBUG */
#define debugf0( ... )
#define debugf1( ... )
#define debugf2( ... )
#define debugf3( ... )
#define debugf4( ... )
#endif				/* !CONFIG_EDAC_DEBUG */

/* BS_MOD_STR is the stringified module basename (e.g. "edac_mc") */
#define bs_xstr(s) bs_str(s)
#define bs_str(s) #s
#define BS_MOD_STR bs_xstr(KBUILD_BASENAME)

#define BIT(x) (1 << (x))	/* single-bit mask helper */

/* expands to the vendor,device ID pair for pci_device_id tables */
#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev
/* memory devices: output width, in bits, of the individual DRAM parts */
enum dev_type {
	DEV_UNKNOWN = 0,
	DEV_X1,
	DEV_X2,
	DEV_X4,
	DEV_X8,
	DEV_X16,
	DEV_X32,		/* Do these parts exist? */
	DEV_X64			/* Do these parts exist? */
};

/* bitmask forms of enum dev_type, for capability fields */
#define DEV_FLAG_UNKNOWN	BIT(DEV_UNKNOWN)
#define DEV_FLAG_X1		BIT(DEV_X1)
#define DEV_FLAG_X2		BIT(DEV_X2)
#define DEV_FLAG_X4		BIT(DEV_X4)
#define DEV_FLAG_X8		BIT(DEV_X8)
#define DEV_FLAG_X16		BIT(DEV_X16)
#define DEV_FLAG_X32		BIT(DEV_X32)
#define DEV_FLAG_X64		BIT(DEV_X64)

/* memory types */
enum mem_type {
	MEM_EMPTY = 0,		/* Empty csrow */
	MEM_RESERVED,		/* Reserved csrow type */
	MEM_UNKNOWN,		/* Unknown csrow type */
	MEM_FPM,		/* Fast page mode */
	MEM_EDO,		/* Extended data out */
	MEM_BEDO,		/* Burst Extended data out */
	MEM_SDR,		/* Single data rate SDRAM */
	MEM_RDR,		/* Registered single data rate SDRAM */
	MEM_DDR,		/* Double data rate SDRAM */
	MEM_RDDR,		/* Registered Double data rate SDRAM */
	MEM_RMBS		/* Rambus DRAM */
};

/* bitmask forms of enum mem_type (see mtype_cap in struct mem_ctl_info) */
#define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
#define MEM_FLAG_RESERVED	BIT(MEM_RESERVED)
#define MEM_FLAG_UNKNOWN	BIT(MEM_UNKNOWN)
#define MEM_FLAG_FPM		BIT(MEM_FPM)
#define MEM_FLAG_EDO		BIT(MEM_EDO)
#define MEM_FLAG_BEDO		BIT(MEM_BEDO)
#define MEM_FLAG_SDR		BIT(MEM_SDR)
#define MEM_FLAG_RDR		BIT(MEM_RDR)
#define MEM_FLAG_DDR		BIT(MEM_DDR)
#define MEM_FLAG_RDDR		BIT(MEM_RDDR)
#define MEM_FLAG_RMBS		BIT(MEM_RMBS)


/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
	EDAC_UNKNOWN = 0,	/* Unknown if ECC is available */
	EDAC_NONE,		/* Doesn't support ECC */
	EDAC_RESERVED,		/* Reserved ECC type */
	EDAC_PARITY,		/* Detects parity errors */
	EDAC_EC,		/* Error Checking - no correction */
	EDAC_SECDED,		/* Single bit error correction, Double detection */
	EDAC_S2ECD2ED,		/* Chipkill x2 devices - do these exist? */
	EDAC_S4ECD4ED,		/* Chipkill x4 devices */
	EDAC_S8ECD8ED,		/* Chipkill x8 devices */
	EDAC_S16ECD16ED,	/* Chipkill x16 devices */
};

/* bitmask forms of enum edac_type (note: no flag for EDAC_RESERVED) */
#define EDAC_FLAG_UNKNOWN	BIT(EDAC_UNKNOWN)
#define EDAC_FLAG_NONE		BIT(EDAC_NONE)
#define EDAC_FLAG_PARITY	BIT(EDAC_PARITY)
#define EDAC_FLAG_EC		BIT(EDAC_EC)
#define EDAC_FLAG_SECDED	BIT(EDAC_SECDED)
#define EDAC_FLAG_S2ECD2ED	BIT(EDAC_S2ECD2ED)
#define EDAC_FLAG_S4ECD4ED	BIT(EDAC_S4ECD4ED)
#define EDAC_FLAG_S8ECD8ED	BIT(EDAC_S8ECD8ED)
#define EDAC_FLAG_S16ECD16ED	BIT(EDAC_S16ECD16ED)
145
/* scrubbing capabilities */
enum scrub_type {
	SCRUB_UNKNOWN = 0,	/* Unknown if scrubber is available */
	SCRUB_NONE,		/* No scrubber */
	SCRUB_SW_PROG,		/* SW progressive (sequential) scrubbing */
	SCRUB_SW_SRC,		/* Software scrub only errors */
	SCRUB_SW_PROG_SRC,	/* Progressive software scrub from an error */
	SCRUB_SW_TUNABLE,	/* Software scrub frequency is tunable */
	SCRUB_HW_PROG,		/* HW progressive (sequential) scrubbing */
	SCRUB_HW_SRC,		/* Hardware scrub only errors */
	SCRUB_HW_PROG_SRC,	/* Progressive hardware scrub from an error */
	SCRUB_HW_TUNABLE	/* Hardware scrub frequency is tunable */
};

/* bitmask forms of enum scrub_type (see scrub_cap in struct mem_ctl_info).
 * Fix: these macros referenced enumerators that do not exist in the enum
 * (SCRUB_SW_SRC_CORR, SCRUB_SW_PROG_SRC_CORR, SCRUB_SW_SCRUB_TUNABLE,
 * SCRUB_HW_SRC_CORR, SCRUB_HW_PROG_SRC_CORR), which would break the build
 * at any use site; they now match the enum exactly.
 */
#define SCRUB_FLAG_SW_PROG	BIT(SCRUB_SW_PROG)
#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
#define SCRUB_FLAG_HW_TUN	BIT(SCRUB_HW_TUNABLE)
168
/* whether the sysfs entries for an mci are currently registered */
enum mci_sysfs_status {
	MCI_SYSFS_INACTIVE = 0,	/* sysfs entries NOT registered */
	MCI_SYSFS_ACTIVE	/* sysfs entries ARE registered */
};
173
174/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
175
176/*
177 * There are several things to be aware of that aren't at all obvious:
178 *
179 *
180 * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
181 *
182 * These are some of the many terms that are thrown about that don't always
183 * mean what people think they mean (Inconceivable!). In the interest of
184 * creating a common ground for discussion, terms and their definitions
185 * will be established.
186 *
187 * Memory devices: The individual chip on a memory stick. These devices
188 * commonly output 4 and 8 bits each. Grouping several
189 * of these in parallel provides 64 bits which is common
190 * for a memory stick.
191 *
 * Memory Stick:	A printed circuit board that aggregates multiple
 *			memory devices in parallel.  This is the atomic
 *			memory component that is purchasable by Joe consumer
 *			and loaded into a memory socket.
196 *
197 * Socket: A physical connector on the motherboard that accepts
198 * a single memory stick.
199 *
200 * Channel: Set of memory devices on a memory stick that must be
201 * grouped in parallel with one or more additional
202 * channels from other memory sticks. This parallel
203 * grouping of the output from multiple channels are
204 * necessary for the smallest granularity of memory access.
205 * Some memory controllers are capable of single channel -
206 * which means that memory sticks can be loaded
207 * individually. Other memory controllers are only
208 * capable of dual channel - which means that memory
209 * sticks must be loaded as pairs (see "socket set").
210 *
 * Chip-select row:	All of the memory devices that are selected together
 *			for a single, minimum grain of memory access.
213 * This selects all of the parallel memory devices across
214 * all of the parallel channels. Common chip-select rows
215 * for single channel are 64 bits, for dual channel 128
216 * bits.
217 *
 * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
219 * Motherboards commonly drive two chip-select pins to
220 * a memory stick. A single-ranked stick, will occupy
221 * only one of those rows. The other will be unused.
222 *
223 * Double-Ranked stick: A double-ranked stick has two chip-select rows which
224 * access different sets of memory devices. The two
225 * rows cannot be accessed concurrently.
226 *
227 * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
228 * A double-sided stick has two chip-select rows which
229 * access different sets of memory devices. The two
230 * rows cannot be accessed concurrently. "Double-sided"
231 * is irrespective of the memory devices being mounted
232 * on both sides of the memory stick.
233 *
 * Socket set:		All of the memory sticks that are required for
 *			a single memory access or all of the memory sticks
236 * spanned by a chip-select row. A single socket set
237 * has two chip-select rows and if double-sided sticks
238 * are used these will occupy those chip-select rows.
239 *
240 * Bank: This term is avoided because it is unclear when
241 * needing to distinguish between chip-select rows and
242 * socket sets.
243 *
244 * Controller pages:
245 *
246 * Physical pages:
247 *
248 * Virtual pages:
249 *
250 *
251 * STRUCTURE ORGANIZATION AND CHOICES
252 *
253 *
254 *
255 * PS - I enjoyed writing all that about as much as you enjoyed reading it.
256 */
257
258
/* Per-channel state: the error count and motherboard DIMM label for one
 * channel of a chip-select row.
 */
struct channel_info {
	int chan_idx;		/* channel index */
	u32 ce_count;		/* Correctable Errors for this CHANNEL */
	char label[EDAC_MC_LABEL_LEN + 1];	/* DIMM label on motherboard */
	struct csrow_info *csrow;	/* the parent */
};
265
266
/* One chip-select row: a contiguous range of pages with a common
 * memory/EDAC type, its error counters, and its per-channel info.
 */
struct csrow_info {
	unsigned long first_page;	/* first page number in dimm */
	unsigned long last_page;	/* last page number in dimm */
	unsigned long page_mask;	/* used for interleaving -
					   0UL for non intlv */
	u32 nr_pages;		/* number of pages in csrow */
	u32 grain;		/* granularity of reported error in bytes */
	int csrow_idx;		/* the chip-select row */
	enum dev_type dtype;	/* memory device type */
	u32 ue_count;		/* Uncorrectable Errors for this csrow */
	u32 ce_count;		/* Correctable Errors for this csrow */
	enum mem_type mtype;	/* memory csrow type */
	enum edac_type edac_mode;	/* EDAC mode for this csrow */
	struct mem_ctl_info *mci;	/* the parent */

	struct kobject kobj;	/* sysfs kobject for this csrow */

	/* FIXME the number of CHANNELs might need to become dynamic */
	u32 nr_channels;	/* number of entries in channels[] */
	struct channel_info *channels;
};
288
289
/* Top-level descriptor for one memory controller: its capabilities,
 * csrow table, error counters, and the list/RCU/kobject glue that ties
 * it into the edac core.
 */
struct mem_ctl_info {
	struct list_head link;	/* for global list of mem_ctl_info structs */
	unsigned long mtype_cap;	/* memory types supported by mc */
	unsigned long edac_ctl_cap;	/* Mem controller EDAC capabilities */
	unsigned long edac_cap;	/* configuration capabilities - this is
				   closely related to edac_ctl_cap.  The
				   difference is that the controller
				   may be capable of s4ecd4ed which would
				   be listed in edac_ctl_cap, but if
				   channels aren't capable of s4ecd4ed then the
				   edac_cap would not have that capability. */
	unsigned long scrub_cap;	/* chipset scrub capabilities */
	enum scrub_type scrub_mode;	/* current scrub mode */

	enum mci_sysfs_status sysfs_active;	/* status of sysfs */

	/* pointer to edac checking routine */
	void (*edac_check) (struct mem_ctl_info * mci);
	/*
	 * Remaps memory pages: controller pages to physical pages.
	 * For most MC's, this will be NULL.
	 */
	/* FIXME - why not send the phys page to begin with? */
	unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
					   unsigned long page);
	int mc_idx;		/* index of this mc (used in log messages) */
	int nr_csrows;		/* number of entries in csrows[] */
	struct csrow_info *csrows;
	/*
	 * FIXME - what about controllers on other busses? - IDs must be
	 * unique.  pdev pointer should be sufficiently unique, but
	 * BUS:SLOT.FUNC numbers may not be unique.
	 */
	struct pci_dev *pdev;	/* the controller's PCI device */
	const char *mod_name;	/* driver module name */
	const char *mod_ver;	/* driver module version string */
	const char *ctl_name;	/* controller name, e.g. "i82860" */
	char proc_name[MC_PROC_NAME_MAX_LEN + 1];
	void *pvt_info;		/* driver-private area (see edac_mc_alloc) */
	u32 ue_noinfo_count;	/* Uncorrectable Errors w/o info */
	u32 ce_noinfo_count;	/* Correctable Errors w/o info */
	u32 ue_count;		/* Total Uncorrectable Errors for this MC */
	u32 ce_count;		/* Total Correctable Errors for this MC */
	unsigned long start_time;	/* mci load start time (in jiffies) */

	/* this stuff is for safe removal of mc devices from global list while
	 * NMI handlers may be traversing list
	 */
	struct rcu_head rcu;
	struct completion complete;

	/* edac sysfs device control */
	struct kobject edac_mci_kobj;
};
344
345
346
347/* write all or some bits in a byte-register*/
348static inline void pci_write_bits8(struct pci_dev *pdev, int offset,
349 u8 value, u8 mask)
350{
351 if (mask != 0xff) {
352 u8 buf;
353 pci_read_config_byte(pdev, offset, &buf);
354 value &= mask;
355 buf &= ~mask;
356 value |= buf;
357 }
358 pci_write_config_byte(pdev, offset, value);
359}
360
361
362/* write all or some bits in a word-register*/
363static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
364 u16 value, u16 mask)
365{
366 if (mask != 0xffff) {
367 u16 buf;
368 pci_read_config_word(pdev, offset, &buf);
369 value &= mask;
370 buf &= ~mask;
371 value |= buf;
372 }
373 pci_write_config_word(pdev, offset, value);
374}
375
376
377/* write all or some bits in a dword-register*/
378static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
379 u32 value, u32 mask)
380{
381 if (mask != 0xffff) {
382 u32 buf;
383 pci_read_config_dword(pdev, offset, &buf);
384 value &= mask;
385 buf &= ~mask;
386 value |= buf;
387 }
388 pci_write_config_dword(pdev, offset, value);
389}
390
391
392#ifdef CONFIG_EDAC_DEBUG
393void edac_mc_dump_channel(struct channel_info *chan);
394void edac_mc_dump_mci(struct mem_ctl_info *mci);
395void edac_mc_dump_csrow(struct csrow_info *csrow);
396#endif /* CONFIG_EDAC_DEBUG */
397
398extern int edac_mc_add_mc(struct mem_ctl_info *mci);
399extern int edac_mc_del_mc(struct mem_ctl_info *mci);
400
401extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
402 unsigned long page);
403
404extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev
405 *pdev);
406
407extern void edac_mc_scrub_block(unsigned long page,
408 unsigned long offset, u32 size);
409
410/*
411 * The no info errors are used when error overflows are reported.
412 * There are a limited number of error logging registers that can
413 * be exausted. When all registers are exhausted and an additional
414 * error occurs then an error overflow register records that an
415 * error occured and the type of error, but doesn't have any
416 * further information. The ce/ue versions make for cleaner
417 * reporting logic and function interface - reduces conditional
418 * statement clutter and extra function arguments.
419 */
420extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
421 unsigned long page_frame_number,
422 unsigned long offset_in_page,
423 unsigned long syndrome,
424 int row, int channel, const char *msg);
425
426extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
427 const char *msg);
428
429extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
430 unsigned long page_frame_number,
431 unsigned long offset_in_page,
432 int row, const char *msg);
433
434extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
435 const char *msg);
436
437/*
438 * This kmalloc's and initializes all the structures.
439 * Can't be used if all structures don't have the same lifetime.
440 */
441extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt,
442 unsigned nr_csrows, unsigned nr_chans);
443
444/* Free an mc previously allocated by edac_mc_alloc() */
445extern void edac_mc_free(struct mem_ctl_info *mci);
446
447
448#endif /* _EDAC_MC_H_ */
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
new file mode 100644
index 00000000000..52596e75f9c
--- /dev/null
+++ b/drivers/edac/i82860_edac.c
@@ -0,0 +1,299 @@
1/*
2 * Intel 82860 Memory Controller kernel module
3 * (C) 2005 Red Hat (http://www.redhat.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Ben Woodard <woodard@redhat.com>
8 * shamelessly copied from and based upon the edac_i82875 driver
9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
10 */
11
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/pci.h>
17#include <linux/pci_ids.h>
18#include <linux/slab.h>
19#include "edac_mc.h"
20
21
#ifndef PCI_DEVICE_ID_INTEL_82860_0
#define PCI_DEVICE_ID_INTEL_82860_0	0x2531
#endif				/* PCI_DEVICE_ID_INTEL_82860_0 */

/* i82860 PCI config-space register offsets (device 0, function 0) */
#define I82860_MCHCFG 0x50	/* MCH config; DDIM mode in bits 8:7 */
#define I82860_GBA 0x60		/* Group Boundary Address regs, 16 x 16b */
#define I82860_GBA_MASK 0x7FF	/* valid boundary bits within a GBA reg */
#define I82860_GBA_SHIFT 24	/* GBA granularity: 16MiB (1 << 24) */
#define I82860_ERRSTS 0xC8	/* error status (bit1=UE, bit0=CE) */
#define I82860_EAP 0xE4		/* error address pointer */
#define I82860_DERRCTL_STS 0xE2	/* DRAM error syndrome/control */
33
/* index into i82860_devs[] */
enum i82860_chips {
	I82860 = 0,
};

/* per-chip constant data (currently just the control name) */
struct i82860_dev_info {
	const char *ctl_name;
};

/* snapshot of the i82860 error reporting registers */
struct i82860_error_info {
	u16 errsts;	/* I82860_ERRSTS, first read */
	u32 eap;	/* I82860_EAP error address */
	u16 derrsyn;	/* I82860_DERRCTL_STS syndrome */
	u16 errsts2;	/* I82860_ERRSTS, second read (consistency check) */
};

static const struct i82860_dev_info i82860_devs[] = {
	[I82860] = {
		    .ctl_name = "i82860"},
};

static struct pci_dev *mci_pdev = NULL;	/* init dev: in case that AGP code
					   has already registered driver */

/* cleared when i82860_init() had to bind the device by hand */
static int i82860_registered = 1;
58
/* Capture a consistent snapshot of the i82860 error registers into
 * @info, then clear the latched error bits so new events can be logged.
 */
static void i82860_get_error_info (struct mem_ctl_info *mci,
		struct i82860_error_info *info)
{
	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts);
	pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
	pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, &info->derrsyn);
	pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts2);

	/* reset the UE/CE bits (1:0) in ERRSTS (same op probe1 uses to
	 * "clear counters")
	 */
	pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);

	/*
	 * If the error is the same for both reads then the first set of reads
	 * is valid.  If there is a change then there is a CE no info and the
	 * second set of reads is valid and should be UE info.
	 */
	if (!(info->errsts2 & 0x0003))
		return;
	if ((info->errsts ^ info->errsts2) & 0x0003) {
		/* a new error arrived between the two ERRSTS reads; refresh
		 * the address/syndrome to match the second status read
		 */
		pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
		pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
				     &info->derrsyn);
	}
}
87
88static int i82860_process_error_info (struct mem_ctl_info *mci,
89 struct i82860_error_info *info, int handle_errors)
90{
91 int row;
92
93 if (!(info->errsts2 & 0x0003))
94 return 0;
95
96 if (!handle_errors)
97 return 1;
98
99 if ((info->errsts ^ info->errsts2) & 0x0003) {
100 edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
101 info->errsts = info->errsts2;
102 }
103
104 info->eap >>= PAGE_SHIFT;
105 row = edac_mc_find_csrow_by_page(mci, info->eap);
106
107 if (info->errsts & 0x0002)
108 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
109 else
110 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
111 0, "i82860 UE");
112
113 return 1;
114}
115
116static void i82860_check(struct mem_ctl_info *mci)
117{
118 struct i82860_error_info info;
119
120 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
121 i82860_get_error_info(mci, &info);
122 i82860_process_error_info(mci, &info, 1);
123}
124
/* Probe and register one i82860 memory controller.
 *
 * Allocates the mem_ctl_info, fills in the csrow table from the
 * cumulative GBA (group boundary address) registers, clears the latched
 * error bits and hands the controller to the edac core.
 * Returns 0 on success, -ENOMEM or -ENODEV on failure.
 */
static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc = -ENODEV;
	int index;
	struct mem_ctl_info *mci = NULL;
	unsigned long last_cumul_size;

	u16 mchcfg_ddim;	/* DRAM Data Integrity Mode 0=none,2=edac */

	/* RDRAM has channels but these don't map onto the abstractions that
	   edac uses.
	   The device groups from the GRA registers seem to map reasonably
	   well onto the notion of a chip select row.
	   There are 16 GRA registers and since the name is associated with
	   the channel and the GRA registers map to physical devices so we are
	   going to make 1 channel for group.
	 */
	mci = edac_mc_alloc(0, 16, 1);
	if (!mci)
		return -ENOMEM;

	debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);

	mci->pdev = pdev;
	mci->mtype_cap = MEM_FLAG_DDR;


	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	/* I'm not sure about this but I think that all RDRAM is SECDED */
	mci->edac_cap = EDAC_FLAG_SECDED;
	/* adjust FLAGS */

	mci->mod_name = BS_MOD_STR;
	mci->mod_ver = "$Revision: 1.1.2.6 $";
	mci->ctl_name = i82860_devs[dev_idx].ctl_name;
	mci->edac_check = i82860_check;
	mci->ctl_page_to_phys = NULL;

	/* DDIM is bits 8:7 of MCHCFG; non-zero means ECC is enabled */
	pci_read_config_word(mci->pdev, I82860_MCHCFG, &mchcfg_ddim);
	mchcfg_ddim = mchcfg_ddim & 0x180;

	/*
	 * The group row boundary (GRA) reg values are boundary address
	 * for each DRAM row with a granularity of 16MB.  GRA regs are
	 * cumulative; therefore GRA15 will contain the total memory contained
	 * in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		u16 value;
		u32 cumul_size;
		struct csrow_info *csrow = &mci->csrows[index];

		pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
				     &value);

		/* convert the 16MiB-granular boundary to a page count */
		cumul_size = (value & I82860_GBA_MASK) <<
		    (I82860_GBA_SHIFT - PAGE_SHIFT);
		debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
			__func__, index, cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		csrow->nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;
		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
		csrow->mtype = MEM_RMBS;
		csrow->dtype = DEV_UNKNOWN;
		csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
	}

	/* clear counters */
	pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);

	if (edac_mc_add_mc(mci)) {
		debugf3("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n",
			__func__);
		edac_mc_free(mci);
	} else {
		/* get this far and it's successful */
		debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
		rc = 0;
	}
	return rc;
}
212
213/* returns count (>= 0), or negative on error */
214static int __devinit i82860_init_one(struct pci_dev *pdev,
215 const struct pci_device_id *ent)
216{
217 int rc;
218
219 debugf0("MC: " __FILE__ ": %s()\n", __func__);
220
221 printk(KERN_INFO "i82860 init one\n");
222 if(pci_enable_device(pdev) < 0)
223 return -EIO;
224 rc = i82860_probe1(pdev, ent->driver_data);
225 if(rc == 0)
226 mci_pdev = pci_dev_get(pdev);
227 return rc;
228}
229
230static void __devexit i82860_remove_one(struct pci_dev *pdev)
231{
232 struct mem_ctl_info *mci;
233
234 debugf0(__FILE__ ": %s()\n", __func__);
235
236 mci = edac_mc_find_mci_by_pdev(pdev);
237 if ((mci != NULL) && (edac_mc_del_mc(mci) == 0))
238 edac_mc_free(mci);
239}
240
/* PCI IDs this driver binds to: the 82860 host bridge only */
static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
	{PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I82860},
	{0,}			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);

static struct pci_driver i82860_driver = {
	.name = BS_MOD_STR,
	.probe = i82860_init_one,
	.remove = __devexit_p(i82860_remove_one),
	.id_table = i82860_pci_tbl,
};
255
256static int __init i82860_init(void)
257{
258 int pci_rc;
259
260 debugf3("MC: " __FILE__ ": %s()\n", __func__);
261 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
262 return pci_rc;
263
264 if (!mci_pdev) {
265 i82860_registered = 0;
266 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
267 PCI_DEVICE_ID_INTEL_82860_0, NULL);
268 if (mci_pdev == NULL) {
269 debugf0("860 pci_get_device fail\n");
270 return -ENODEV;
271 }
272 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
273 if (pci_rc < 0) {
274 debugf0("860 init fail\n");
275 pci_dev_put(mci_pdev);
276 return -ENODEV;
277 }
278 }
279 return 0;
280}
281
282static void __exit i82860_exit(void)
283{
284 debugf3("MC: " __FILE__ ": %s()\n", __func__);
285
286 pci_unregister_driver(&i82860_driver);
287 if (!i82860_registered) {
288 i82860_remove_one(mci_pdev);
289 pci_dev_put(mci_pdev);
290 }
291}
292
293module_init(i82860_init);
294module_exit(i82860_exit);
295
296MODULE_LICENSE("GPL");
297MODULE_AUTHOR
298 ("Red Hat Inc. (http://www.redhat.com.com) Ben Woodard <woodard@redhat.com>");
299MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
new file mode 100644
index 00000000000..009c08fe5d6
--- /dev/null
+++ b/drivers/edac/i82875p_edac.c
@@ -0,0 +1,532 @@
1/*
2 * Intel D82875P Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Contributors:
9 * Wang Zhenyu at intel.com
10 *
11 * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
12 *
13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
14 */
15
16
17#include <linux/config.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <linux/pci.h>
22#include <linux/pci_ids.h>
23
24#include <linux/slab.h>
25
26#include "edac_mc.h"
27
28
29#ifndef PCI_DEVICE_ID_INTEL_82875_0
30#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
31#endif /* PCI_DEVICE_ID_INTEL_82875_0 */
32
33#ifndef PCI_DEVICE_ID_INTEL_82875_6
34#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
35#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
36
37
38/* four csrows in dual channel, eight in single channel */
39#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
40
41
42/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
43#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
44 *
45 * 31:12 block address
46 * 11:0 reserved
47 */
48
49#define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
50 *
51 * 7:0 DRAM ECC Syndrome
52 */
53
54#define I82875P_DES 0x5d /* DRAM Error Status (8b)
55 *
56 * 7:1 reserved
57 * 0 Error channel 0/1
58 */
59
60#define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
61 *
62 * 15:10 reserved
63 * 9 non-DRAM lock error (ndlock)
64 * 8 Sftwr Generated SMI
65 * 7 ECC UE
66 * 6 reserved
67 * 5 MCH detects unimplemented cycle
68 * 4 AGP access outside GA
69 * 3 Invalid AGP access
70 * 2 Invalid GA translation table
71 * 1 Unsupported AGP command
72 * 0 ECC CE
73 */
74
75#define I82875P_ERRCMD 0xca /* Error Command (16b)
76 *
77 * 15:10 reserved
78 * 9 SERR on non-DRAM lock
79 * 8 SERR on ECC UE
80 * 7 SERR on ECC CE
81 * 6 target abort on high exception
82 * 5 detect unimplemented cyc
83 * 4 AGP access outside of GA
84 * 3 SERR on invalid AGP access
85 * 2 invalid translation table
86 * 1 SERR on unsupported AGP command
87 * 0 reserved
88 */
89
90
91/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
92#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
93 *
94 * 15:10 reserved
95 * 9 fast back-to-back - ro 0
96 * 8 SERR enable - ro 0
97 * 7 addr/data stepping - ro 0
98 * 6 parity err enable - ro 0
99 * 5 VGA palette snoop - ro 0
100 * 4 mem wr & invalidate - ro 0
101 * 3 special cycle - ro 0
102 * 2 bus master - ro 0
103 * 1 mem access dev6 - 0(dis),1(en)
104 * 0 IO access dev3 - 0(dis),1(en)
105 */
106
107#define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
108 *
109 * 31:12 mem base addr [31:12]
110 * 11:4 address mask - ro 0
111 * 3 prefetchable - ro 0(non),1(pre)
112 * 2:1 mem type - ro 0
113 * 0 mem space - ro 0
114 */
115
116/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
117
118#define I82875P_DRB_SHIFT 26 /* 64MiB grain */
119#define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
120 *
121 * 7 reserved
122 * 6:0 64MiB row boundary addr
123 */
124
125#define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
126 *
127 * 7 reserved
128 * 6:4 row attr row 1
129 * 3 reserved
130 * 2:0 row attr row 0
131 *
132 * 000 = 4KiB
133 * 001 = 8KiB
134 * 010 = 16KiB
135 * 011 = 32KiB
136 */
137
138#define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
139 *
140 * 31:30 reserved
141 * 29 init complete
142 * 28:23 reserved
143 * 22:21 nr chan 00=1,01=2
144 * 20 reserved
145 * 19:18 Data Integ Mode 00=none,01=ecc
146 * 17:11 reserved
147 * 10:8 refresh mode
148 * 7 reserved
149 * 6:4 mode select
150 * 3:2 reserved
151 * 1:0 DRAM type 01=DDR
152 */
153
154
/* index into i82875p_devs[] */
enum i82875p_chips {
	I82875P = 0,
};


/* Driver-private state (kept in the pvt area of edac_mc_alloc):
 * the normally-hidden device-6 "overflow" PCI device and its
 * ioremapped register window.
 */
struct i82875p_pvt {
	struct pci_dev *ovrfl_pdev;
	void *ovrfl_window;
};


/* per-chip constant data (currently just the control name) */
struct i82875p_dev_info {
	const char *ctl_name;
};


/* snapshot of the i82875p error reporting registers */
struct i82875p_error_info {
	u16 errsts;	/* I82875P_ERRSTS, first read */
	u32 eap;	/* I82875P_EAP error address */
	u8 des;		/* I82875P_DES error channel (bit 0) */
	u8 derrsyn;	/* I82875P_DERRSYN ECC syndrome */
	u16 errsts2;	/* I82875P_ERRSTS, second read (consistency check) */
};


static const struct i82875p_dev_info i82875p_devs[] = {
	[I82875P] = {
		     .ctl_name = "i82875p"},
};

static struct pci_dev *mci_pdev = NULL;	/* init dev: in case that AGP code
					   has already registered driver */
/* cleared when i82875p_init had to bind the device by hand */
static int i82875p_registered = 1;
188
/* Capture a consistent snapshot of the i82875p error registers into
 * @info, then clear the latched UE/CE bits so new events can be logged.
 */
static void i82875p_get_error_info (struct mem_ctl_info *mci,
		struct i82875p_error_info *info)
{
	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts);
	pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
	pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
	pci_read_config_byte(mci->pdev, I82875P_DERRSYN, &info->derrsyn);
	pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts2);

	/* reset ERRSTS bit 7 (ECC UE) and bit 0 (ECC CE) */
	pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);

	/*
	 * If the error is the same for both reads then the first set of
	 * reads is valid.  If there is a change then there is a CE no info
	 * and the second set of reads is valid and should be UE info.
	 */
	if (!(info->errsts2 & 0x0081))
		return;
	if ((info->errsts ^ info->errsts2) & 0x0081) {
		/* a new error arrived between the two ERRSTS reads; refresh
		 * the address/channel/syndrome to match the second read
		 */
		pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
		pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
		pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
				     &info->derrsyn);
	}
}
220
/* Decode a previously captured register snapshot and, when
 * @handle_errors is set, report the event to the edac core.
 * Returns 1 if an error was present in the snapshot, 0 otherwise.
 */
static int i82875p_process_error_info (struct mem_ctl_info *mci,
		struct i82875p_error_info *info, int handle_errors)
{
	int row, multi_chan;

	/* DES bit 0 only identifies a channel when running dual-channel */
	multi_chan = mci->csrows[0].nr_channels - 1;

	/* ERRSTS bit 7 = ECC UE, bit 0 = ECC CE; nothing latched -> done */
	if (!(info->errsts2 & 0x0081))
		return 0;

	if (!handle_errors)
		return 1;

	/* Status changed between the two reads: the first event was a CE
	 * that got overwritten; log it without details and decode the
	 * second snapshot below.
	 */
	if ((info->errsts ^ info->errsts2) & 0x0081) {
		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
		info->errsts = info->errsts2;
	}

	info->eap >>= PAGE_SHIFT;	/* byte address -> page frame */
	row = edac_mc_find_csrow_by_page(mci, info->eap);

	if (info->errsts & 0x0080)
		edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
	else
		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
				  multi_chan ? (info->des & 0x1) : 0,
				  "i82875p CE");

	return 1;
}
251
252
253static void i82875p_check(struct mem_ctl_info *mci)
254{
255 struct i82875p_error_info info;
256
257 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
258 i82875p_get_error_info(mci, &info);
259 i82875p_process_error_info(mci, &info, 1);
260}
261
262
263#ifdef CONFIG_PROC_FS
264extern int pci_proc_attach_device(struct pci_dev *);
265#endif
266
267static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
268{
269 int rc = -ENODEV;
270 int index;
271 struct mem_ctl_info *mci = NULL;
272 struct i82875p_pvt *pvt = NULL;
273 unsigned long last_cumul_size;
274 struct pci_dev *ovrfl_pdev;
275 void __iomem *ovrfl_window = NULL;
276
277 u32 drc;
278 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
279 u32 nr_chans;
280 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
281
282 debugf0("MC: " __FILE__ ": %s()\n", __func__);
283
284 ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
285
286 if (!ovrfl_pdev) {
287 /*
288 * Intel tells BIOS developers to hide device 6 which
289 * configures the overflow device access containing
290 * the DRBs - this is where we expose device 6.
291 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
292 */
293 pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
294 ovrfl_pdev =
295 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
296 if (!ovrfl_pdev)
297 goto fail;
298 }
299#ifdef CONFIG_PROC_FS
300 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
301 printk(KERN_ERR "MC: " __FILE__
302 ": %s(): Failed to attach overflow device\n",
303 __func__);
304 goto fail;
305 }
306#endif /* CONFIG_PROC_FS */
307 if (pci_enable_device(ovrfl_pdev)) {
308 printk(KERN_ERR "MC: " __FILE__
309 ": %s(): Failed to enable overflow device\n",
310 __func__);
311 goto fail;
312 }
313
314 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
315#ifdef CORRECT_BIOS
316 goto fail;
317#endif
318 }
319 /* cache is irrelevant for PCI bus reads/writes */
320 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
321 pci_resource_len(ovrfl_pdev, 0));
322
323 if (!ovrfl_window) {
324 printk(KERN_ERR "MC: " __FILE__
325 ": %s(): Failed to ioremap bar6\n", __func__);
326 goto fail;
327 }
328
329 /* need to find out the number of channels */
330 drc = readl(ovrfl_window + I82875P_DRC);
331 drc_chan = ((drc >> 21) & 0x1);
332 nr_chans = drc_chan + 1;
333 drc_ddim = (drc >> 18) & 0x1;
334
335 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
336 nr_chans);
337
338 if (!mci) {
339 rc = -ENOMEM;
340 goto fail;
341 }
342
343 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
344
345 mci->pdev = pdev;
346 mci->mtype_cap = MEM_FLAG_DDR;
347
348 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
349 mci->edac_cap = EDAC_FLAG_UNKNOWN;
350 /* adjust FLAGS */
351
352 mci->mod_name = BS_MOD_STR;
353 mci->mod_ver = "$Revision: 1.5.2.11 $";
354 mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
355 mci->edac_check = i82875p_check;
356 mci->ctl_page_to_phys = NULL;
357
358 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
359
360 pvt = (struct i82875p_pvt *) mci->pvt_info;
361 pvt->ovrfl_pdev = ovrfl_pdev;
362 pvt->ovrfl_window = ovrfl_window;
363
364 /*
365 * The dram row boundary (DRB) reg values are boundary address
366 * for each DRAM row with a granularity of 32 or 64MB (single/dual
367 * channel operation). DRB regs are cumulative; therefore DRB7 will
368 * contain the total memory contained in all eight rows.
369 */
370 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
371 u8 value;
372 u32 cumul_size;
373 struct csrow_info *csrow = &mci->csrows[index];
374
375 value = readb(ovrfl_window + I82875P_DRB + index);
376 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
377 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
378 __func__, index, cumul_size);
379 if (cumul_size == last_cumul_size)
380 continue; /* not populated */
381
382 csrow->first_page = last_cumul_size;
383 csrow->last_page = cumul_size - 1;
384 csrow->nr_pages = cumul_size - last_cumul_size;
385 last_cumul_size = cumul_size;
386 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
387 csrow->mtype = MEM_DDR;
388 csrow->dtype = DEV_UNKNOWN;
389 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
390 }
391
392 /* clear counters */
393 pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
394
395 if (edac_mc_add_mc(mci)) {
396 debugf3("MC: " __FILE__
397 ": %s(): failed edac_mc_add_mc()\n", __func__);
398 goto fail;
399 }
400
401 /* get this far and it's successful */
402 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
403 return 0;
404
405 fail:
406 if (mci)
407 edac_mc_free(mci);
408
409 if (ovrfl_window)
410 iounmap(ovrfl_window);
411
412 if (ovrfl_pdev) {
413 pci_release_regions(ovrfl_pdev);
414 pci_disable_device(ovrfl_pdev);
415 }
416
417 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
418 return rc;
419}
420
421
422/* returns count (>= 0), or negative on error */
423static int __devinit i82875p_init_one(struct pci_dev *pdev,
424 const struct pci_device_id *ent)
425{
426 int rc;
427
428 debugf0("MC: " __FILE__ ": %s()\n", __func__);
429
430 printk(KERN_INFO "i82875p init one\n");
431 if(pci_enable_device(pdev) < 0)
432 return -EIO;
433 rc = i82875p_probe1(pdev, ent->driver_data);
434 if (mci_pdev == NULL)
435 mci_pdev = pci_dev_get(pdev);
436 return rc;
437}
438
439
440static void __devexit i82875p_remove_one(struct pci_dev *pdev)
441{
442 struct mem_ctl_info *mci;
443 struct i82875p_pvt *pvt = NULL;
444
445 debugf0(__FILE__ ": %s()\n", __func__);
446
447 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
448 return;
449
450 pvt = (struct i82875p_pvt *) mci->pvt_info;
451 if (pvt->ovrfl_window)
452 iounmap(pvt->ovrfl_window);
453
454 if (pvt->ovrfl_pdev) {
455#ifdef CORRECT_BIOS
456 pci_release_regions(pvt->ovrfl_pdev);
457#endif /*CORRECT_BIOS */
458 pci_disable_device(pvt->ovrfl_pdev);
459 pci_dev_put(pvt->ovrfl_pdev);
460 }
461
462 if (edac_mc_del_mc(mci))
463 return;
464
465 edac_mc_free(mci);
466}
467
468
469static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
470 {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
471 I82875P},
472 {0,} /* 0 terminated list. */
473};
474
475MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
476
477
478static struct pci_driver i82875p_driver = {
479 .name = BS_MOD_STR,
480 .probe = i82875p_init_one,
481 .remove = __devexit_p(i82875p_remove_one),
482 .id_table = i82875p_pci_tbl,
483};
484
485
486static int __init i82875p_init(void)
487{
488 int pci_rc;
489
490 debugf3("MC: " __FILE__ ": %s()\n", __func__);
491 pci_rc = pci_register_driver(&i82875p_driver);
492 if (pci_rc < 0)
493 return pci_rc;
494 if (mci_pdev == NULL) {
495 i82875p_registered = 0;
496 mci_pdev =
497 pci_get_device(PCI_VENDOR_ID_INTEL,
498 PCI_DEVICE_ID_INTEL_82875_0, NULL);
499 if (!mci_pdev) {
500 debugf0("875p pci_get_device fail\n");
501 return -ENODEV;
502 }
503 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
504 if (pci_rc < 0) {
505 debugf0("875p init fail\n");
506 pci_dev_put(mci_pdev);
507 return -ENODEV;
508 }
509 }
510 return 0;
511}
512
513
514static void __exit i82875p_exit(void)
515{
516 debugf3("MC: " __FILE__ ": %s()\n", __func__);
517
518 pci_unregister_driver(&i82875p_driver);
519 if (!i82875p_registered) {
520 i82875p_remove_one(mci_pdev);
521 pci_dev_put(mci_pdev);
522 }
523}
524
525
526module_init(i82875p_init);
527module_exit(i82875p_exit);
528
529
530MODULE_LICENSE("GPL");
531MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
532MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
new file mode 100644
index 00000000000..e90892831b9
--- /dev/null
+++ b/drivers/edac/r82600_edac.c
@@ -0,0 +1,407 @@
1/*
2 * Radisys 82600 Embedded chipset Memory Controller kernel module
3 * (C) 2005 EADS Astrium
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
8 * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
9 *
10 * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
11 *
12 * Written with reference to 82600 High Integration Dual PCI System
13 * Controller Data Book:
14 * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
15 * references to this document given in []
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/init.h>
21
22#include <linux/pci.h>
23#include <linux/pci_ids.h>
24
25#include <linux/slab.h>
26
27#include "edac_mc.h"
28
29/* Radisys say "The 82600 integrates a main memory SDRAM controller that
30 * supports up to four banks of memory. The four banks can support a mix of
31 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
32 * each of which can be any size from 16MB to 512MB. Both registered (control
33 * signals buffered) and unbuffered DIMM types are supported. Mixing of
34 * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
35 * is not allowed. The 82600 SDRAM interface operates at the same frequency as
36 * the CPU bus, 66MHz, 100MHz or 133MHz."
37 */
38
39#define R82600_NR_CSROWS 4
40#define R82600_NR_CHANS 1
41#define R82600_NR_DIMMS 4
42
43#define R82600_BRIDGE_ID 0x8200
44
45/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
46#define R82600_DRAMC 0x57 /* Various SDRAM related control bits
47 * all bits are R/W
48 *
49 * 7 SDRAM ISA Hole Enable
50 * 6 Flash Page Mode Enable
51 * 5 ECC Enable: 1=ECC 0=noECC
52 * 4 DRAM DIMM Type: 1=
53 * 3 BIOS Alias Disable
54 * 2 SDRAM BIOS Flash Write Enable
55 * 1:0 SDRAM Refresh Rate: 00=Disabled
56 * 01=7.8usec (256Mbit SDRAMs)
57 * 10=15.6us 11=125usec
58 */
59
60#define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
61 * More SDRAM related control bits
62 * all bits are R/W
63 *
64 * 15:8 Reserved.
65 *
66 * 7:5 Special SDRAM Mode Select
67 *
68 * 4 Force ECC
69 *
70 * 1=Drive ECC bits to 0 during
71 * write cycles (i.e. ECC test mode)
72 *
73 * 0=Normal ECC functioning
74 *
75 * 3 Enhanced Paging Enable
76 *
77 * 2 CAS# Latency 0=3clks 1=2clks
78 *
79 * 1 RAS# to CAS# Delay 0=3 1=2
80 *
81 * 0 RAS# Precharge 0=3 1=2
82 */
83
84#define R82600_EAP 0x80 /* ECC Error Address Pointer Register
85 *
86 * 31 Disable Hardware Scrubbing (RW)
87 * 0=Scrub on corrected read
88 * 1=Don't scrub on corrected read
89 *
90 * 30:12 Error Address Pointer (RO)
91 * Upper 19 bits of error address
92 *
93 * 11:4 Syndrome Bits (RO)
94 *
95 * 3 BSERR# on multibit error (RW)
96 * 1=enable 0=disable
97 *
 98 * 2 NMI on Single Bit Error (RW)
 99 * 1=NMI triggered by SBE n.b. other
 100 * prerequisites
101 * 0=NMI not triggered
102 *
103 * 1 MBE (R/WC)
104 * read 1=MBE at EAP (see above)
105 * read 0=no MBE, or SBE occurred first
106 * write 1=Clear MBE status (must also
107 * clear SBE)
108 * write 0=NOP
109 *
110 * 1 SBE (R/WC)
111 * read 1=SBE at EAP (see above)
112 * read 0=no SBE, or MBE occurred first
113 * write 1=Clear SBE status (must also
114 * clear MBE)
115 * write 0=NOP
116 */
117
118#define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundry Address
119 * Registers
120 *
121 * 7:0 Address lines 30:24 - upper limit of
122 * each row [p57]
123 */
124
125struct r82600_error_info {
126 u32 eapr;
127};
128
129
130static unsigned int disable_hardware_scrub = 0;
131
132
133static void r82600_get_error_info (struct mem_ctl_info *mci,
134 struct r82600_error_info *info)
135{
136 pci_read_config_dword(mci->pdev, R82600_EAP, &info->eapr);
137
138 if (info->eapr & BIT(0))
139 /* Clear error to allow next error to be reported [p.62] */
140 pci_write_bits32(mci->pdev, R82600_EAP,
141 ((u32) BIT(0) & (u32) BIT(1)),
142 ((u32) BIT(0) & (u32) BIT(1)));
143
144 if (info->eapr & BIT(1))
145 /* Clear error to allow next error to be reported [p.62] */
146 pci_write_bits32(mci->pdev, R82600_EAP,
147 ((u32) BIT(0) & (u32) BIT(1)),
148 ((u32) BIT(0) & (u32) BIT(1)));
149}
150
151
152static int r82600_process_error_info (struct mem_ctl_info *mci,
153 struct r82600_error_info *info, int handle_errors)
154{
155 int error_found;
156 u32 eapaddr, page;
157 u32 syndrome;
158
159 error_found = 0;
160
161 /* bits 30:12 store the upper 19 bits of the 32 bit error address */
162 eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13;
163 /* Syndrome in bits 11:4 [p.62] */
164 syndrome = (info->eapr >> 4) & 0xFF;
165
166 /* the R82600 reports at less than page *
167 * granularity (upper 19 bits only) */
168 page = eapaddr >> PAGE_SHIFT;
169
170 if (info->eapr & BIT(0)) { /* CE? */
171 error_found = 1;
172
173 if (handle_errors)
174 edac_mc_handle_ce(
175 mci, page, 0, /* not avail */
176 syndrome,
177 edac_mc_find_csrow_by_page(mci, page),
178 0, /* channel */
179 mci->ctl_name);
180 }
181
182 if (info->eapr & BIT(1)) { /* UE? */
183 error_found = 1;
184
185 if (handle_errors)
186 /* 82600 doesn't give enough info */
187 edac_mc_handle_ue(mci, page, 0,
188 edac_mc_find_csrow_by_page(mci, page),
189 mci->ctl_name);
190 }
191
192 return error_found;
193}
194
195static void r82600_check(struct mem_ctl_info *mci)
196{
197 struct r82600_error_info info;
198
199 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
200 r82600_get_error_info(mci, &info);
201 r82600_process_error_info(mci, &info, 1);
202}
203
204static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
205{
206 int rc = -ENODEV;
207 int index;
208 struct mem_ctl_info *mci = NULL;
209 u8 dramcr;
210 u32 ecc_on;
211 u32 reg_sdram;
212 u32 eapr;
213 u32 scrub_disabled;
214 u32 sdram_refresh_rate;
215 u32 row_high_limit_last = 0;
216 u32 eap_init_bits;
217
218 debugf0("MC: " __FILE__ ": %s()\n", __func__);
219
220
221 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
222 pci_read_config_dword(pdev, R82600_EAP, &eapr);
223
224 ecc_on = dramcr & BIT(5);
225 reg_sdram = dramcr & BIT(4);
226 scrub_disabled = eapr & BIT(31);
227 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
228
229 debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n",
230 __func__, sdram_refresh_rate);
231
232 debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
233 dramcr);
234
235 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
236
237 if (mci == NULL) {
238 rc = -ENOMEM;
239 goto fail;
240 }
241
242 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
243
244 mci->pdev = pdev;
245 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
246
247 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
248 /* FIXME try to work out if the chip leads have been *
249 * used for COM2 instead on this board? [MA6?] MAYBE: */
250
251 /* On the R82600, the pins for memory bits 72:65 - i.e. the *
252 * EC bits are shared with the pins for COM2 (!), so if COM2 *
253 * is enabled, we assume COM2 is wired up, and thus no EDAC *
254 * is possible. */
255 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
256 if (ecc_on) {
257 if (scrub_disabled)
258 debugf3("MC: " __FILE__ ": %s(): mci = %p - "
259 "Scrubbing disabled! EAP: %#0x\n", __func__,
260 mci, eapr);
261 } else
262 mci->edac_cap = EDAC_FLAG_NONE;
263
264 mci->mod_name = BS_MOD_STR;
265 mci->mod_ver = "$Revision: 1.1.2.6 $";
266 mci->ctl_name = "R82600";
267 mci->edac_check = r82600_check;
268 mci->ctl_page_to_phys = NULL;
269
270 for (index = 0; index < mci->nr_csrows; index++) {
271 struct csrow_info *csrow = &mci->csrows[index];
272 u8 drbar; /* sDram Row Boundry Address Register */
273 u32 row_high_limit;
274 u32 row_base;
275
276 /* find the DRAM Chip Select Base address and mask */
277 pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
278
279 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n",
280 mci->mc_idx, __func__, index, drbar);
281
282 row_high_limit = ((u32) drbar << 24);
283/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
284
285 debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
286 "Boundry Address=%#0x, Last = %#0x \n",
287 mci->mc_idx, __func__, index, row_high_limit,
288 row_high_limit_last);
289
290 /* Empty row [p.57] */
291 if (row_high_limit == row_high_limit_last)
292 continue;
293
294 row_base = row_high_limit_last;
295
296 csrow->first_page = row_base >> PAGE_SHIFT;
297 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
298 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
299 /* Error address is top 19 bits - so granularity is *
300 * 14 bits */
301 csrow->grain = 1 << 14;
302 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
303 /* FIXME - check that this is unknowable with this chipset */
304 csrow->dtype = DEV_UNKNOWN;
305
306 /* Mode is global on 82600 */
307 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
308 row_high_limit_last = row_high_limit;
309 }
310
311 /* clear counters */
312 /* FIXME should we? */
313
314 if (edac_mc_add_mc(mci)) {
315 debugf3("MC: " __FILE__
316 ": %s(): failed edac_mc_add_mc()\n", __func__);
317 goto fail;
318 }
319
320 /* get this far and it's successful */
321
322 /* Clear error flags to allow next error to be reported [p.62] */
323 /* Test systems seem to always have the UE flag raised on boot */
324
325 eap_init_bits = BIT(0) & BIT(1);
326 if (disable_hardware_scrub) {
327 eap_init_bits |= BIT(31);
328 debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub "
329 "(scrub on error)\n", __func__);
330 }
331
332 pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits,
333 eap_init_bits);
334
335 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
336 return 0;
337
338fail:
339 if (mci)
340 edac_mc_free(mci);
341
342 return rc;
343}
344
345/* returns count (>= 0), or negative on error */
346static int __devinit r82600_init_one(struct pci_dev *pdev,
347 const struct pci_device_id *ent)
348{
349 debugf0("MC: " __FILE__ ": %s()\n", __func__);
350
351 /* don't need to call pci_device_enable() */
352 return r82600_probe1(pdev, ent->driver_data);
353}
354
355
356static void __devexit r82600_remove_one(struct pci_dev *pdev)
357{
358 struct mem_ctl_info *mci;
359
360 debugf0(__FILE__ ": %s()\n", __func__);
361
362 if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
363 !edac_mc_del_mc(mci))
364 edac_mc_free(mci);
365}
366
367
368static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
369 {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)},
370 {0,} /* 0 terminated list. */
371};
372
373MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
374
375
376static struct pci_driver r82600_driver = {
377 .name = BS_MOD_STR,
378 .probe = r82600_init_one,
379 .remove = __devexit_p(r82600_remove_one),
380 .id_table = r82600_pci_tbl,
381};
382
383
384static int __init r82600_init(void)
385{
386 return pci_register_driver(&r82600_driver);
387}
388
389
390static void __exit r82600_exit(void)
391{
392 pci_unregister_driver(&r82600_driver);
393}
394
395
396module_init(r82600_init);
397module_exit(r82600_exit);
398
399
400MODULE_LICENSE("GPL");
401MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
402 "on behalf of EADS Astrium");
403MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
404
405module_param(disable_hardware_scrub, bool, 0644);
406MODULE_PARM_DESC(disable_hardware_scrub,
407 "If set, disable the chipset's automatic scrub for CEs");
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index ca99979c868..8b3515f394a 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -8,6 +8,7 @@
8 * completion notification. 8 * completion notification.
9 */ 9 */
10 10
11#include <asm/types.h>
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12 13
13#include <linux/blkdev.h> 14#include <linux/blkdev.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1421941487c..626508afe1b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -7,6 +7,7 @@ menu "Network device support"
7 7
8config NETDEVICES 8config NETDEVICES
9 depends on NET 9 depends on NET
10 default y if UML
10 bool "Network device support" 11 bool "Network device support"
11 ---help--- 12 ---help---
12 You can say N here if you don't intend to connect your Linux box to 13 You can say N here if you don't intend to connect your Linux box to
@@ -1914,6 +1915,15 @@ config E1000_NAPI
1914 1915
1915 If in doubt, say N. 1916 If in doubt, say N.
1916 1917
1918config E1000_DISABLE_PACKET_SPLIT
1919 bool "Disable Packet Split for PCI express adapters"
1920 depends on E1000
1921 help
1922 Say Y here if you want to use the legacy receive path for PCI express
 1923 hardware.
1924
1925 If in doubt, say N.
1926
1917source "drivers/net/ixp2000/Kconfig" 1927source "drivers/net/ixp2000/Kconfig"
1918 1928
1919config MYRI_SBUS 1929config MYRI_SBUS
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index dde631f8f68..6e295fce5c6 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -335,6 +335,30 @@ static inline void cas_mask_intr(struct cas *cp)
335 cas_disable_irq(cp, i); 335 cas_disable_irq(cp, i);
336} 336}
337 337
338static inline void cas_buffer_init(cas_page_t *cp)
339{
340 struct page *page = cp->buffer;
341 atomic_set((atomic_t *)&page->lru.next, 1);
342}
343
344static inline int cas_buffer_count(cas_page_t *cp)
345{
346 struct page *page = cp->buffer;
347 return atomic_read((atomic_t *)&page->lru.next);
348}
349
350static inline void cas_buffer_inc(cas_page_t *cp)
351{
352 struct page *page = cp->buffer;
353 atomic_inc((atomic_t *)&page->lru.next);
354}
355
356static inline void cas_buffer_dec(cas_page_t *cp)
357{
358 struct page *page = cp->buffer;
359 atomic_dec((atomic_t *)&page->lru.next);
360}
361
338static void cas_enable_irq(struct cas *cp, const int ring) 362static void cas_enable_irq(struct cas *cp, const int ring)
339{ 363{
340 if (ring == 0) { /* all but TX_DONE */ 364 if (ring == 0) { /* all but TX_DONE */
@@ -472,6 +496,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
472{ 496{
473 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, 497 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
474 PCI_DMA_FROMDEVICE); 498 PCI_DMA_FROMDEVICE);
499 cas_buffer_dec(page);
475 __free_pages(page->buffer, cp->page_order); 500 __free_pages(page->buffer, cp->page_order);
476 kfree(page); 501 kfree(page);
477 return 0; 502 return 0;
@@ -501,6 +526,7 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
501 page->buffer = alloc_pages(flags, cp->page_order); 526 page->buffer = alloc_pages(flags, cp->page_order);
502 if (!page->buffer) 527 if (!page->buffer)
503 goto page_err; 528 goto page_err;
529 cas_buffer_init(page);
504 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, 530 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
505 cp->page_size, PCI_DMA_FROMDEVICE); 531 cp->page_size, PCI_DMA_FROMDEVICE);
506 return page; 532 return page;
@@ -579,7 +605,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
579 list_for_each_safe(elem, tmp, &list) { 605 list_for_each_safe(elem, tmp, &list) {
580 cas_page_t *page = list_entry(elem, cas_page_t, list); 606 cas_page_t *page = list_entry(elem, cas_page_t, list);
581 607
582 if (page_count(page->buffer) > 1) 608 if (cas_buffer_count(page) > 1)
583 continue; 609 continue;
584 610
585 list_del(elem); 611 list_del(elem);
@@ -1347,7 +1373,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1347 cas_page_t *page = cp->rx_pages[1][index]; 1373 cas_page_t *page = cp->rx_pages[1][index];
1348 cas_page_t *new; 1374 cas_page_t *new;
1349 1375
1350 if (page_count(page->buffer) == 1) 1376 if (cas_buffer_count(page) == 1)
1351 return page; 1377 return page;
1352 1378
1353 new = cas_page_dequeue(cp); 1379 new = cas_page_dequeue(cp);
@@ -1367,7 +1393,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1367 cas_page_t **page1 = cp->rx_pages[1]; 1393 cas_page_t **page1 = cp->rx_pages[1];
1368 1394
1369 /* swap if buffer is in use */ 1395 /* swap if buffer is in use */
1370 if (page_count(page0[index]->buffer) > 1) { 1396 if (cas_buffer_count(page0[index]) > 1) {
1371 cas_page_t *new = cas_page_spare(cp, index); 1397 cas_page_t *new = cas_page_spare(cp, index);
1372 if (new) { 1398 if (new) {
1373 page1[index] = page0[index]; 1399 page1[index] = page0[index];
@@ -2039,6 +2065,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2039 skb->len += hlen - swivel; 2065 skb->len += hlen - swivel;
2040 2066
2041 get_page(page->buffer); 2067 get_page(page->buffer);
2068 cas_buffer_inc(page);
2042 frag->page = page->buffer; 2069 frag->page = page->buffer;
2043 frag->page_offset = off; 2070 frag->page_offset = off;
2044 frag->size = hlen - swivel; 2071 frag->size = hlen - swivel;
@@ -2063,6 +2090,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2063 frag++; 2090 frag++;
2064 2091
2065 get_page(page->buffer); 2092 get_page(page->buffer);
2093 cas_buffer_inc(page);
2066 frag->page = page->buffer; 2094 frag->page = page->buffer;
2067 frag->page_offset = 0; 2095 frag->page_offset = 0;
2068 frag->size = hlen; 2096 frag->size = hlen;
@@ -2225,7 +2253,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2225 released = 0; 2253 released = 0;
2226 while (entry != last) { 2254 while (entry != last) {
2227 /* make a new buffer if it's still in use */ 2255 /* make a new buffer if it's still in use */
2228 if (page_count(page[entry]->buffer) > 1) { 2256 if (cas_buffer_count(page[entry]) > 1) {
2229 cas_page_t *new = cas_page_dequeue(cp); 2257 cas_page_t *new = cas_page_dequeue(cp);
2230 if (!new) { 2258 if (!new) {
2231 /* let the timer know that we need to 2259 /* let the timer know that we need to
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d252297e4db..5cedc81786e 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -121,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
121 struct e1000_adapter *adapter = netdev_priv(netdev); 121 struct e1000_adapter *adapter = netdev_priv(netdev);
122 struct e1000_hw *hw = &adapter->hw; 122 struct e1000_hw *hw = &adapter->hw;
123 123
124 if(hw->media_type == e1000_media_type_copper) { 124 if (hw->media_type == e1000_media_type_copper) {
125 125
126 ecmd->supported = (SUPPORTED_10baseT_Half | 126 ecmd->supported = (SUPPORTED_10baseT_Half |
127 SUPPORTED_10baseT_Full | 127 SUPPORTED_10baseT_Full |
@@ -133,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
133 133
134 ecmd->advertising = ADVERTISED_TP; 134 ecmd->advertising = ADVERTISED_TP;
135 135
136 if(hw->autoneg == 1) { 136 if (hw->autoneg == 1) {
137 ecmd->advertising |= ADVERTISED_Autoneg; 137 ecmd->advertising |= ADVERTISED_Autoneg;
138 138
139 /* the e1000 autoneg seems to match ethtool nicely */ 139 /* the e1000 autoneg seems to match ethtool nicely */
@@ -144,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
144 ecmd->port = PORT_TP; 144 ecmd->port = PORT_TP;
145 ecmd->phy_address = hw->phy_addr; 145 ecmd->phy_address = hw->phy_addr;
146 146
147 if(hw->mac_type == e1000_82543) 147 if (hw->mac_type == e1000_82543)
148 ecmd->transceiver = XCVR_EXTERNAL; 148 ecmd->transceiver = XCVR_EXTERNAL;
149 else 149 else
150 ecmd->transceiver = XCVR_INTERNAL; 150 ecmd->transceiver = XCVR_INTERNAL;
@@ -160,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
160 160
161 ecmd->port = PORT_FIBRE; 161 ecmd->port = PORT_FIBRE;
162 162
163 if(hw->mac_type >= e1000_82545) 163 if (hw->mac_type >= e1000_82545)
164 ecmd->transceiver = XCVR_INTERNAL; 164 ecmd->transceiver = XCVR_INTERNAL;
165 else 165 else
166 ecmd->transceiver = XCVR_EXTERNAL; 166 ecmd->transceiver = XCVR_EXTERNAL;
167 } 167 }
168 168
169 if(netif_carrier_ok(adapter->netdev)) { 169 if (netif_carrier_ok(adapter->netdev)) {
170 170
171 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 171 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
172 &adapter->link_duplex); 172 &adapter->link_duplex);
@@ -175,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
175 /* unfortunatly FULL_DUPLEX != DUPLEX_FULL 175 /* unfortunatly FULL_DUPLEX != DUPLEX_FULL
176 * and HALF_DUPLEX != DUPLEX_HALF */ 176 * and HALF_DUPLEX != DUPLEX_HALF */
177 177
178 if(adapter->link_duplex == FULL_DUPLEX) 178 if (adapter->link_duplex == FULL_DUPLEX)
179 ecmd->duplex = DUPLEX_FULL; 179 ecmd->duplex = DUPLEX_FULL;
180 else 180 else
181 ecmd->duplex = DUPLEX_HALF; 181 ecmd->duplex = DUPLEX_HALF;
@@ -205,11 +205,11 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
205 205
206 if (ecmd->autoneg == AUTONEG_ENABLE) { 206 if (ecmd->autoneg == AUTONEG_ENABLE) {
207 hw->autoneg = 1; 207 hw->autoneg = 1;
208 if(hw->media_type == e1000_media_type_fiber) 208 if (hw->media_type == e1000_media_type_fiber)
209 hw->autoneg_advertised = ADVERTISED_1000baseT_Full | 209 hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
210 ADVERTISED_FIBRE | 210 ADVERTISED_FIBRE |
211 ADVERTISED_Autoneg; 211 ADVERTISED_Autoneg;
212 else 212 else
213 hw->autoneg_advertised = ADVERTISED_10baseT_Half | 213 hw->autoneg_advertised = ADVERTISED_10baseT_Half |
214 ADVERTISED_10baseT_Full | 214 ADVERTISED_10baseT_Full |
215 ADVERTISED_100baseT_Half | 215 ADVERTISED_100baseT_Half |
@@ -219,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
219 ADVERTISED_TP; 219 ADVERTISED_TP;
220 ecmd->advertising = hw->autoneg_advertised; 220 ecmd->advertising = hw->autoneg_advertised;
221 } else 221 } else
222 if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) 222 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
223 return -EINVAL; 223 return -EINVAL;
224 224
225 /* reset the link */ 225 /* reset the link */
226 226
227 if(netif_running(adapter->netdev)) { 227 if (netif_running(adapter->netdev)) {
228 e1000_down(adapter); 228 e1000_down(adapter);
229 e1000_reset(adapter); 229 e1000_reset(adapter);
230 e1000_up(adapter); 230 e1000_up(adapter);
@@ -241,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
241 struct e1000_adapter *adapter = netdev_priv(netdev); 241 struct e1000_adapter *adapter = netdev_priv(netdev);
242 struct e1000_hw *hw = &adapter->hw; 242 struct e1000_hw *hw = &adapter->hw;
243 243
244 pause->autoneg = 244 pause->autoneg =
245 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 245 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
246 246
247 if(hw->fc == e1000_fc_rx_pause) 247 if (hw->fc == e1000_fc_rx_pause)
248 pause->rx_pause = 1; 248 pause->rx_pause = 1;
249 else if(hw->fc == e1000_fc_tx_pause) 249 else if (hw->fc == e1000_fc_tx_pause)
250 pause->tx_pause = 1; 250 pause->tx_pause = 1;
251 else if(hw->fc == e1000_fc_full) { 251 else if (hw->fc == e1000_fc_full) {
252 pause->rx_pause = 1; 252 pause->rx_pause = 1;
253 pause->tx_pause = 1; 253 pause->tx_pause = 1;
254 } 254 }
@@ -260,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
260{ 260{
261 struct e1000_adapter *adapter = netdev_priv(netdev); 261 struct e1000_adapter *adapter = netdev_priv(netdev);
262 struct e1000_hw *hw = &adapter->hw; 262 struct e1000_hw *hw = &adapter->hw;
263 263
264 adapter->fc_autoneg = pause->autoneg; 264 adapter->fc_autoneg = pause->autoneg;
265 265
266 if(pause->rx_pause && pause->tx_pause) 266 if (pause->rx_pause && pause->tx_pause)
267 hw->fc = e1000_fc_full; 267 hw->fc = e1000_fc_full;
268 else if(pause->rx_pause && !pause->tx_pause) 268 else if (pause->rx_pause && !pause->tx_pause)
269 hw->fc = e1000_fc_rx_pause; 269 hw->fc = e1000_fc_rx_pause;
270 else if(!pause->rx_pause && pause->tx_pause) 270 else if (!pause->rx_pause && pause->tx_pause)
271 hw->fc = e1000_fc_tx_pause; 271 hw->fc = e1000_fc_tx_pause;
272 else if(!pause->rx_pause && !pause->tx_pause) 272 else if (!pause->rx_pause && !pause->tx_pause)
273 hw->fc = e1000_fc_none; 273 hw->fc = e1000_fc_none;
274 274
275 hw->original_fc = hw->fc; 275 hw->original_fc = hw->fc;
276 276
277 if(adapter->fc_autoneg == AUTONEG_ENABLE) { 277 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
278 if(netif_running(adapter->netdev)) { 278 if (netif_running(adapter->netdev)) {
279 e1000_down(adapter); 279 e1000_down(adapter);
280 e1000_up(adapter); 280 e1000_up(adapter);
281 } else 281 } else
282 e1000_reset(adapter); 282 e1000_reset(adapter);
283 } 283 } else
284 else
285 return ((hw->media_type == e1000_media_type_fiber) ? 284 return ((hw->media_type == e1000_media_type_fiber) ?
286 e1000_setup_link(hw) : e1000_force_mac_fc(hw)); 285 e1000_setup_link(hw) : e1000_force_mac_fc(hw));
287 286
288 return 0; 287 return 0;
289} 288}
290 289
@@ -301,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
301 struct e1000_adapter *adapter = netdev_priv(netdev); 300 struct e1000_adapter *adapter = netdev_priv(netdev);
302 adapter->rx_csum = data; 301 adapter->rx_csum = data;
303 302
304 if(netif_running(netdev)) { 303 if (netif_running(netdev)) {
305 e1000_down(adapter); 304 e1000_down(adapter);
306 e1000_up(adapter); 305 e1000_up(adapter);
307 } else 306 } else
308 e1000_reset(adapter); 307 e1000_reset(adapter);
309 return 0; 308 return 0;
310} 309}
311 310
312static uint32_t 311static uint32_t
313e1000_get_tx_csum(struct net_device *netdev) 312e1000_get_tx_csum(struct net_device *netdev)
314{ 313{
@@ -320,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
320{ 319{
321 struct e1000_adapter *adapter = netdev_priv(netdev); 320 struct e1000_adapter *adapter = netdev_priv(netdev);
322 321
323 if(adapter->hw.mac_type < e1000_82543) { 322 if (adapter->hw.mac_type < e1000_82543) {
324 if (!data) 323 if (!data)
325 return -EINVAL; 324 return -EINVAL;
326 return 0; 325 return 0;
@@ -339,8 +338,8 @@ static int
339e1000_set_tso(struct net_device *netdev, uint32_t data) 338e1000_set_tso(struct net_device *netdev, uint32_t data)
340{ 339{
341 struct e1000_adapter *adapter = netdev_priv(netdev); 340 struct e1000_adapter *adapter = netdev_priv(netdev);
342 if((adapter->hw.mac_type < e1000_82544) || 341 if ((adapter->hw.mac_type < e1000_82544) ||
343 (adapter->hw.mac_type == e1000_82547)) 342 (adapter->hw.mac_type == e1000_82547))
344 return data ? -EINVAL : 0; 343 return data ? -EINVAL : 0;
345 344
346 if (data) 345 if (data)
@@ -348,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
348 else 347 else
349 netdev->features &= ~NETIF_F_TSO; 348 netdev->features &= ~NETIF_F_TSO;
350 return 0; 349 return 0;
351} 350}
352#endif /* NETIF_F_TSO */ 351#endif /* NETIF_F_TSO */
353 352
354static uint32_t 353static uint32_t
@@ -365,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
365 adapter->msg_enable = data; 364 adapter->msg_enable = data;
366} 365}
367 366
368static int 367static int
369e1000_get_regs_len(struct net_device *netdev) 368e1000_get_regs_len(struct net_device *netdev)
370{ 369{
371#define E1000_REGS_LEN 32 370#define E1000_REGS_LEN 32
@@ -401,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
401 regs_buff[11] = E1000_READ_REG(hw, TIDV); 400 regs_buff[11] = E1000_READ_REG(hw, TIDV);
402 401
403 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */ 402 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */
404 if(hw->phy_type == e1000_phy_igp) { 403 if (hw->phy_type == e1000_phy_igp) {
405 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 404 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
406 IGP01E1000_PHY_AGC_A); 405 IGP01E1000_PHY_AGC_A);
407 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & 406 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -455,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
455 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 454 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
456 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */ 455 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
457 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 456 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
458 if(hw->mac_type >= e1000_82540 && 457 if (hw->mac_type >= e1000_82540 &&
459 hw->media_type == e1000_media_type_copper) { 458 hw->media_type == e1000_media_type_copper) {
460 regs_buff[26] = E1000_READ_REG(hw, MANC); 459 regs_buff[26] = E1000_READ_REG(hw, MANC);
461 } 460 }
@@ -479,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
479 int ret_val = 0; 478 int ret_val = 0;
480 uint16_t i; 479 uint16_t i;
481 480
482 if(eeprom->len == 0) 481 if (eeprom->len == 0)
483 return -EINVAL; 482 return -EINVAL;
484 483
485 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 484 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -489,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
489 488
490 eeprom_buff = kmalloc(sizeof(uint16_t) * 489 eeprom_buff = kmalloc(sizeof(uint16_t) *
491 (last_word - first_word + 1), GFP_KERNEL); 490 (last_word - first_word + 1), GFP_KERNEL);
492 if(!eeprom_buff) 491 if (!eeprom_buff)
493 return -ENOMEM; 492 return -ENOMEM;
494 493
495 if(hw->eeprom.type == e1000_eeprom_spi) 494 if (hw->eeprom.type == e1000_eeprom_spi)
496 ret_val = e1000_read_eeprom(hw, first_word, 495 ret_val = e1000_read_eeprom(hw, first_word,
497 last_word - first_word + 1, 496 last_word - first_word + 1,
498 eeprom_buff); 497 eeprom_buff);
499 else { 498 else {
500 for (i = 0; i < last_word - first_word + 1; i++) 499 for (i = 0; i < last_word - first_word + 1; i++)
501 if((ret_val = e1000_read_eeprom(hw, first_word + i, 1, 500 if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
502 &eeprom_buff[i]))) 501 &eeprom_buff[i])))
503 break; 502 break;
504 } 503 }
@@ -525,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
525 int max_len, first_word, last_word, ret_val = 0; 524 int max_len, first_word, last_word, ret_val = 0;
526 uint16_t i; 525 uint16_t i;
527 526
528 if(eeprom->len == 0) 527 if (eeprom->len == 0)
529 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
530 529
531 if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 530 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
532 return -EFAULT; 531 return -EFAULT;
533 532
534 max_len = hw->eeprom.word_size * 2; 533 max_len = hw->eeprom.word_size * 2;
@@ -536,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
536 first_word = eeprom->offset >> 1; 535 first_word = eeprom->offset >> 1;
537 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 536 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
538 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 537 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
539 if(!eeprom_buff) 538 if (!eeprom_buff)
540 return -ENOMEM; 539 return -ENOMEM;
541 540
542 ptr = (void *)eeprom_buff; 541 ptr = (void *)eeprom_buff;
543 542
544 if(eeprom->offset & 1) { 543 if (eeprom->offset & 1) {
545 /* need read/modify/write of first changed EEPROM word */ 544 /* need read/modify/write of first changed EEPROM word */
546 /* only the second byte of the word is being modified */ 545 /* only the second byte of the word is being modified */
547 ret_val = e1000_read_eeprom(hw, first_word, 1, 546 ret_val = e1000_read_eeprom(hw, first_word, 1,
548 &eeprom_buff[0]); 547 &eeprom_buff[0]);
549 ptr++; 548 ptr++;
550 } 549 }
551 if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 550 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
552 /* need read/modify/write of last changed EEPROM word */ 551 /* need read/modify/write of last changed EEPROM word */
553 /* only the first byte of the word is being modified */ 552 /* only the first byte of the word is being modified */
554 ret_val = e1000_read_eeprom(hw, last_word, 1, 553 ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -567,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
567 ret_val = e1000_write_eeprom(hw, first_word, 566 ret_val = e1000_write_eeprom(hw, first_word,
568 last_word - first_word + 1, eeprom_buff); 567 last_word - first_word + 1, eeprom_buff);
569 568
570 /* Update the checksum over the first part of the EEPROM if needed 569 /* Update the checksum over the first part of the EEPROM if needed
571 * and flush shadow RAM for 82573 conrollers */ 570 * and flush shadow RAM for 82573 conrollers */
572 if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || 571 if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
573 (hw->mac_type == e1000_82573))) 572 (hw->mac_type == e1000_82573)))
574 e1000_update_eeprom_checksum(hw); 573 e1000_update_eeprom_checksum(hw);
575 574
@@ -633,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
633 ring->rx_jumbo_pending = 0; 632 ring->rx_jumbo_pending = 0;
634} 633}
635 634
636static int 635static int
637e1000_set_ringparam(struct net_device *netdev, 636e1000_set_ringparam(struct net_device *netdev,
638 struct ethtool_ringparam *ring) 637 struct ethtool_ringparam *ring)
639{ 638{
@@ -670,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
670 txdr = adapter->tx_ring; 669 txdr = adapter->tx_ring;
671 rxdr = adapter->rx_ring; 670 rxdr = adapter->rx_ring;
672 671
673 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 672 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
674 return -EINVAL; 673 return -EINVAL;
675 674
676 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); 675 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
677 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? 676 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
678 E1000_MAX_RXD : E1000_MAX_82544_RXD)); 677 E1000_MAX_RXD : E1000_MAX_82544_RXD));
679 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); 678 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
680 679
681 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD); 680 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
682 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ? 681 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
683 E1000_MAX_TXD : E1000_MAX_82544_TXD)); 682 E1000_MAX_TXD : E1000_MAX_82544_TXD));
684 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 683 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
685 684
686 for (i = 0; i < adapter->num_tx_queues; i++) 685 for (i = 0; i < adapter->num_tx_queues; i++)
687 txdr[i].count = txdr->count; 686 txdr[i].count = txdr->count;
688 for (i = 0; i < adapter->num_rx_queues; i++) 687 for (i = 0; i < adapter->num_rx_queues; i++)
689 rxdr[i].count = rxdr->count; 688 rxdr[i].count = rxdr->count;
690 689
691 if(netif_running(adapter->netdev)) { 690 if (netif_running(adapter->netdev)) {
692 /* Try to get new resources before deleting old */ 691 /* Try to get new resources before deleting old */
693 if ((err = e1000_setup_all_rx_resources(adapter))) 692 if ((err = e1000_setup_all_rx_resources(adapter)))
694 goto err_setup_rx; 693 goto err_setup_rx;
@@ -708,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
708 kfree(rx_old); 707 kfree(rx_old);
709 adapter->rx_ring = rx_new; 708 adapter->rx_ring = rx_new;
710 adapter->tx_ring = tx_new; 709 adapter->tx_ring = tx_new;
711 if((err = e1000_up(adapter))) 710 if ((err = e1000_up(adapter)))
712 return err; 711 return err;
713 } 712 }
714 713
@@ -727,10 +726,10 @@ err_setup_rx:
727 uint32_t pat, value; \ 726 uint32_t pat, value; \
728 uint32_t test[] = \ 727 uint32_t test[] = \
729 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 728 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
730 for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \ 729 for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
731 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \ 730 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
732 value = E1000_READ_REG(&adapter->hw, R); \ 731 value = E1000_READ_REG(&adapter->hw, R); \
733 if(value != (test[pat] & W & M)) { \ 732 if (value != (test[pat] & W & M)) { \
734 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \ 733 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
735 "0x%08X expected 0x%08X\n", \ 734 "0x%08X expected 0x%08X\n", \
736 E1000_##R, value, (test[pat] & W & M)); \ 735 E1000_##R, value, (test[pat] & W & M)); \
@@ -746,7 +745,7 @@ err_setup_rx:
746 uint32_t value; \ 745 uint32_t value; \
747 E1000_WRITE_REG(&adapter->hw, R, W & M); \ 746 E1000_WRITE_REG(&adapter->hw, R, W & M); \
748 value = E1000_READ_REG(&adapter->hw, R); \ 747 value = E1000_READ_REG(&adapter->hw, R); \
749 if((W & M) != (value & M)) { \ 748 if ((W & M) != (value & M)) { \
750 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ 749 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
751 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \ 750 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
752 *data = (adapter->hw.mac_type < e1000_82543) ? \ 751 *data = (adapter->hw.mac_type < e1000_82543) ? \
@@ -782,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
782 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); 781 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
783 E1000_WRITE_REG(&adapter->hw, STATUS, toggle); 782 E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
784 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; 783 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
785 if(value != after) { 784 if (value != after) {
786 DPRINTK(DRV, ERR, "failed STATUS register test got: " 785 DPRINTK(DRV, ERR, "failed STATUS register test got: "
787 "0x%08X expected: 0x%08X\n", after, value); 786 "0x%08X expected: 0x%08X\n", after, value);
788 *data = 1; 787 *data = 1;
@@ -810,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
810 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB); 809 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
811 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); 810 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
812 811
813 if(adapter->hw.mac_type >= e1000_82543) { 812 if (adapter->hw.mac_type >= e1000_82543) {
814 813
815 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF); 814 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
816 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 815 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -818,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
818 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 817 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
819 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); 818 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
820 819
821 for(i = 0; i < E1000_RAR_ENTRIES; i++) { 820 for (i = 0; i < E1000_RAR_ENTRIES; i++) {
822 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF, 821 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
823 0xFFFFFFFF); 822 0xFFFFFFFF);
824 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 823 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -834,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
834 833
835 } 834 }
836 835
837 for(i = 0; i < E1000_MC_TBL_SIZE; i++) 836 for (i = 0; i < E1000_MC_TBL_SIZE; i++)
838 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); 837 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
839 838
840 *data = 0; 839 *data = 0;
@@ -850,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
850 849
851 *data = 0; 850 *data = 0;
852 /* Read and add up the contents of the EEPROM */ 851 /* Read and add up the contents of the EEPROM */
853 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 852 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
854 if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) { 853 if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
855 *data = 1; 854 *data = 1;
856 break; 855 break;
857 } 856 }
@@ -859,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
859 } 858 }
860 859
861 /* If Checksum is not Correct return error else test passed */ 860 /* If Checksum is not Correct return error else test passed */
862 if((checksum != (uint16_t) EEPROM_SUM) && !(*data)) 861 if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
863 *data = 2; 862 *data = 2;
864 863
865 return *data; 864 return *data;
@@ -888,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
888 *data = 0; 887 *data = 0;
889 888
890 /* Hook up test interrupt handler just for this test */ 889 /* Hook up test interrupt handler just for this test */
891 if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { 890 if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
892 shared_int = FALSE; 891 shared_int = FALSE;
893 } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, 892 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
894 netdev->name, netdev)){ 893 netdev->name, netdev)){
895 *data = 1; 894 *data = 1;
896 return -1; 895 return -1;
@@ -901,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
901 msec_delay(10); 900 msec_delay(10);
902 901
903 /* Test each interrupt */ 902 /* Test each interrupt */
904 for(; i < 10; i++) { 903 for (; i < 10; i++) {
905 904
906 /* Interrupt to test */ 905 /* Interrupt to test */
907 mask = 1 << i; 906 mask = 1 << i;
908 907
909 if(!shared_int) { 908 if (!shared_int) {
910 /* Disable the interrupt to be reported in 909 /* Disable the interrupt to be reported in
911 * the cause register and then force the same 910 * the cause register and then force the same
912 * interrupt and see if one gets posted. If 911 * interrupt and see if one gets posted. If
@@ -917,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
917 E1000_WRITE_REG(&adapter->hw, IMC, mask); 916 E1000_WRITE_REG(&adapter->hw, IMC, mask);
918 E1000_WRITE_REG(&adapter->hw, ICS, mask); 917 E1000_WRITE_REG(&adapter->hw, ICS, mask);
919 msec_delay(10); 918 msec_delay(10);
920 919
921 if(adapter->test_icr & mask) { 920 if (adapter->test_icr & mask) {
922 *data = 3; 921 *data = 3;
923 break; 922 break;
924 } 923 }
@@ -935,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
935 E1000_WRITE_REG(&adapter->hw, ICS, mask); 934 E1000_WRITE_REG(&adapter->hw, ICS, mask);
936 msec_delay(10); 935 msec_delay(10);
937 936
938 if(!(adapter->test_icr & mask)) { 937 if (!(adapter->test_icr & mask)) {
939 *data = 4; 938 *data = 4;
940 break; 939 break;
941 } 940 }
942 941
943 if(!shared_int) { 942 if (!shared_int) {
944 /* Disable the other interrupts to be reported in 943 /* Disable the other interrupts to be reported in
945 * the cause register and then force the other 944 * the cause register and then force the other
946 * interrupts and see if any get posted. If 945 * interrupts and see if any get posted. If
@@ -952,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
952 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); 951 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
953 msec_delay(10); 952 msec_delay(10);
954 953
955 if(adapter->test_icr) { 954 if (adapter->test_icr) {
956 *data = 5; 955 *data = 5;
957 break; 956 break;
958 } 957 }
@@ -977,24 +976,24 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
977 struct pci_dev *pdev = adapter->pdev; 976 struct pci_dev *pdev = adapter->pdev;
978 int i; 977 int i;
979 978
980 if(txdr->desc && txdr->buffer_info) { 979 if (txdr->desc && txdr->buffer_info) {
981 for(i = 0; i < txdr->count; i++) { 980 for (i = 0; i < txdr->count; i++) {
982 if(txdr->buffer_info[i].dma) 981 if (txdr->buffer_info[i].dma)
983 pci_unmap_single(pdev, txdr->buffer_info[i].dma, 982 pci_unmap_single(pdev, txdr->buffer_info[i].dma,
984 txdr->buffer_info[i].length, 983 txdr->buffer_info[i].length,
985 PCI_DMA_TODEVICE); 984 PCI_DMA_TODEVICE);
986 if(txdr->buffer_info[i].skb) 985 if (txdr->buffer_info[i].skb)
987 dev_kfree_skb(txdr->buffer_info[i].skb); 986 dev_kfree_skb(txdr->buffer_info[i].skb);
988 } 987 }
989 } 988 }
990 989
991 if(rxdr->desc && rxdr->buffer_info) { 990 if (rxdr->desc && rxdr->buffer_info) {
992 for(i = 0; i < rxdr->count; i++) { 991 for (i = 0; i < rxdr->count; i++) {
993 if(rxdr->buffer_info[i].dma) 992 if (rxdr->buffer_info[i].dma)
994 pci_unmap_single(pdev, rxdr->buffer_info[i].dma, 993 pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
995 rxdr->buffer_info[i].length, 994 rxdr->buffer_info[i].length,
996 PCI_DMA_FROMDEVICE); 995 PCI_DMA_FROMDEVICE);
997 if(rxdr->buffer_info[i].skb) 996 if (rxdr->buffer_info[i].skb)
998 dev_kfree_skb(rxdr->buffer_info[i].skb); 997 dev_kfree_skb(rxdr->buffer_info[i].skb);
999 } 998 }
1000 } 999 }
@@ -1027,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1027 1026
1028 /* Setup Tx descriptor ring and Tx buffers */ 1027 /* Setup Tx descriptor ring and Tx buffers */
1029 1028
1030 if(!txdr->count) 1029 if (!txdr->count)
1031 txdr->count = E1000_DEFAULT_TXD; 1030 txdr->count = E1000_DEFAULT_TXD;
1032 1031
1033 size = txdr->count * sizeof(struct e1000_buffer); 1032 size = txdr->count * sizeof(struct e1000_buffer);
1034 if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 1033 if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1035 ret_val = 1; 1034 ret_val = 1;
1036 goto err_nomem; 1035 goto err_nomem;
1037 } 1036 }
@@ -1039,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1039 1038
1040 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1039 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1041 E1000_ROUNDUP(txdr->size, 4096); 1040 E1000_ROUNDUP(txdr->size, 4096);
1042 if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) { 1041 if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
1043 ret_val = 2; 1042 ret_val = 2;
1044 goto err_nomem; 1043 goto err_nomem;
1045 } 1044 }
@@ -1058,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1058 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1057 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1059 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1058 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1060 1059
1061 for(i = 0; i < txdr->count; i++) { 1060 for (i = 0; i < txdr->count; i++) {
1062 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); 1061 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
1063 struct sk_buff *skb; 1062 struct sk_buff *skb;
1064 unsigned int size = 1024; 1063 unsigned int size = 1024;
1065 1064
1066 if(!(skb = alloc_skb(size, GFP_KERNEL))) { 1065 if (!(skb = alloc_skb(size, GFP_KERNEL))) {
1067 ret_val = 3; 1066 ret_val = 3;
1068 goto err_nomem; 1067 goto err_nomem;
1069 } 1068 }
@@ -1083,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1083 1082
1084 /* Setup Rx descriptor ring and Rx buffers */ 1083 /* Setup Rx descriptor ring and Rx buffers */
1085 1084
1086 if(!rxdr->count) 1085 if (!rxdr->count)
1087 rxdr->count = E1000_DEFAULT_RXD; 1086 rxdr->count = E1000_DEFAULT_RXD;
1088 1087
1089 size = rxdr->count * sizeof(struct e1000_buffer); 1088 size = rxdr->count * sizeof(struct e1000_buffer);
1090 if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 1089 if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1091 ret_val = 4; 1090 ret_val = 4;
1092 goto err_nomem; 1091 goto err_nomem;
1093 } 1092 }
1094 memset(rxdr->buffer_info, 0, size); 1093 memset(rxdr->buffer_info, 0, size);
1095 1094
1096 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1095 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1097 if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) { 1096 if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
1098 ret_val = 5; 1097 ret_val = 5;
1099 goto err_nomem; 1098 goto err_nomem;
1100 } 1099 }
@@ -1114,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1114 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1113 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1115 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1114 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1116 1115
1117 for(i = 0; i < rxdr->count; i++) { 1116 for (i = 0; i < rxdr->count; i++) {
1118 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); 1117 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
1119 struct sk_buff *skb; 1118 struct sk_buff *skb;
1120 1119
1121 if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, 1120 if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
1122 GFP_KERNEL))) { 1121 GFP_KERNEL))) {
1123 ret_val = 6; 1122 ret_val = 6;
1124 goto err_nomem; 1123 goto err_nomem;
@@ -1227,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1227 1226
1228 /* Check Phy Configuration */ 1227 /* Check Phy Configuration */
1229 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); 1228 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
1230 if(phy_reg != 0x4100) 1229 if (phy_reg != 0x4100)
1231 return 9; 1230 return 9;
1232 1231
1233 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); 1232 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
1234 if(phy_reg != 0x0070) 1233 if (phy_reg != 0x0070)
1235 return 10; 1234 return 10;
1236 1235
1237 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); 1236 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
1238 if(phy_reg != 0x001A) 1237 if (phy_reg != 0x001A)
1239 return 11; 1238 return 11;
1240 1239
1241 return 0; 1240 return 0;
@@ -1249,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1249 1248
1250 adapter->hw.autoneg = FALSE; 1249 adapter->hw.autoneg = FALSE;
1251 1250
1252 if(adapter->hw.phy_type == e1000_phy_m88) { 1251 if (adapter->hw.phy_type == e1000_phy_m88) {
1253 /* Auto-MDI/MDIX Off */ 1252 /* Auto-MDI/MDIX Off */
1254 e1000_write_phy_reg(&adapter->hw, 1253 e1000_write_phy_reg(&adapter->hw,
1255 M88E1000_PHY_SPEC_CTRL, 0x0808); 1254 M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1269,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1269 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1268 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1270 E1000_CTRL_FD); /* Force Duplex to FULL */ 1269 E1000_CTRL_FD); /* Force Duplex to FULL */
1271 1270
1272 if(adapter->hw.media_type == e1000_media_type_copper && 1271 if (adapter->hw.media_type == e1000_media_type_copper &&
1273 adapter->hw.phy_type == e1000_phy_m88) { 1272 adapter->hw.phy_type == e1000_phy_m88) {
1274 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1273 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1275 } else { 1274 } else {
1276 /* Set the ILOS bit on the fiber Nic is half 1275 /* Set the ILOS bit on the fiber Nic is half
1277 * duplex link is detected. */ 1276 * duplex link is detected. */
1278 stat_reg = E1000_READ_REG(&adapter->hw, STATUS); 1277 stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
1279 if((stat_reg & E1000_STATUS_FD) == 0) 1278 if ((stat_reg & E1000_STATUS_FD) == 0)
1280 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1279 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1281 } 1280 }
1282 1281
@@ -1285,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1285 /* Disable the receiver on the PHY so when a cable is plugged in, the 1284 /* Disable the receiver on the PHY so when a cable is plugged in, the
1286 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1285 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1287 */ 1286 */
1288 if(adapter->hw.phy_type == e1000_phy_m88) 1287 if (adapter->hw.phy_type == e1000_phy_m88)
1289 e1000_phy_disable_receiver(adapter); 1288 e1000_phy_disable_receiver(adapter);
1290 1289
1291 udelay(500); 1290 udelay(500);
@@ -1301,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1301 1300
1302 switch (adapter->hw.mac_type) { 1301 switch (adapter->hw.mac_type) {
1303 case e1000_82543: 1302 case e1000_82543:
1304 if(adapter->hw.media_type == e1000_media_type_copper) { 1303 if (adapter->hw.media_type == e1000_media_type_copper) {
1305 /* Attempt to setup Loopback mode on Non-integrated PHY. 1304 /* Attempt to setup Loopback mode on Non-integrated PHY.
1306 * Some PHY registers get corrupted at random, so 1305 * Some PHY registers get corrupted at random, so
1307 * attempt this 10 times. 1306 * attempt this 10 times.
1308 */ 1307 */
1309 while(e1000_nonintegrated_phy_loopback(adapter) && 1308 while (e1000_nonintegrated_phy_loopback(adapter) &&
1310 count++ < 10); 1309 count++ < 10);
1311 if(count < 11) 1310 if (count < 11)
1312 return 0; 1311 return 0;
1313 } 1312 }
1314 break; 1313 break;
@@ -1430,8 +1429,8 @@ static int
1430e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1429e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1431{ 1430{
1432 frame_size &= ~1; 1431 frame_size &= ~1;
1433 if(*(skb->data + 3) == 0xFF) { 1432 if (*(skb->data + 3) == 0xFF) {
1434 if((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1433 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1435 (*(skb->data + frame_size / 2 + 12) == 0xAF)) { 1434 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
1436 return 0; 1435 return 0;
1437 } 1436 }
@@ -1450,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
1450 1449
1451 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); 1450 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
1452 1451
1453 /* Calculate the loop count based on the largest descriptor ring 1452 /* Calculate the loop count based on the largest descriptor ring
1454 * The idea is to wrap the largest ring a number of times using 64 1453 * The idea is to wrap the largest ring a number of times using 64
1455 * send/receive pairs during each loop 1454 * send/receive pairs during each loop
1456 */ 1455 */
1457 1456
1458 if(rxdr->count <= txdr->count) 1457 if (rxdr->count <= txdr->count)
1459 lc = ((txdr->count / 64) * 2) + 1; 1458 lc = ((txdr->count / 64) * 2) + 1;
1460 else 1459 else
1461 lc = ((rxdr->count / 64) * 2) + 1; 1460 lc = ((rxdr->count / 64) * 2) + 1;
1462 1461
1463 k = l = 0; 1462 k = l = 0;
1464 for(j = 0; j <= lc; j++) { /* loop count loop */ 1463 for (j = 0; j <= lc; j++) { /* loop count loop */
1465 for(i = 0; i < 64; i++) { /* send the packets */ 1464 for (i = 0; i < 64; i++) { /* send the packets */
1466 e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1465 e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1467 1024); 1466 1024);
1468 pci_dma_sync_single_for_device(pdev, 1467 pci_dma_sync_single_for_device(pdev,
1469 txdr->buffer_info[k].dma, 1468 txdr->buffer_info[k].dma,
1470 txdr->buffer_info[k].length, 1469 txdr->buffer_info[k].length,
1471 PCI_DMA_TODEVICE); 1470 PCI_DMA_TODEVICE);
1472 if(unlikely(++k == txdr->count)) k = 0; 1471 if (unlikely(++k == txdr->count)) k = 0;
1473 } 1472 }
1474 E1000_WRITE_REG(&adapter->hw, TDT, k); 1473 E1000_WRITE_REG(&adapter->hw, TDT, k);
1475 msec_delay(200); 1474 msec_delay(200);
1476 time = jiffies; /* set the start time for the receive */ 1475 time = jiffies; /* set the start time for the receive */
1477 good_cnt = 0; 1476 good_cnt = 0;
1478 do { /* receive the sent packets */ 1477 do { /* receive the sent packets */
1479 pci_dma_sync_single_for_cpu(pdev, 1478 pci_dma_sync_single_for_cpu(pdev,
1480 rxdr->buffer_info[l].dma, 1479 rxdr->buffer_info[l].dma,
1481 rxdr->buffer_info[l].length, 1480 rxdr->buffer_info[l].length,
1482 PCI_DMA_FROMDEVICE); 1481 PCI_DMA_FROMDEVICE);
1483 1482
1484 ret_val = e1000_check_lbtest_frame( 1483 ret_val = e1000_check_lbtest_frame(
1485 rxdr->buffer_info[l].skb, 1484 rxdr->buffer_info[l].skb,
1486 1024); 1485 1024);
1487 if(!ret_val) 1486 if (!ret_val)
1488 good_cnt++; 1487 good_cnt++;
1489 if(unlikely(++l == rxdr->count)) l = 0; 1488 if (unlikely(++l == rxdr->count)) l = 0;
1490 /* time + 20 msecs (200 msecs on 2.4) is more than 1489 /* time + 20 msecs (200 msecs on 2.4) is more than
1491 * enough time to complete the receives, if it's 1490 * enough time to complete the receives, if it's
1492 * exceeded, break and error off 1491 * exceeded, break and error off
1493 */ 1492 */
1494 } while (good_cnt < 64 && jiffies < (time + 20)); 1493 } while (good_cnt < 64 && jiffies < (time + 20));
1495 if(good_cnt != 64) { 1494 if (good_cnt != 64) {
1496 ret_val = 13; /* ret_val is the same as mis-compare */ 1495 ret_val = 13; /* ret_val is the same as mis-compare */
1497 break; 1496 break;
1498 } 1497 }
1499 if(jiffies >= (time + 2)) { 1498 if (jiffies >= (time + 2)) {
1500 ret_val = 14; /* error code for time out error */ 1499 ret_val = 14; /* error code for time out error */
1501 break; 1500 break;
1502 } 1501 }
@@ -1549,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
1549 *data = 1; 1548 *data = 1;
1550 } else { 1549 } else {
1551 e1000_check_for_link(&adapter->hw); 1550 e1000_check_for_link(&adapter->hw);
1552 if(adapter->hw.autoneg) /* if auto_neg is set wait for it */ 1551 if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
1553 msec_delay(4000); 1552 msec_delay(4000);
1554 1553
1555 if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { 1554 if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
1556 *data = 1; 1555 *data = 1;
1557 } 1556 }
1558 } 1557 }
1559 return *data; 1558 return *data;
1560} 1559}
1561 1560
1562static int 1561static int
1563e1000_diag_test_count(struct net_device *netdev) 1562e1000_diag_test_count(struct net_device *netdev)
1564{ 1563{
1565 return E1000_TEST_LEN; 1564 return E1000_TEST_LEN;
@@ -1572,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
1572 struct e1000_adapter *adapter = netdev_priv(netdev); 1571 struct e1000_adapter *adapter = netdev_priv(netdev);
1573 boolean_t if_running = netif_running(netdev); 1572 boolean_t if_running = netif_running(netdev);
1574 1573
1575 if(eth_test->flags == ETH_TEST_FL_OFFLINE) { 1574 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1576 /* Offline tests */ 1575 /* Offline tests */
1577 1576
1578 /* save speed, duplex, autoneg settings */ 1577 /* save speed, duplex, autoneg settings */
@@ -1582,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
1582 1581
1583 /* Link test performed before hardware reset so autoneg doesn't 1582 /* Link test performed before hardware reset so autoneg doesn't
1584 * interfere with test result */ 1583 * interfere with test result */
1585 if(e1000_link_test(adapter, &data[4])) 1584 if (e1000_link_test(adapter, &data[4]))
1586 eth_test->flags |= ETH_TEST_FL_FAILED; 1585 eth_test->flags |= ETH_TEST_FL_FAILED;
1587 1586
1588 if(if_running) 1587 if (if_running)
1589 e1000_down(adapter); 1588 e1000_down(adapter);
1590 else 1589 else
1591 e1000_reset(adapter); 1590 e1000_reset(adapter);
1592 1591
1593 if(e1000_reg_test(adapter, &data[0])) 1592 if (e1000_reg_test(adapter, &data[0]))
1594 eth_test->flags |= ETH_TEST_FL_FAILED; 1593 eth_test->flags |= ETH_TEST_FL_FAILED;
1595 1594
1596 e1000_reset(adapter); 1595 e1000_reset(adapter);
1597 if(e1000_eeprom_test(adapter, &data[1])) 1596 if (e1000_eeprom_test(adapter, &data[1]))
1598 eth_test->flags |= ETH_TEST_FL_FAILED; 1597 eth_test->flags |= ETH_TEST_FL_FAILED;
1599 1598
1600 e1000_reset(adapter); 1599 e1000_reset(adapter);
1601 if(e1000_intr_test(adapter, &data[2])) 1600 if (e1000_intr_test(adapter, &data[2]))
1602 eth_test->flags |= ETH_TEST_FL_FAILED; 1601 eth_test->flags |= ETH_TEST_FL_FAILED;
1603 1602
1604 e1000_reset(adapter); 1603 e1000_reset(adapter);
1605 if(e1000_loopback_test(adapter, &data[3])) 1604 if (e1000_loopback_test(adapter, &data[3]))
1606 eth_test->flags |= ETH_TEST_FL_FAILED; 1605 eth_test->flags |= ETH_TEST_FL_FAILED;
1607 1606
1608 /* restore speed, duplex, autoneg settings */ 1607 /* restore speed, duplex, autoneg settings */
@@ -1611,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
1611 adapter->hw.autoneg = autoneg; 1610 adapter->hw.autoneg = autoneg;
1612 1611
1613 e1000_reset(adapter); 1612 e1000_reset(adapter);
1614 if(if_running) 1613 if (if_running)
1615 e1000_up(adapter); 1614 e1000_up(adapter);
1616 } else { 1615 } else {
1617 /* Online tests */ 1616 /* Online tests */
1618 if(e1000_link_test(adapter, &data[4])) 1617 if (e1000_link_test(adapter, &data[4]))
1619 eth_test->flags |= ETH_TEST_FL_FAILED; 1618 eth_test->flags |= ETH_TEST_FL_FAILED;
1620 1619
1621 /* Offline tests aren't run; pass by default */ 1620 /* Offline tests aren't run; pass by default */
@@ -1633,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1633 struct e1000_adapter *adapter = netdev_priv(netdev); 1632 struct e1000_adapter *adapter = netdev_priv(netdev);
1634 struct e1000_hw *hw = &adapter->hw; 1633 struct e1000_hw *hw = &adapter->hw;
1635 1634
1636 switch(adapter->hw.device_id) { 1635 switch (adapter->hw.device_id) {
1637 case E1000_DEV_ID_82542: 1636 case E1000_DEV_ID_82542:
1638 case E1000_DEV_ID_82543GC_FIBER: 1637 case E1000_DEV_ID_82543GC_FIBER:
1639 case E1000_DEV_ID_82543GC_COPPER: 1638 case E1000_DEV_ID_82543GC_COPPER:
@@ -1649,7 +1648,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1649 case E1000_DEV_ID_82546GB_FIBER: 1648 case E1000_DEV_ID_82546GB_FIBER:
1650 case E1000_DEV_ID_82571EB_FIBER: 1649 case E1000_DEV_ID_82571EB_FIBER:
1651 /* Wake events only supported on port A for dual fiber */ 1650 /* Wake events only supported on port A for dual fiber */
1652 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { 1651 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
1653 wol->supported = 0; 1652 wol->supported = 0;
1654 wol->wolopts = 0; 1653 wol->wolopts = 0;
1655 return; 1654 return;
@@ -1661,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1661 WAKE_BCAST | WAKE_MAGIC; 1660 WAKE_BCAST | WAKE_MAGIC;
1662 1661
1663 wol->wolopts = 0; 1662 wol->wolopts = 0;
1664 if(adapter->wol & E1000_WUFC_EX) 1663 if (adapter->wol & E1000_WUFC_EX)
1665 wol->wolopts |= WAKE_UCAST; 1664 wol->wolopts |= WAKE_UCAST;
1666 if(adapter->wol & E1000_WUFC_MC) 1665 if (adapter->wol & E1000_WUFC_MC)
1667 wol->wolopts |= WAKE_MCAST; 1666 wol->wolopts |= WAKE_MCAST;
1668 if(adapter->wol & E1000_WUFC_BC) 1667 if (adapter->wol & E1000_WUFC_BC)
1669 wol->wolopts |= WAKE_BCAST; 1668 wol->wolopts |= WAKE_BCAST;
1670 if(adapter->wol & E1000_WUFC_MAG) 1669 if (adapter->wol & E1000_WUFC_MAG)
1671 wol->wolopts |= WAKE_MAGIC; 1670 wol->wolopts |= WAKE_MAGIC;
1672 return; 1671 return;
1673 } 1672 }
@@ -1679,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1679 struct e1000_adapter *adapter = netdev_priv(netdev); 1678 struct e1000_adapter *adapter = netdev_priv(netdev);
1680 struct e1000_hw *hw = &adapter->hw; 1679 struct e1000_hw *hw = &adapter->hw;
1681 1680
1682 switch(adapter->hw.device_id) { 1681 switch (adapter->hw.device_id) {
1683 case E1000_DEV_ID_82542: 1682 case E1000_DEV_ID_82542:
1684 case E1000_DEV_ID_82543GC_FIBER: 1683 case E1000_DEV_ID_82543GC_FIBER:
1685 case E1000_DEV_ID_82543GC_COPPER: 1684 case E1000_DEV_ID_82543GC_COPPER:
@@ -1693,23 +1692,23 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1693 case E1000_DEV_ID_82546GB_FIBER: 1692 case E1000_DEV_ID_82546GB_FIBER:
1694 case E1000_DEV_ID_82571EB_FIBER: 1693 case E1000_DEV_ID_82571EB_FIBER:
1695 /* Wake events only supported on port A for dual fiber */ 1694 /* Wake events only supported on port A for dual fiber */
1696 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) 1695 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
1697 return wol->wolopts ? -EOPNOTSUPP : 0; 1696 return wol->wolopts ? -EOPNOTSUPP : 0;
1698 /* Fall Through */ 1697 /* Fall Through */
1699 1698
1700 default: 1699 default:
1701 if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1700 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1702 return -EOPNOTSUPP; 1701 return -EOPNOTSUPP;
1703 1702
1704 adapter->wol = 0; 1703 adapter->wol = 0;
1705 1704
1706 if(wol->wolopts & WAKE_UCAST) 1705 if (wol->wolopts & WAKE_UCAST)
1707 adapter->wol |= E1000_WUFC_EX; 1706 adapter->wol |= E1000_WUFC_EX;
1708 if(wol->wolopts & WAKE_MCAST) 1707 if (wol->wolopts & WAKE_MCAST)
1709 adapter->wol |= E1000_WUFC_MC; 1708 adapter->wol |= E1000_WUFC_MC;
1710 if(wol->wolopts & WAKE_BCAST) 1709 if (wol->wolopts & WAKE_BCAST)
1711 adapter->wol |= E1000_WUFC_BC; 1710 adapter->wol |= E1000_WUFC_BC;
1712 if(wol->wolopts & WAKE_MAGIC) 1711 if (wol->wolopts & WAKE_MAGIC)
1713 adapter->wol |= E1000_WUFC_MAG; 1712 adapter->wol |= E1000_WUFC_MAG;
1714 } 1713 }
1715 1714
@@ -1727,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
1727{ 1726{
1728 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 1727 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1729 1728
1730 if(test_and_change_bit(E1000_LED_ON, &adapter->led_status)) 1729 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
1731 e1000_led_off(&adapter->hw); 1730 e1000_led_off(&adapter->hw);
1732 else 1731 else
1733 e1000_led_on(&adapter->hw); 1732 e1000_led_on(&adapter->hw);
@@ -1740,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1740{ 1739{
1741 struct e1000_adapter *adapter = netdev_priv(netdev); 1740 struct e1000_adapter *adapter = netdev_priv(netdev);
1742 1741
1743 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) 1742 if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
1744 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); 1743 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
1745 1744
1746 if(adapter->hw.mac_type < e1000_82571) { 1745 if (adapter->hw.mac_type < e1000_82571) {
1747 if(!adapter->blink_timer.function) { 1746 if (!adapter->blink_timer.function) {
1748 init_timer(&adapter->blink_timer); 1747 init_timer(&adapter->blink_timer);
1749 adapter->blink_timer.function = e1000_led_blink_callback; 1748 adapter->blink_timer.function = e1000_led_blink_callback;
1750 adapter->blink_timer.data = (unsigned long) adapter; 1749 adapter->blink_timer.data = (unsigned long) adapter;
@@ -1782,21 +1781,21 @@ static int
1782e1000_nway_reset(struct net_device *netdev) 1781e1000_nway_reset(struct net_device *netdev)
1783{ 1782{
1784 struct e1000_adapter *adapter = netdev_priv(netdev); 1783 struct e1000_adapter *adapter = netdev_priv(netdev);
1785 if(netif_running(netdev)) { 1784 if (netif_running(netdev)) {
1786 e1000_down(adapter); 1785 e1000_down(adapter);
1787 e1000_up(adapter); 1786 e1000_up(adapter);
1788 } 1787 }
1789 return 0; 1788 return 0;
1790} 1789}
1791 1790
1792static int 1791static int
1793e1000_get_stats_count(struct net_device *netdev) 1792e1000_get_stats_count(struct net_device *netdev)
1794{ 1793{
1795 return E1000_STATS_LEN; 1794 return E1000_STATS_LEN;
1796} 1795}
1797 1796
1798static void 1797static void
1799e1000_get_ethtool_stats(struct net_device *netdev, 1798e1000_get_ethtool_stats(struct net_device *netdev,
1800 struct ethtool_stats *stats, uint64_t *data) 1799 struct ethtool_stats *stats, uint64_t *data)
1801{ 1800{
1802 struct e1000_adapter *adapter = netdev_priv(netdev); 1801 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1830,7 +1829,7 @@ e1000_get_ethtool_stats(struct net_device *netdev,
1830/* BUG_ON(i != E1000_STATS_LEN); */ 1829/* BUG_ON(i != E1000_STATS_LEN); */
1831} 1830}
1832 1831
1833static void 1832static void
1834e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) 1833e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1835{ 1834{
1836#ifdef CONFIG_E1000_MQ 1835#ifdef CONFIG_E1000_MQ
@@ -1839,9 +1838,9 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1839 uint8_t *p = data; 1838 uint8_t *p = data;
1840 int i; 1839 int i;
1841 1840
1842 switch(stringset) { 1841 switch (stringset) {
1843 case ETH_SS_TEST: 1842 case ETH_SS_TEST:
1844 memcpy(data, *e1000_gstrings_test, 1843 memcpy(data, *e1000_gstrings_test,
1845 E1000_TEST_LEN*ETH_GSTRING_LEN); 1844 E1000_TEST_LEN*ETH_GSTRING_LEN);
1846 break; 1845 break;
1847 case ETH_SS_STATS: 1846 case ETH_SS_STATS:
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 2437d362ff6..beeec0fbbea 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1600,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1600 if(ret_val) 1600 if(ret_val)
1601 return ret_val; 1601 return ret_val;
1602 1602
1603 /* Read the MII 1000Base-T Control Register (Address 9). */ 1603 /* Read the MII 1000Base-T Control Register (Address 9). */
1604 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1604 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1605 if(ret_val) 1605 if(ret_val)
1606 return ret_val; 1606 return ret_val;
1607 1607
1608 /* Need to parse both autoneg_advertised and fc and set up 1608 /* Need to parse both autoneg_advertised and fc and set up
1609 * the appropriate PHY registers. First we will parse for 1609 * the appropriate PHY registers. First we will parse for
@@ -3916,7 +3916,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
3916 } 3916 }
3917 } 3917 }
3918 3918
3919 if(eeprom->use_eerd == TRUE) { 3919 if (eeprom->use_eerd == TRUE) {
3920 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); 3920 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
3921 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3921 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
3922 (hw->mac_type != e1000_82573)) 3922 (hw->mac_type != e1000_82573))
@@ -4423,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4423 return -E1000_ERR_EEPROM; 4423 return -E1000_ERR_EEPROM;
4424 } 4424 }
4425 4425
4426 /* If STM opcode located in bits 15:8 of flop, reset firmware */ 4426 /* If STM opcode located in bits 15:8 of flop, reset firmware */
4427 if ((flop & 0xFF00) == E1000_STM_OPCODE) { 4427 if ((flop & 0xFF00) == E1000_STM_OPCODE) {
4428 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); 4428 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
4429 } 4429 }
@@ -4431,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4431 /* Perform the flash update */ 4431 /* Perform the flash update */
4432 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); 4432 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
4433 4433
4434 for (i=0; i < attempts; i++) { 4434 for (i=0; i < attempts; i++) {
4435 eecd = E1000_READ_REG(hw, EECD); 4435 eecd = E1000_READ_REG(hw, EECD);
4436 if ((eecd & E1000_EECD_FLUPD) == 0) { 4436 if ((eecd & E1000_EECD_FLUPD) == 0) {
4437 break; 4437 break;
@@ -4504,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
4504 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); 4504 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
4505 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); 4505 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
4506 } 4506 }
4507
4507 switch (hw->mac_type) { 4508 switch (hw->mac_type) {
4508 default: 4509 default:
4509 break; 4510 break;
@@ -6840,7 +6841,8 @@ int32_t
6840e1000_check_phy_reset_block(struct e1000_hw *hw) 6841e1000_check_phy_reset_block(struct e1000_hw *hw)
6841{ 6842{
6842 uint32_t manc = 0; 6843 uint32_t manc = 0;
6843 if(hw->mac_type > e1000_82547_rev_2) 6844
6845 if (hw->mac_type > e1000_82547_rev_2)
6844 manc = E1000_READ_REG(hw, MANC); 6846 manc = E1000_READ_REG(hw, MANC);
6845 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 6847 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
6846 E1000_BLK_PHY_RESET : E1000_SUCCESS; 6848 E1000_BLK_PHY_RESET : E1000_SUCCESS;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 0b8f6f2b774..f1219dd9dba 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
377void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); 377void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
378 378
379/* Filters (multicast, vlan, receive) */ 379/* Filters (multicast, vlan, receive) */
380void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
380uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); 381uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
381void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); 382void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
382void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); 383void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
401void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); 402void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
402/* Port I/O is only supported on 82544 and newer */ 403/* Port I/O is only supported on 82544 and newer */
403uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); 404uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
405uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
404void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); 406void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
407void e1000_enable_pciex_master(struct e1000_hw *hw);
405int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 408int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
406int32_t e1000_get_software_semaphore(struct e1000_hw *hw); 409int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
407void e1000_release_software_semaphore(struct e1000_hw *hw); 410void e1000_release_software_semaphore(struct e1000_hw *hw);
@@ -899,14 +902,14 @@ struct e1000_ffvt_entry {
899#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 902#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
900#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 903#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
901#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 904#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
902#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 905#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
903#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 906#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
904#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 907#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
905#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 908#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
906#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 909#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
907#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 910#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
908#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 911#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
909#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 912#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
910#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 913#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
911#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 914#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
912#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 915#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@@ -1761,7 +1764,6 @@ struct e1000_hw {
1761#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1764#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
1762#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1765#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
1763 still to be processed. */ 1766 still to be processed. */
1764
1765/* Transmit Configuration Word */ 1767/* Transmit Configuration Word */
1766#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ 1768#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
1767#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ 1769#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d0a5d1656c5..31e332935e5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -29,11 +29,71 @@
29#include "e1000.h" 29#include "e1000.h"
30 30
31/* Change Log 31/* Change Log
32 * 6.0.58 4/20/05 32 * 6.3.9 12/16/2005
33 * o Accepted ethtool cleanup patch from Stephen Hemminger 33 * o incorporate fix for recycled skbs from IBM LTC
34 * 6.0.44+ 2/15/05 34 * 6.3.7 11/18/2005
35 * o applied Anton's patch to resolve tx hang in hardware 35 * o Honor eeprom setting for enabling/disabling Wake On Lan
36 * o Applied Andrew Mortons patch - e1000 stops working after resume 36 * 6.3.5 11/17/2005
37 * o Fix memory leak in rx ring handling for PCI Express adapters
38 * 6.3.4 11/8/05
39 * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
40 * 6.3.2 9/20/05
41 * o Render logic that sets/resets DRV_LOAD as inline functions to
42 * avoid code replication. If f/w is AMT then set DRV_LOAD only when
43 * network interface is open.
44 * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
45 * o Adjust PBA partioning for Jumbo frames using MTU size and not
46 * rx_buffer_len
47 * 6.3.1 9/19/05
48 * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
49 (e1000_clean_tx_irq)
50 * o Support for 8086:10B5 device (Quad Port)
51 * 6.2.14 9/15/05
52 * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface
53 * open/close
54 * 6.2.13 9/14/05
55 * o Invoke e1000_check_mng_mode only for 8257x controllers since it
56 * accesses the FWSM that is not supported in other controllers
57 * 6.2.12 9/9/05
58 * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
59 * o set RCTL:SECRC only for controllers newer than 82543.
60 * o When the n/w interface comes down reset DRV_LOAD bit to notify f/w.
61 * This code was moved from e1000_remove to e1000_close
62 * 6.2.10 9/6/05
63 * o Fix error in updating RDT in el1000_alloc_rx_buffers[_ps] -- one off.
64 * o Enable fc by default on 82573 controllers (do not read eeprom)
65 * o Fix rx_errors statistic not to include missed_packet_count
66 * o Fix rx_dropped statistic not to include missed_packet_count
67 (Padraig Brady)
68 * 6.2.9 8/30/05
69 * o Remove call to update statistics from the controller ib e1000_get_stats
70 * 6.2.8 8/30/05
71 * o Improved algorithm for rx buffer allocation/rdt update
72 * o Flow control watermarks relative to rx PBA size
73 * o Simplified 'Tx Hung' detect logic
74 * 6.2.7 8/17/05
75 * o Report rx buffer allocation failures and tx timeout counts in stats
76 * 6.2.6 8/16/05
77 * o Implement workaround for controller erratum -- linear non-tso packet
78 * following a TSO gets written back prematurely
79 * 6.2.5 8/15/05
80 * o Set netdev->tx_queue_len based on link speed/duplex settings.
81 * o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
82 * o Do not power off PHY if SoL/IDER session is active
83 * 6.2.4 8/10/05
84 * o Fix loopback test setup/cleanup for 82571/3 controllers
85 * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
86 * all packets as raw
87 * o Prevent operations that will cause the PHY to be reset if SoL/IDER
88 * sessions are active and log a message
89 * 6.2.2 7/21/05
90 * o used fixed size descriptors for all MTU sizes, reduces memory load
91 * 6.1.2 4/13/05
92 * o Fixed ethtool diagnostics
93 * o Enabled flow control to take default eeprom settings
94 * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
95 * calls, one from mii_ioctl and other from within update_stats while
96 * processing MIIREG ioctl.
37 */ 97 */
38 98
39char e1000_driver_name[] = "e1000"; 99char e1000_driver_name[] = "e1000";
@@ -295,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
295static inline void 355static inline void
296e1000_irq_enable(struct e1000_adapter *adapter) 356e1000_irq_enable(struct e1000_adapter *adapter)
297{ 357{
298 if(likely(atomic_dec_and_test(&adapter->irq_sem))) { 358 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
299 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 359 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
300 E1000_WRITE_FLUSH(&adapter->hw); 360 E1000_WRITE_FLUSH(&adapter->hw);
301 } 361 }
@@ -307,17 +367,17 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
307 struct net_device *netdev = adapter->netdev; 367 struct net_device *netdev = adapter->netdev;
308 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 368 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
309 uint16_t old_vid = adapter->mng_vlan_id; 369 uint16_t old_vid = adapter->mng_vlan_id;
310 if(adapter->vlgrp) { 370 if (adapter->vlgrp) {
311 if(!adapter->vlgrp->vlan_devices[vid]) { 371 if (!adapter->vlgrp->vlan_devices[vid]) {
312 if(adapter->hw.mng_cookie.status & 372 if (adapter->hw.mng_cookie.status &
313 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 373 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
314 e1000_vlan_rx_add_vid(netdev, vid); 374 e1000_vlan_rx_add_vid(netdev, vid);
315 adapter->mng_vlan_id = vid; 375 adapter->mng_vlan_id = vid;
316 } else 376 } else
317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
318 378
319 if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 379 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
320 (vid != old_vid) && 380 (vid != old_vid) &&
321 !adapter->vlgrp->vlan_devices[old_vid]) 381 !adapter->vlgrp->vlan_devices[old_vid])
322 e1000_vlan_rx_kill_vid(netdev, old_vid); 382 e1000_vlan_rx_kill_vid(netdev, old_vid);
323 } 383 }
@@ -401,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter)
401 /* hardware has been reset, we need to reload some things */ 461 /* hardware has been reset, we need to reload some things */
402 462
403 /* Reset the PHY if it was previously powered down */ 463 /* Reset the PHY if it was previously powered down */
404 if(adapter->hw.media_type == e1000_media_type_copper) { 464 if (adapter->hw.media_type == e1000_media_type_copper) {
405 uint16_t mii_reg; 465 uint16_t mii_reg;
406 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 466 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
407 if(mii_reg & MII_CR_POWER_DOWN) 467 if (mii_reg & MII_CR_POWER_DOWN)
408 e1000_phy_reset(&adapter->hw); 468 e1000_phy_reset(&adapter->hw);
409 } 469 }
410 470
@@ -425,16 +485,16 @@ e1000_up(struct e1000_adapter *adapter)
425 } 485 }
426 486
427#ifdef CONFIG_PCI_MSI 487#ifdef CONFIG_PCI_MSI
428 if(adapter->hw.mac_type > e1000_82547_rev_2) { 488 if (adapter->hw.mac_type > e1000_82547_rev_2) {
429 adapter->have_msi = TRUE; 489 adapter->have_msi = TRUE;
430 if((err = pci_enable_msi(adapter->pdev))) { 490 if ((err = pci_enable_msi(adapter->pdev))) {
431 DPRINTK(PROBE, ERR, 491 DPRINTK(PROBE, ERR,
432 "Unable to allocate MSI interrupt Error: %d\n", err); 492 "Unable to allocate MSI interrupt Error: %d\n", err);
433 adapter->have_msi = FALSE; 493 adapter->have_msi = FALSE;
434 } 494 }
435 } 495 }
436#endif 496#endif
437 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 497 if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
438 SA_SHIRQ | SA_SAMPLE_RANDOM, 498 SA_SHIRQ | SA_SAMPLE_RANDOM,
439 netdev->name, netdev))) { 499 netdev->name, netdev))) {
440 DPRINTK(PROBE, ERR, 500 DPRINTK(PROBE, ERR,
@@ -471,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter)
471#endif 531#endif
472 free_irq(adapter->pdev->irq, netdev); 532 free_irq(adapter->pdev->irq, netdev);
473#ifdef CONFIG_PCI_MSI 533#ifdef CONFIG_PCI_MSI
474 if(adapter->hw.mac_type > e1000_82547_rev_2 && 534 if (adapter->hw.mac_type > e1000_82547_rev_2 &&
475 adapter->have_msi == TRUE) 535 adapter->have_msi == TRUE)
476 pci_disable_msi(adapter->pdev); 536 pci_disable_msi(adapter->pdev);
477#endif 537#endif
@@ -537,12 +597,12 @@ e1000_reset(struct e1000_adapter *adapter)
537 break; 597 break;
538 } 598 }
539 599
540 if((adapter->hw.mac_type != e1000_82573) && 600 if ((adapter->hw.mac_type != e1000_82573) &&
541 (adapter->netdev->mtu > E1000_RXBUFFER_8192)) 601 (adapter->netdev->mtu > E1000_RXBUFFER_8192))
542 pba -= 8; /* allocate more FIFO for Tx */ 602 pba -= 8; /* allocate more FIFO for Tx */
543 603
544 604
545 if(adapter->hw.mac_type == e1000_82547) { 605 if (adapter->hw.mac_type == e1000_82547) {
546 adapter->tx_fifo_head = 0; 606 adapter->tx_fifo_head = 0;
547 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 607 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
548 adapter->tx_fifo_size = 608 adapter->tx_fifo_size =
@@ -565,9 +625,9 @@ e1000_reset(struct e1000_adapter *adapter)
565 625
566 /* Allow time for pending master requests to run */ 626 /* Allow time for pending master requests to run */
567 e1000_reset_hw(&adapter->hw); 627 e1000_reset_hw(&adapter->hw);
568 if(adapter->hw.mac_type >= e1000_82544) 628 if (adapter->hw.mac_type >= e1000_82544)
569 E1000_WRITE_REG(&adapter->hw, WUC, 0); 629 E1000_WRITE_REG(&adapter->hw, WUC, 0);
570 if(e1000_init_hw(&adapter->hw)) 630 if (e1000_init_hw(&adapter->hw))
571 DPRINTK(PROBE, ERR, "Hardware Error\n"); 631 DPRINTK(PROBE, ERR, "Hardware Error\n");
572 e1000_update_mng_vlan(adapter); 632 e1000_update_mng_vlan(adapter);
573 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 633 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -606,26 +666,26 @@ e1000_probe(struct pci_dev *pdev,
606 int i, err, pci_using_dac; 666 int i, err, pci_using_dac;
607 uint16_t eeprom_data; 667 uint16_t eeprom_data;
608 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 668 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
609 if((err = pci_enable_device(pdev))) 669 if ((err = pci_enable_device(pdev)))
610 return err; 670 return err;
611 671
612 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 672 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
613 pci_using_dac = 1; 673 pci_using_dac = 1;
614 } else { 674 } else {
615 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 675 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
616 E1000_ERR("No usable DMA configuration, aborting\n"); 676 E1000_ERR("No usable DMA configuration, aborting\n");
617 return err; 677 return err;
618 } 678 }
619 pci_using_dac = 0; 679 pci_using_dac = 0;
620 } 680 }
621 681
622 if((err = pci_request_regions(pdev, e1000_driver_name))) 682 if ((err = pci_request_regions(pdev, e1000_driver_name)))
623 return err; 683 return err;
624 684
625 pci_set_master(pdev); 685 pci_set_master(pdev);
626 686
627 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 687 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
628 if(!netdev) { 688 if (!netdev) {
629 err = -ENOMEM; 689 err = -ENOMEM;
630 goto err_alloc_etherdev; 690 goto err_alloc_etherdev;
631 } 691 }
@@ -644,15 +704,15 @@ e1000_probe(struct pci_dev *pdev,
644 mmio_len = pci_resource_len(pdev, BAR_0); 704 mmio_len = pci_resource_len(pdev, BAR_0);
645 705
646 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 706 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
647 if(!adapter->hw.hw_addr) { 707 if (!adapter->hw.hw_addr) {
648 err = -EIO; 708 err = -EIO;
649 goto err_ioremap; 709 goto err_ioremap;
650 } 710 }
651 711
652 for(i = BAR_1; i <= BAR_5; i++) { 712 for (i = BAR_1; i <= BAR_5; i++) {
653 if(pci_resource_len(pdev, i) == 0) 713 if (pci_resource_len(pdev, i) == 0)
654 continue; 714 continue;
655 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { 715 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
656 adapter->hw.io_base = pci_resource_start(pdev, i); 716 adapter->hw.io_base = pci_resource_start(pdev, i);
657 break; 717 break;
658 } 718 }
@@ -689,13 +749,13 @@ e1000_probe(struct pci_dev *pdev,
689 749
690 /* setup the private structure */ 750 /* setup the private structure */
691 751
692 if((err = e1000_sw_init(adapter))) 752 if ((err = e1000_sw_init(adapter)))
693 goto err_sw_init; 753 goto err_sw_init;
694 754
695 if((err = e1000_check_phy_reset_block(&adapter->hw))) 755 if ((err = e1000_check_phy_reset_block(&adapter->hw)))
696 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 756 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
697 757
698 if(adapter->hw.mac_type >= e1000_82543) { 758 if (adapter->hw.mac_type >= e1000_82543) {
699 netdev->features = NETIF_F_SG | 759 netdev->features = NETIF_F_SG |
700 NETIF_F_HW_CSUM | 760 NETIF_F_HW_CSUM |
701 NETIF_F_HW_VLAN_TX | 761 NETIF_F_HW_VLAN_TX |
@@ -704,16 +764,16 @@ e1000_probe(struct pci_dev *pdev,
704 } 764 }
705 765
706#ifdef NETIF_F_TSO 766#ifdef NETIF_F_TSO
707 if((adapter->hw.mac_type >= e1000_82544) && 767 if ((adapter->hw.mac_type >= e1000_82544) &&
708 (adapter->hw.mac_type != e1000_82547)) 768 (adapter->hw.mac_type != e1000_82547))
709 netdev->features |= NETIF_F_TSO; 769 netdev->features |= NETIF_F_TSO;
710 770
711#ifdef NETIF_F_TSO_IPV6 771#ifdef NETIF_F_TSO_IPV6
712 if(adapter->hw.mac_type > e1000_82547_rev_2) 772 if (adapter->hw.mac_type > e1000_82547_rev_2)
713 netdev->features |= NETIF_F_TSO_IPV6; 773 netdev->features |= NETIF_F_TSO_IPV6;
714#endif 774#endif
715#endif 775#endif
716 if(pci_using_dac) 776 if (pci_using_dac)
717 netdev->features |= NETIF_F_HIGHDMA; 777 netdev->features |= NETIF_F_HIGHDMA;
718 778
719 /* hard_start_xmit is safe against parallel locking */ 779 /* hard_start_xmit is safe against parallel locking */
@@ -721,14 +781,14 @@ e1000_probe(struct pci_dev *pdev,
721 781
722 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 782 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
723 783
724 /* before reading the EEPROM, reset the controller to 784 /* before reading the EEPROM, reset the controller to
725 * put the device in a known good starting state */ 785 * put the device in a known good starting state */
726 786
727 e1000_reset_hw(&adapter->hw); 787 e1000_reset_hw(&adapter->hw);
728 788
729 /* make sure the EEPROM is good */ 789 /* make sure the EEPROM is good */
730 790
731 if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) { 791 if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
732 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 792 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
733 err = -EIO; 793 err = -EIO;
734 goto err_eeprom; 794 goto err_eeprom;
@@ -736,12 +796,12 @@ e1000_probe(struct pci_dev *pdev,
736 796
737 /* copy the MAC address out of the EEPROM */ 797 /* copy the MAC address out of the EEPROM */
738 798
739 if(e1000_read_mac_addr(&adapter->hw)) 799 if (e1000_read_mac_addr(&adapter->hw))
740 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 800 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
741 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 801 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
742 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 802 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
743 803
744 if(!is_valid_ether_addr(netdev->perm_addr)) { 804 if (!is_valid_ether_addr(netdev->perm_addr)) {
745 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 805 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
746 err = -EIO; 806 err = -EIO;
747 goto err_eeprom; 807 goto err_eeprom;
@@ -781,7 +841,7 @@ e1000_probe(struct pci_dev *pdev,
781 * enable the ACPI Magic Packet filter 841 * enable the ACPI Magic Packet filter
782 */ 842 */
783 843
784 switch(adapter->hw.mac_type) { 844 switch (adapter->hw.mac_type) {
785 case e1000_82542_rev2_0: 845 case e1000_82542_rev2_0:
786 case e1000_82542_rev2_1: 846 case e1000_82542_rev2_1:
787 case e1000_82543: 847 case e1000_82543:
@@ -794,7 +854,7 @@ e1000_probe(struct pci_dev *pdev,
794 case e1000_82546: 854 case e1000_82546:
795 case e1000_82546_rev_3: 855 case e1000_82546_rev_3:
796 case e1000_82571: 856 case e1000_82571:
797 if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ 857 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
798 e1000_read_eeprom(&adapter->hw, 858 e1000_read_eeprom(&adapter->hw,
799 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 859 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
800 break; 860 break;
@@ -805,7 +865,7 @@ e1000_probe(struct pci_dev *pdev,
805 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 865 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
806 break; 866 break;
807 } 867 }
808 if(eeprom_data & eeprom_apme_mask) 868 if (eeprom_data & eeprom_apme_mask)
809 adapter->wol |= E1000_WUFC_MAG; 869 adapter->wol |= E1000_WUFC_MAG;
810 870
811 /* print bus type/speed/width info */ 871 /* print bus type/speed/width info */
@@ -840,7 +900,7 @@ e1000_probe(struct pci_dev *pdev,
840 e1000_get_hw_control(adapter); 900 e1000_get_hw_control(adapter);
841 901
842 strcpy(netdev->name, "eth%d"); 902 strcpy(netdev->name, "eth%d");
843 if((err = register_netdev(netdev))) 903 if ((err = register_netdev(netdev)))
844 goto err_register; 904 goto err_register;
845 905
846 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); 906 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -881,10 +941,10 @@ e1000_remove(struct pci_dev *pdev)
881 941
882 flush_scheduled_work(); 942 flush_scheduled_work();
883 943
884 if(adapter->hw.mac_type >= e1000_82540 && 944 if (adapter->hw.mac_type >= e1000_82540 &&
885 adapter->hw.media_type == e1000_media_type_copper) { 945 adapter->hw.media_type == e1000_media_type_copper) {
886 manc = E1000_READ_REG(&adapter->hw, MANC); 946 manc = E1000_READ_REG(&adapter->hw, MANC);
887 if(manc & E1000_MANC_SMBUS_EN) { 947 if (manc & E1000_MANC_SMBUS_EN) {
888 manc |= E1000_MANC_ARP_EN; 948 manc |= E1000_MANC_ARP_EN;
889 E1000_WRITE_REG(&adapter->hw, MANC, manc); 949 E1000_WRITE_REG(&adapter->hw, MANC, manc);
890 } 950 }
@@ -900,7 +960,7 @@ e1000_remove(struct pci_dev *pdev)
900 __dev_put(&adapter->polling_netdev[i]); 960 __dev_put(&adapter->polling_netdev[i]);
901#endif 961#endif
902 962
903 if(!e1000_check_phy_reset_block(&adapter->hw)) 963 if (!e1000_check_phy_reset_block(&adapter->hw))
904 e1000_phy_hw_reset(&adapter->hw); 964 e1000_phy_hw_reset(&adapter->hw);
905 965
906 kfree(adapter->tx_ring); 966 kfree(adapter->tx_ring);
@@ -959,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
959 1019
960 /* identify the MAC */ 1020 /* identify the MAC */
961 1021
962 if(e1000_set_mac_type(hw)) { 1022 if (e1000_set_mac_type(hw)) {
963 DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); 1023 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
964 return -EIO; 1024 return -EIO;
965 } 1025 }
966 1026
967 /* initialize eeprom parameters */ 1027 /* initialize eeprom parameters */
968 1028
969 if(e1000_init_eeprom_params(hw)) { 1029 if (e1000_init_eeprom_params(hw)) {
970 E1000_ERR("EEPROM initialization failed\n"); 1030 E1000_ERR("EEPROM initialization failed\n");
971 return -EIO; 1031 return -EIO;
972 } 1032 }
973 1033
974 switch(hw->mac_type) { 1034 switch (hw->mac_type) {
975 default: 1035 default:
976 break; 1036 break;
977 case e1000_82541: 1037 case e1000_82541:
@@ -990,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
990 1050
991 /* Copper options */ 1051 /* Copper options */
992 1052
993 if(hw->media_type == e1000_media_type_copper) { 1053 if (hw->media_type == e1000_media_type_copper) {
994 hw->mdix = AUTO_ALL_MODES; 1054 hw->mdix = AUTO_ALL_MODES;
995 hw->disable_polarity_correction = FALSE; 1055 hw->disable_polarity_correction = FALSE;
996 hw->master_slave = E1000_MASTER_SLAVE; 1056 hw->master_slave = E1000_MASTER_SLAVE;
@@ -1166,10 +1226,10 @@ e1000_open(struct net_device *netdev)
1166 if ((err = e1000_setup_all_rx_resources(adapter))) 1226 if ((err = e1000_setup_all_rx_resources(adapter)))
1167 goto err_setup_rx; 1227 goto err_setup_rx;
1168 1228
1169 if((err = e1000_up(adapter))) 1229 if ((err = e1000_up(adapter)))
1170 goto err_up; 1230 goto err_up;
1171 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1231 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1172 if((adapter->hw.mng_cookie.status & 1232 if ((adapter->hw.mng_cookie.status &
1173 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1233 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1174 e1000_update_mng_vlan(adapter); 1234 e1000_update_mng_vlan(adapter);
1175 } 1235 }
@@ -1214,7 +1274,7 @@ e1000_close(struct net_device *netdev)
1214 e1000_free_all_tx_resources(adapter); 1274 e1000_free_all_tx_resources(adapter);
1215 e1000_free_all_rx_resources(adapter); 1275 e1000_free_all_rx_resources(adapter);
1216 1276
1217 if((adapter->hw.mng_cookie.status & 1277 if ((adapter->hw.mng_cookie.status &
1218 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1278 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1219 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1279 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1220 } 1280 }
@@ -1269,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1269 size = sizeof(struct e1000_buffer) * txdr->count; 1329 size = sizeof(struct e1000_buffer) * txdr->count;
1270 1330
1271 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); 1331 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1272 if(!txdr->buffer_info) { 1332 if (!txdr->buffer_info) {
1273 DPRINTK(PROBE, ERR, 1333 DPRINTK(PROBE, ERR,
1274 "Unable to allocate memory for the transmit descriptor ring\n"); 1334 "Unable to allocate memory for the transmit descriptor ring\n");
1275 return -ENOMEM; 1335 return -ENOMEM;
@@ -1282,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1282 E1000_ROUNDUP(txdr->size, 4096); 1342 E1000_ROUNDUP(txdr->size, 4096);
1283 1343
1284 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1344 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1285 if(!txdr->desc) { 1345 if (!txdr->desc) {
1286setup_tx_desc_die: 1346setup_tx_desc_die:
1287 vfree(txdr->buffer_info); 1347 vfree(txdr->buffer_info);
1288 DPRINTK(PROBE, ERR, 1348 DPRINTK(PROBE, ERR,
@@ -1298,8 +1358,8 @@ setup_tx_desc_die:
1298 "at %p\n", txdr->size, txdr->desc); 1358 "at %p\n", txdr->size, txdr->desc);
1299 /* Try again, without freeing the previous */ 1359 /* Try again, without freeing the previous */
1300 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1360 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1301 if(!txdr->desc) {
1302 /* Failed allocation, critical failure */ 1361 /* Failed allocation, critical failure */
1362 if (!txdr->desc) {
1303 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1363 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1304 goto setup_tx_desc_die; 1364 goto setup_tx_desc_die;
1305 } 1365 }
@@ -1499,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1499 1559
1500 size = sizeof(struct e1000_ps_page) * rxdr->count; 1560 size = sizeof(struct e1000_ps_page) * rxdr->count;
1501 rxdr->ps_page = kmalloc(size, GFP_KERNEL); 1561 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1502 if(!rxdr->ps_page) { 1562 if (!rxdr->ps_page) {
1503 vfree(rxdr->buffer_info); 1563 vfree(rxdr->buffer_info);
1504 DPRINTK(PROBE, ERR, 1564 DPRINTK(PROBE, ERR,
1505 "Unable to allocate memory for the receive descriptor ring\n"); 1565 "Unable to allocate memory for the receive descriptor ring\n");
@@ -1509,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1509 1569
1510 size = sizeof(struct e1000_ps_page_dma) * rxdr->count; 1570 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1511 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); 1571 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1512 if(!rxdr->ps_page_dma) { 1572 if (!rxdr->ps_page_dma) {
1513 vfree(rxdr->buffer_info); 1573 vfree(rxdr->buffer_info);
1514 kfree(rxdr->ps_page); 1574 kfree(rxdr->ps_page);
1515 DPRINTK(PROBE, ERR, 1575 DPRINTK(PROBE, ERR,
@@ -1518,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1518 } 1578 }
1519 memset(rxdr->ps_page_dma, 0, size); 1579 memset(rxdr->ps_page_dma, 0, size);
1520 1580
1521 if(adapter->hw.mac_type <= e1000_82547_rev_2) 1581 if (adapter->hw.mac_type <= e1000_82547_rev_2)
1522 desc_len = sizeof(struct e1000_rx_desc); 1582 desc_len = sizeof(struct e1000_rx_desc);
1523 else 1583 else
1524 desc_len = sizeof(union e1000_rx_desc_packet_split); 1584 desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1621,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1621{ 1681{
1622 uint32_t rctl, rfctl; 1682 uint32_t rctl, rfctl;
1623 uint32_t psrctl = 0; 1683 uint32_t psrctl = 0;
1624#ifdef CONFIG_E1000_PACKET_SPLIT 1684#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1625 uint32_t pages = 0; 1685 uint32_t pages = 0;
1626#endif 1686#endif
1627 1687
@@ -1647,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1647 rctl |= E1000_RCTL_LPE; 1707 rctl |= E1000_RCTL_LPE;
1648 1708
1649 /* Setup buffer sizes */ 1709 /* Setup buffer sizes */
1650 if(adapter->hw.mac_type >= e1000_82571) { 1710 if (adapter->hw.mac_type >= e1000_82571) {
1651 /* We can now specify buffers in 1K increments. 1711 /* We can now specify buffers in 1K increments.
1652 * BSIZE and BSEX are ignored in this case. */ 1712 * BSIZE and BSEX are ignored in this case. */
1653 rctl |= adapter->rx_buffer_len << 0x11; 1713 rctl |= adapter->rx_buffer_len << 0x11;
1654 } else { 1714 } else {
1655 rctl &= ~E1000_RCTL_SZ_4096; 1715 rctl &= ~E1000_RCTL_SZ_4096;
1656 rctl |= E1000_RCTL_BSEX; 1716 rctl &= ~E1000_RCTL_BSEX;
1657 switch (adapter->rx_buffer_len) { 1717 rctl |= E1000_RCTL_SZ_2048;
1658 case E1000_RXBUFFER_2048:
1659 default:
1660 rctl |= E1000_RCTL_SZ_2048;
1661 rctl &= ~E1000_RCTL_BSEX;
1662 break;
1663 case E1000_RXBUFFER_4096:
1664 rctl |= E1000_RCTL_SZ_4096;
1665 break;
1666 case E1000_RXBUFFER_8192:
1667 rctl |= E1000_RCTL_SZ_8192;
1668 break;
1669 case E1000_RXBUFFER_16384:
1670 rctl |= E1000_RCTL_SZ_16384;
1671 break;
1672 }
1673 } 1718 }
1674 1719
1675#ifdef CONFIG_E1000_PACKET_SPLIT 1720#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1676 /* 82571 and greater support packet-split where the protocol 1721 /* 82571 and greater support packet-split where the protocol
1677 * header is placed in skb->data and the packet data is 1722 * header is placed in skb->data and the packet data is
1678 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 1723 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
@@ -1696,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1696 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1741 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1697 1742
1698 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1743 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
1699 1744
1700 psrctl |= adapter->rx_ps_bsize0 >> 1745 psrctl |= adapter->rx_ps_bsize0 >>
1701 E1000_PSRCTL_BSIZE0_SHIFT; 1746 E1000_PSRCTL_BSIZE0_SHIFT;
1702 1747
@@ -1758,7 +1803,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1758 1803
1759 if (hw->mac_type >= e1000_82540) { 1804 if (hw->mac_type >= e1000_82540) {
1760 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); 1805 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
1761 if(adapter->itr > 1) 1806 if (adapter->itr > 1)
1762 E1000_WRITE_REG(hw, ITR, 1807 E1000_WRITE_REG(hw, ITR,
1763 1000000000 / (adapter->itr * 256)); 1808 1000000000 / (adapter->itr * 256));
1764 } 1809 }
@@ -1847,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1847 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1892 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1848 if (hw->mac_type >= e1000_82543) { 1893 if (hw->mac_type >= e1000_82543) {
1849 rxcsum = E1000_READ_REG(hw, RXCSUM); 1894 rxcsum = E1000_READ_REG(hw, RXCSUM);
1850 if(adapter->rx_csum == TRUE) { 1895 if (adapter->rx_csum == TRUE) {
1851 rxcsum |= E1000_RXCSUM_TUOFL; 1896 rxcsum |= E1000_RXCSUM_TUOFL;
1852 1897
1853 /* Enable 82571 IPv4 payload checksum for UDP fragments 1898 /* Enable 82571 IPv4 payload checksum for UDP fragments
1854 * Must be used in conjunction with packet-split. */ 1899 * Must be used in conjunction with packet-split. */
1855 if ((hw->mac_type >= e1000_82571) && 1900 if ((hw->mac_type >= e1000_82571) &&
1856 (adapter->rx_ps_pages)) { 1901 (adapter->rx_ps_pages)) {
1857 rxcsum |= E1000_RXCSUM_IPPCSE; 1902 rxcsum |= E1000_RXCSUM_IPPCSE;
1858 } 1903 }
1859 } else { 1904 } else {
@@ -1915,7 +1960,7 @@ static inline void
1915e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1960e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1916 struct e1000_buffer *buffer_info) 1961 struct e1000_buffer *buffer_info)
1917{ 1962{
1918 if(buffer_info->dma) { 1963 if (buffer_info->dma) {
1919 pci_unmap_page(adapter->pdev, 1964 pci_unmap_page(adapter->pdev,
1920 buffer_info->dma, 1965 buffer_info->dma,
1921 buffer_info->length, 1966 buffer_info->length,
@@ -1942,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
1942 1987
1943 /* Free all the Tx ring sk_buffs */ 1988 /* Free all the Tx ring sk_buffs */
1944 1989
1945 for(i = 0; i < tx_ring->count; i++) { 1990 for (i = 0; i < tx_ring->count; i++) {
1946 buffer_info = &tx_ring->buffer_info[i]; 1991 buffer_info = &tx_ring->buffer_info[i];
1947 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1992 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1948 } 1993 }
@@ -2038,10 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
2038 unsigned int i, j; 2083 unsigned int i, j;
2039 2084
2040 /* Free all the Rx ring sk_buffs */ 2085 /* Free all the Rx ring sk_buffs */
2041 2086 for (i = 0; i < rx_ring->count; i++) {
2042 for(i = 0; i < rx_ring->count; i++) {
2043 buffer_info = &rx_ring->buffer_info[i]; 2087 buffer_info = &rx_ring->buffer_info[i];
2044 if(buffer_info->skb) { 2088 if (buffer_info->skb) {
2045 pci_unmap_single(pdev, 2089 pci_unmap_single(pdev,
2046 buffer_info->dma, 2090 buffer_info->dma,
2047 buffer_info->length, 2091 buffer_info->length,
@@ -2122,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
2122 E1000_WRITE_FLUSH(&adapter->hw); 2166 E1000_WRITE_FLUSH(&adapter->hw);
2123 mdelay(5); 2167 mdelay(5);
2124 2168
2125 if(netif_running(netdev)) 2169 if (netif_running(netdev))
2126 e1000_clean_all_rx_rings(adapter); 2170 e1000_clean_all_rx_rings(adapter);
2127} 2171}
2128 2172
@@ -2138,13 +2182,13 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
2138 E1000_WRITE_FLUSH(&adapter->hw); 2182 E1000_WRITE_FLUSH(&adapter->hw);
2139 mdelay(5); 2183 mdelay(5);
2140 2184
2141 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) 2185 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2142 e1000_pci_set_mwi(&adapter->hw); 2186 e1000_pci_set_mwi(&adapter->hw);
2143 2187
2144 if(netif_running(netdev)) { 2188 if (netif_running(netdev)) {
2145 e1000_configure_rx(adapter);
2146 /* No need to loop, because 82542 supports only 1 queue */ 2189 /* No need to loop, because 82542 supports only 1 queue */
2147 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2190 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2191 e1000_configure_rx(adapter);
2148 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2192 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2149 } 2193 }
2150} 2194}
@@ -2163,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
2163 struct e1000_adapter *adapter = netdev_priv(netdev); 2207 struct e1000_adapter *adapter = netdev_priv(netdev);
2164 struct sockaddr *addr = p; 2208 struct sockaddr *addr = p;
2165 2209
2166 if(!is_valid_ether_addr(addr->sa_data)) 2210 if (!is_valid_ether_addr(addr->sa_data))
2167 return -EADDRNOTAVAIL; 2211 return -EADDRNOTAVAIL;
2168 2212
2169 /* 82542 2.0 needs to be in reset to write receive address registers */ 2213 /* 82542 2.0 needs to be in reset to write receive address registers */
2170 2214
2171 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2215 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2172 e1000_enter_82542_rst(adapter); 2216 e1000_enter_82542_rst(adapter);
2173 2217
2174 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2182,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
2182 /* activate the work around */ 2226 /* activate the work around */
2183 adapter->hw.laa_is_present = 1; 2227 adapter->hw.laa_is_present = 1;
2184 2228
2185 /* Hold a copy of the LAA in RAR[14] This is done so that 2229 /* Hold a copy of the LAA in RAR[14] This is done so that
2186 * between the time RAR[0] gets clobbered and the time it 2230 * between the time RAR[0] gets clobbered and the time it
2187 * gets fixed (in e1000_watchdog), the actual LAA is in one 2231 * gets fixed (in e1000_watchdog), the actual LAA is in one
2188 * of the RARs and no incoming packets directed to this port 2232 * of the RARs and no incoming packets directed to this port
2189 * are dropped. Eventaully the LAA will be in RAR[0] and 2233 * are dropped. Eventaully the LAA will be in RAR[0] and
2190 * RAR[14] */ 2234 * RAR[14] */
2191 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 2235 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2192 E1000_RAR_ENTRIES - 1); 2236 E1000_RAR_ENTRIES - 1);
2193 } 2237 }
2194 2238
2195 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2239 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2196 e1000_leave_82542_rst(adapter); 2240 e1000_leave_82542_rst(adapter);
2197 2241
2198 return 0; 2242 return 0;
@@ -2226,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev)
2226 2270
2227 rctl = E1000_READ_REG(hw, RCTL); 2271 rctl = E1000_READ_REG(hw, RCTL);
2228 2272
2229 if(netdev->flags & IFF_PROMISC) { 2273 if (netdev->flags & IFF_PROMISC) {
2230 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2274 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2231 } else if(netdev->flags & IFF_ALLMULTI) { 2275 } else if (netdev->flags & IFF_ALLMULTI) {
2232 rctl |= E1000_RCTL_MPE; 2276 rctl |= E1000_RCTL_MPE;
2233 rctl &= ~E1000_RCTL_UPE; 2277 rctl &= ~E1000_RCTL_UPE;
2234 } else { 2278 } else {
@@ -2239,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev)
2239 2283
2240 /* 82542 2.0 needs to be in reset to write receive address registers */ 2284 /* 82542 2.0 needs to be in reset to write receive address registers */
2241 2285
2242 if(hw->mac_type == e1000_82542_rev2_0) 2286 if (hw->mac_type == e1000_82542_rev2_0)
2243 e1000_enter_82542_rst(adapter); 2287 e1000_enter_82542_rst(adapter);
2244 2288
2245 /* load the first 14 multicast address into the exact filters 1-14 2289 /* load the first 14 multicast address into the exact filters 1-14
@@ -2249,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev)
2249 */ 2293 */
2250 mc_ptr = netdev->mc_list; 2294 mc_ptr = netdev->mc_list;
2251 2295
2252 for(i = 1; i < rar_entries; i++) { 2296 for (i = 1; i < rar_entries; i++) {
2253 if (mc_ptr) { 2297 if (mc_ptr) {
2254 e1000_rar_set(hw, mc_ptr->dmi_addr, i); 2298 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2255 mc_ptr = mc_ptr->next; 2299 mc_ptr = mc_ptr->next;
@@ -2261,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev)
2261 2305
2262 /* clear the old settings from the multicast hash table */ 2306 /* clear the old settings from the multicast hash table */
2263 2307
2264 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) 2308 for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
2265 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 2309 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2266 2310
2267 /* load any remaining addresses into the hash table */ 2311 /* load any remaining addresses into the hash table */
2268 2312
2269 for(; mc_ptr; mc_ptr = mc_ptr->next) { 2313 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2270 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); 2314 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2271 e1000_mta_set(hw, hash_value); 2315 e1000_mta_set(hw, hash_value);
2272 } 2316 }
2273 2317
2274 if(hw->mac_type == e1000_82542_rev2_0) 2318 if (hw->mac_type == e1000_82542_rev2_0)
2275 e1000_leave_82542_rst(adapter); 2319 e1000_leave_82542_rst(adapter);
2276} 2320}
2277 2321
@@ -2297,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
2297 struct net_device *netdev = adapter->netdev; 2341 struct net_device *netdev = adapter->netdev;
2298 uint32_t tctl; 2342 uint32_t tctl;
2299 2343
2300 if(atomic_read(&adapter->tx_fifo_stall)) { 2344 if (atomic_read(&adapter->tx_fifo_stall)) {
2301 if((E1000_READ_REG(&adapter->hw, TDT) == 2345 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2302 E1000_READ_REG(&adapter->hw, TDH)) && 2346 E1000_READ_REG(&adapter->hw, TDH)) &&
2303 (E1000_READ_REG(&adapter->hw, TDFT) == 2347 (E1000_READ_REG(&adapter->hw, TDFT) ==
2304 E1000_READ_REG(&adapter->hw, TDFH)) && 2348 E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2350,18 +2394,18 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2350 e1000_check_for_link(&adapter->hw); 2394 e1000_check_for_link(&adapter->hw);
2351 if (adapter->hw.mac_type == e1000_82573) { 2395 if (adapter->hw.mac_type == e1000_82573) {
2352 e1000_enable_tx_pkt_filtering(&adapter->hw); 2396 e1000_enable_tx_pkt_filtering(&adapter->hw);
2353 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 2397 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2354 e1000_update_mng_vlan(adapter); 2398 e1000_update_mng_vlan(adapter);
2355 } 2399 }
2356 2400
2357 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 2401 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2358 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) 2402 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2359 link = !adapter->hw.serdes_link_down; 2403 link = !adapter->hw.serdes_link_down;
2360 else 2404 else
2361 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; 2405 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2362 2406
2363 if(link) { 2407 if (link) {
2364 if(!netif_carrier_ok(netdev)) { 2408 if (!netif_carrier_ok(netdev)) {
2365 e1000_get_speed_and_duplex(&adapter->hw, 2409 e1000_get_speed_and_duplex(&adapter->hw,
2366 &adapter->link_speed, 2410 &adapter->link_speed,
2367 &adapter->link_duplex); 2411 &adapter->link_duplex);
@@ -2392,7 +2436,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2392 adapter->smartspeed = 0; 2436 adapter->smartspeed = 0;
2393 } 2437 }
2394 } else { 2438 } else {
2395 if(netif_carrier_ok(netdev)) { 2439 if (netif_carrier_ok(netdev)) {
2396 adapter->link_speed = 0; 2440 adapter->link_speed = 0;
2397 adapter->link_duplex = 0; 2441 adapter->link_duplex = 0;
2398 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 2442 DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -2432,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2432 } 2476 }
2433 2477
2434 /* Dynamic mode for Interrupt Throttle Rate (ITR) */ 2478 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
2435 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { 2479 if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
2436 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total 2480 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
2437 * asymmetrical Tx or Rx gets ITR=8000; everyone 2481 * asymmetrical Tx or Rx gets ITR=8000; everyone
2438 * else is between 2000-8000. */ 2482 * else is between 2000-8000. */
2439 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; 2483 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
2440 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 2484 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
2441 adapter->gotcl - adapter->gorcl : 2485 adapter->gotcl - adapter->gorcl :
2442 adapter->gorcl - adapter->gotcl) / 10000; 2486 adapter->gorcl - adapter->gotcl) / 10000;
2443 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2487 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
@@ -2450,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2450 /* Force detection of hung controller every watchdog period */ 2494 /* Force detection of hung controller every watchdog period */
2451 adapter->detect_tx_hung = TRUE; 2495 adapter->detect_tx_hung = TRUE;
2452 2496
2453 /* With 82571 controllers, LAA may be overwritten due to controller 2497 /* With 82571 controllers, LAA may be overwritten due to controller
2454 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2498 * reset from the other port. Set the appropriate LAA in RAR[0] */
2455 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) 2499 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2456 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); 2500 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2479,7 +2523,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2479 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2523 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2480 int err; 2524 int err;
2481 2525
2482 if(skb_shinfo(skb)->tso_size) { 2526 if (skb_shinfo(skb)->tso_size) {
2483 if (skb_header_cloned(skb)) { 2527 if (skb_header_cloned(skb)) {
2484 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2528 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2485 if (err) 2529 if (err)
@@ -2488,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2488 2532
2489 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2533 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2490 mss = skb_shinfo(skb)->tso_size; 2534 mss = skb_shinfo(skb)->tso_size;
2491 if(skb->protocol == ntohs(ETH_P_IP)) { 2535 if (skb->protocol == ntohs(ETH_P_IP)) {
2492 skb->nh.iph->tot_len = 0; 2536 skb->nh.iph->tot_len = 0;
2493 skb->nh.iph->check = 0; 2537 skb->nh.iph->check = 0;
2494 skb->h.th->check = 2538 skb->h.th->check =
@@ -2500,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2500 cmd_length = E1000_TXD_CMD_IP; 2544 cmd_length = E1000_TXD_CMD_IP;
2501 ipcse = skb->h.raw - skb->data - 1; 2545 ipcse = skb->h.raw - skb->data - 1;
2502#ifdef NETIF_F_TSO_IPV6 2546#ifdef NETIF_F_TSO_IPV6
2503 } else if(skb->protocol == ntohs(ETH_P_IPV6)) { 2547 } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
2504 skb->nh.ipv6h->payload_len = 0; 2548 skb->nh.ipv6h->payload_len = 0;
2505 skb->h.th->check = 2549 skb->h.th->check =
2506 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 2550 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2555,7 +2599,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2555 unsigned int i; 2599 unsigned int i;
2556 uint8_t css; 2600 uint8_t css;
2557 2601
2558 if(likely(skb->ip_summed == CHECKSUM_HW)) { 2602 if (likely(skb->ip_summed == CHECKSUM_HW)) {
2559 css = skb->h.raw - skb->data; 2603 css = skb->h.raw - skb->data;
2560 2604
2561 i = tx_ring->next_to_use; 2605 i = tx_ring->next_to_use;
@@ -2595,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2595 2639
2596 i = tx_ring->next_to_use; 2640 i = tx_ring->next_to_use;
2597 2641
2598 while(len) { 2642 while (len) {
2599 buffer_info = &tx_ring->buffer_info[i]; 2643 buffer_info = &tx_ring->buffer_info[i];
2600 size = min(len, max_per_txd); 2644 size = min(len, max_per_txd);
2601#ifdef NETIF_F_TSO 2645#ifdef NETIF_F_TSO
@@ -2611,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2611 2655
2612 /* Workaround for premature desc write-backs 2656 /* Workaround for premature desc write-backs
2613 * in TSO mode. Append 4-byte sentinel desc */ 2657 * in TSO mode. Append 4-byte sentinel desc */
2614 if(unlikely(mss && !nr_frags && size == len && size > 8)) 2658 if (unlikely(mss && !nr_frags && size == len && size > 8))
2615 size -= 4; 2659 size -= 4;
2616#endif 2660#endif
2617 /* work-around for errata 10 and it applies 2661 /* work-around for errata 10 and it applies
@@ -2619,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2619 * The fix is to make sure that the first descriptor of a 2663 * The fix is to make sure that the first descriptor of a
2620 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2664 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2621 */ 2665 */
2622 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2666 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2623 (size > 2015) && count == 0)) 2667 (size > 2015) && count == 0))
2624 size = 2015; 2668 size = 2015;
2625 2669
2626 /* Workaround for potential 82544 hang in PCI-X. Avoid 2670 /* Workaround for potential 82544 hang in PCI-X. Avoid
2627 * terminating buffers within evenly-aligned dwords. */ 2671 * terminating buffers within evenly-aligned dwords. */
2628 if(unlikely(adapter->pcix_82544 && 2672 if (unlikely(adapter->pcix_82544 &&
2629 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2673 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2630 size > 4)) 2674 size > 4))
2631 size -= 4; 2675 size -= 4;
@@ -2641,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2641 len -= size; 2685 len -= size;
2642 offset += size; 2686 offset += size;
2643 count++; 2687 count++;
2644 if(unlikely(++i == tx_ring->count)) i = 0; 2688 if (unlikely(++i == tx_ring->count)) i = 0;
2645 } 2689 }
2646 2690
2647 for(f = 0; f < nr_frags; f++) { 2691 for (f = 0; f < nr_frags; f++) {
2648 struct skb_frag_struct *frag; 2692 struct skb_frag_struct *frag;
2649 2693
2650 frag = &skb_shinfo(skb)->frags[f]; 2694 frag = &skb_shinfo(skb)->frags[f];
2651 len = frag->size; 2695 len = frag->size;
2652 offset = frag->page_offset; 2696 offset = frag->page_offset;
2653 2697
2654 while(len) { 2698 while (len) {
2655 buffer_info = &tx_ring->buffer_info[i]; 2699 buffer_info = &tx_ring->buffer_info[i];
2656 size = min(len, max_per_txd); 2700 size = min(len, max_per_txd);
2657#ifdef NETIF_F_TSO 2701#ifdef NETIF_F_TSO
2658 /* Workaround for premature desc write-backs 2702 /* Workaround for premature desc write-backs
2659 * in TSO mode. Append 4-byte sentinel desc */ 2703 * in TSO mode. Append 4-byte sentinel desc */
2660 if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2704 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2661 size -= 4; 2705 size -= 4;
2662#endif 2706#endif
2663 /* Workaround for potential 82544 hang in PCI-X. 2707 /* Workaround for potential 82544 hang in PCI-X.
2664 * Avoid terminating buffers within evenly-aligned 2708 * Avoid terminating buffers within evenly-aligned
2665 * dwords. */ 2709 * dwords. */
2666 if(unlikely(adapter->pcix_82544 && 2710 if (unlikely(adapter->pcix_82544 &&
2667 !((unsigned long)(frag->page+offset+size-1) & 4) && 2711 !((unsigned long)(frag->page+offset+size-1) & 4) &&
2668 size > 4)) 2712 size > 4))
2669 size -= 4; 2713 size -= 4;
@@ -2680,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2680 len -= size; 2724 len -= size;
2681 offset += size; 2725 offset += size;
2682 count++; 2726 count++;
2683 if(unlikely(++i == tx_ring->count)) i = 0; 2727 if (unlikely(++i == tx_ring->count)) i = 0;
2684 } 2728 }
2685 } 2729 }
2686 2730
@@ -2700,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2700 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2744 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2701 unsigned int i; 2745 unsigned int i;
2702 2746
2703 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2747 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2704 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2748 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2705 E1000_TXD_CMD_TSE; 2749 E1000_TXD_CMD_TSE;
2706 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2750 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2707 2751
2708 if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2752 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2709 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2753 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2710 } 2754 }
2711 2755
2712 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2756 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2713 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2757 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2714 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2758 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2715 } 2759 }
2716 2760
2717 if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2761 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2718 txd_lower |= E1000_TXD_CMD_VLE; 2762 txd_lower |= E1000_TXD_CMD_VLE;
2719 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2763 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2720 } 2764 }
2721 2765
2722 i = tx_ring->next_to_use; 2766 i = tx_ring->next_to_use;
2723 2767
2724 while(count--) { 2768 while (count--) {
2725 buffer_info = &tx_ring->buffer_info[i]; 2769 buffer_info = &tx_ring->buffer_info[i];
2726 tx_desc = E1000_TX_DESC(*tx_ring, i); 2770 tx_desc = E1000_TX_DESC(*tx_ring, i);
2727 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 2771 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2728 tx_desc->lower.data = 2772 tx_desc->lower.data =
2729 cpu_to_le32(txd_lower | buffer_info->length); 2773 cpu_to_le32(txd_lower | buffer_info->length);
2730 tx_desc->upper.data = cpu_to_le32(txd_upper); 2774 tx_desc->upper.data = cpu_to_le32(txd_upper);
2731 if(unlikely(++i == tx_ring->count)) i = 0; 2775 if (unlikely(++i == tx_ring->count)) i = 0;
2732 } 2776 }
2733 2777
2734 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 2778 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2763,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2763 2807
2764 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); 2808 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
2765 2809
2766 if(adapter->link_duplex != HALF_DUPLEX) 2810 if (adapter->link_duplex != HALF_DUPLEX)
2767 goto no_fifo_stall_required; 2811 goto no_fifo_stall_required;
2768 2812
2769 if(atomic_read(&adapter->tx_fifo_stall)) 2813 if (atomic_read(&adapter->tx_fifo_stall))
2770 return 1; 2814 return 1;
2771 2815
2772 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 2816 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2773 atomic_set(&adapter->tx_fifo_stall, 1); 2817 atomic_set(&adapter->tx_fifo_stall, 1);
2774 return 1; 2818 return 1;
2775 } 2819 }
2776 2820
2777no_fifo_stall_required: 2821no_fifo_stall_required:
2778 adapter->tx_fifo_head += skb_fifo_len; 2822 adapter->tx_fifo_head += skb_fifo_len;
2779 if(adapter->tx_fifo_head >= adapter->tx_fifo_size) 2823 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
2780 adapter->tx_fifo_head -= adapter->tx_fifo_size; 2824 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2781 return 0; 2825 return 0;
2782} 2826}
@@ -2787,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2787{ 2831{
2788 struct e1000_hw *hw = &adapter->hw; 2832 struct e1000_hw *hw = &adapter->hw;
2789 uint16_t length, offset; 2833 uint16_t length, offset;
2790 if(vlan_tx_tag_present(skb)) { 2834 if (vlan_tx_tag_present(skb)) {
2791 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 2835 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2792 ( adapter->hw.mng_cookie.status & 2836 ( adapter->hw.mng_cookie.status &
2793 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 2837 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2794 return 0; 2838 return 0;
2795 } 2839 }
2796 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 2840 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2797 struct ethhdr *eth = (struct ethhdr *) skb->data; 2841 struct ethhdr *eth = (struct ethhdr *) skb->data;
2798 if((htons(ETH_P_IP) == eth->h_proto)) { 2842 if ((htons(ETH_P_IP) == eth->h_proto)) {
2799 const struct iphdr *ip = 2843 const struct iphdr *ip =
2800 (struct iphdr *)((uint8_t *)skb->data+14); 2844 (struct iphdr *)((uint8_t *)skb->data+14);
2801 if(IPPROTO_UDP == ip->protocol) { 2845 if (IPPROTO_UDP == ip->protocol) {
2802 struct udphdr *udp = 2846 struct udphdr *udp =
2803 (struct udphdr *)((uint8_t *)ip + 2847 (struct udphdr *)((uint8_t *)ip +
2804 (ip->ihl << 2)); 2848 (ip->ihl << 2));
2805 if(ntohs(udp->dest) == 67) { 2849 if (ntohs(udp->dest) == 67) {
2806 offset = (uint8_t *)udp + 8 - skb->data; 2850 offset = (uint8_t *)udp + 8 - skb->data;
2807 length = skb->len - offset; 2851 length = skb->len - offset;
2808 2852
2809 return e1000_mng_write_dhcp_info(hw, 2853 return e1000_mng_write_dhcp_info(hw,
2810 (uint8_t *)udp + 8, 2854 (uint8_t *)udp + 8,
2811 length); 2855 length);
2812 } 2856 }
2813 } 2857 }
@@ -2830,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2830 unsigned int nr_frags = 0; 2874 unsigned int nr_frags = 0;
2831 unsigned int mss = 0; 2875 unsigned int mss = 0;
2832 int count = 0; 2876 int count = 0;
2833 int tso; 2877 int tso;
2834 unsigned int f; 2878 unsigned int f;
2835 len -= skb->data_len; 2879 len -= skb->data_len;
2836 2880
@@ -2853,7 +2897,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2853 * 4 = ceil(buffer len/mss). To make sure we don't 2897 * 4 = ceil(buffer len/mss). To make sure we don't
2854 * overrun the FIFO, adjust the max buffer len if mss 2898 * overrun the FIFO, adjust the max buffer len if mss
2855 * drops. */ 2899 * drops. */
2856 if(mss) { 2900 if (mss) {
2857 uint8_t hdr_len; 2901 uint8_t hdr_len;
2858 max_per_txd = min(mss << 2, max_per_txd); 2902 max_per_txd = min(mss << 2, max_per_txd);
2859 max_txd_pwr = fls(max_per_txd) - 1; 2903 max_txd_pwr = fls(max_per_txd) - 1;
@@ -2876,12 +2920,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2876 } 2920 }
2877 } 2921 }
2878 2922
2879 if((mss) || (skb->ip_summed == CHECKSUM_HW))
2880 /* reserve a descriptor for the offload context */ 2923 /* reserve a descriptor for the offload context */
2924 if ((mss) || (skb->ip_summed == CHECKSUM_HW))
2881 count++; 2925 count++;
2882 count++; 2926 count++;
2883#else 2927#else
2884 if(skb->ip_summed == CHECKSUM_HW) 2928 if (skb->ip_summed == CHECKSUM_HW)
2885 count++; 2929 count++;
2886#endif 2930#endif
2887 2931
@@ -2894,24 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2894 2938
2895 count += TXD_USE_COUNT(len, max_txd_pwr); 2939 count += TXD_USE_COUNT(len, max_txd_pwr);
2896 2940
2897 if(adapter->pcix_82544) 2941 if (adapter->pcix_82544)
2898 count++; 2942 count++;
2899 2943
2900 /* work-around for errata 10 and it applies to all controllers 2944 /* work-around for errata 10 and it applies to all controllers
2901 * in PCI-X mode, so add one more descriptor to the count 2945 * in PCI-X mode, so add one more descriptor to the count
2902 */ 2946 */
2903 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2947 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2904 (len > 2015))) 2948 (len > 2015)))
2905 count++; 2949 count++;
2906 2950
2907 nr_frags = skb_shinfo(skb)->nr_frags; 2951 nr_frags = skb_shinfo(skb)->nr_frags;
2908 for(f = 0; f < nr_frags; f++) 2952 for (f = 0; f < nr_frags; f++)
2909 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 2953 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2910 max_txd_pwr); 2954 max_txd_pwr);
2911 if(adapter->pcix_82544) 2955 if (adapter->pcix_82544)
2912 count += nr_frags; 2956 count += nr_frags;
2913 2957
2914 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2958 if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2915 e1000_transfer_dhcp_info(adapter, skb); 2959 e1000_transfer_dhcp_info(adapter, skb);
2916 2960
2917 local_irq_save(flags); 2961 local_irq_save(flags);
@@ -2929,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2929 return NETDEV_TX_BUSY; 2973 return NETDEV_TX_BUSY;
2930 } 2974 }
2931 2975
2932 if(unlikely(adapter->hw.mac_type == e1000_82547)) { 2976 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
2933 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 2977 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2934 netif_stop_queue(netdev); 2978 netif_stop_queue(netdev);
2935 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); 2979 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2936 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2980 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2938,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2938 } 2982 }
2939 } 2983 }
2940 2984
2941 if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 2985 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2942 tx_flags |= E1000_TX_FLAGS_VLAN; 2986 tx_flags |= E1000_TX_FLAGS_VLAN;
2943 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 2987 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2944 } 2988 }
2945 2989
2946 first = tx_ring->next_to_use; 2990 first = tx_ring->next_to_use;
2947 2991
2948 tso = e1000_tso(adapter, tx_ring, skb); 2992 tso = e1000_tso(adapter, tx_ring, skb);
2949 if (tso < 0) { 2993 if (tso < 0) {
2950 dev_kfree_skb_any(skb); 2994 dev_kfree_skb_any(skb);
@@ -3033,9 +3077,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3033 struct e1000_adapter *adapter = netdev_priv(netdev); 3077 struct e1000_adapter *adapter = netdev_priv(netdev);
3034 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3078 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3035 3079
3036 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3080 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3037 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3081 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3038 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3082 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3039 return -EINVAL; 3083 return -EINVAL;
3040 } 3084 }
3041 3085
@@ -3083,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3083 3127
3084 netdev->mtu = new_mtu; 3128 netdev->mtu = new_mtu;
3085 3129
3086 if(netif_running(netdev)) { 3130 if (netif_running(netdev)) {
3087 e1000_down(adapter); 3131 e1000_down(adapter);
3088 e1000_up(adapter); 3132 e1000_up(adapter);
3089 } 3133 }
@@ -3170,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3170 hw->collision_delta = E1000_READ_REG(hw, COLC); 3214 hw->collision_delta = E1000_READ_REG(hw, COLC);
3171 adapter->stats.colc += hw->collision_delta; 3215 adapter->stats.colc += hw->collision_delta;
3172 3216
3173 if(hw->mac_type >= e1000_82543) { 3217 if (hw->mac_type >= e1000_82543) {
3174 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); 3218 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3175 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); 3219 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3176 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); 3220 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3178,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3178 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 3222 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3179 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 3223 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3180 } 3224 }
3181 if(hw->mac_type > e1000_82547_rev_2) { 3225 if (hw->mac_type > e1000_82547_rev_2) {
3182 adapter->stats.iac += E1000_READ_REG(hw, IAC); 3226 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3183 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); 3227 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3184 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); 3228 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3222,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
3222 3266
3223 /* Phy Stats */ 3267 /* Phy Stats */
3224 3268
3225 if(hw->media_type == e1000_media_type_copper) { 3269 if (hw->media_type == e1000_media_type_copper) {
3226 if((adapter->link_speed == SPEED_1000) && 3270 if ((adapter->link_speed == SPEED_1000) &&
3227 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3271 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3228 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3272 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3229 adapter->phy_stats.idle_errors += phy_tmp; 3273 adapter->phy_stats.idle_errors += phy_tmp;
3230 } 3274 }
3231 3275
3232 if((hw->mac_type <= e1000_82546) && 3276 if ((hw->mac_type <= e1000_82546) &&
3233 (hw->phy_type == e1000_phy_m88) && 3277 (hw->phy_type == e1000_phy_m88) &&
3234 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3278 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3235 adapter->phy_stats.receive_errors += phy_tmp; 3279 adapter->phy_stats.receive_errors += phy_tmp;
@@ -3294,7 +3338,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3294 return IRQ_NONE; /* Not our interrupt */ 3338 return IRQ_NONE; /* Not our interrupt */
3295 } 3339 }
3296 3340
3297 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3341 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3298 hw->get_link_status = 1; 3342 hw->get_link_status = 1;
3299 mod_timer(&adapter->watchdog_timer, jiffies); 3343 mod_timer(&adapter->watchdog_timer, jiffies);
3300 } 3344 }
@@ -3326,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3326 3370
3327#else /* if !CONFIG_E1000_NAPI */ 3371#else /* if !CONFIG_E1000_NAPI */
3328 /* Writing IMC and IMS is needed for 82547. 3372 /* Writing IMC and IMS is needed for 82547.
3329 Due to Hub Link bus being occupied, an interrupt 3373 * Due to Hub Link bus being occupied, an interrupt
3330 de-assertion message is not able to be sent. 3374 * de-assertion message is not able to be sent.
3331 When an interrupt assertion message is generated later, 3375 * When an interrupt assertion message is generated later,
3332 two messages are re-ordered and sent out. 3376 * two messages are re-ordered and sent out.
3333 That causes APIC to think 82547 is in de-assertion 3377 * That causes APIC to think 82547 is in de-assertion
3334 state, while 82547 is in assertion state, resulting 3378 * state, while 82547 is in assertion state, resulting
3335 in dead lock. Writing IMC forces 82547 into 3379 * in dead lock. Writing IMC forces 82547 into
3336 de-assertion state. 3380 * de-assertion state.
3337 */ 3381 */
3338 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ 3382 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
3339 atomic_inc(&adapter->irq_sem); 3383 atomic_inc(&adapter->irq_sem);
3340 E1000_WRITE_REG(hw, IMC, ~0); 3384 E1000_WRITE_REG(hw, IMC, ~0);
3341 } 3385 }
3342 3386
3343 for(i = 0; i < E1000_MAX_INTR; i++) 3387 for (i = 0; i < E1000_MAX_INTR; i++)
3344 if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3388 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3345 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3389 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3346 break; 3390 break;
3347 3391
3348 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) 3392 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3349 e1000_irq_enable(adapter); 3393 e1000_irq_enable(adapter);
3350 3394
3351#endif /* CONFIG_E1000_NAPI */ 3395#endif /* CONFIG_E1000_NAPI */
@@ -3397,9 +3441,9 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3397 3441
3398 *budget -= work_done; 3442 *budget -= work_done;
3399 poll_dev->quota -= work_done; 3443 poll_dev->quota -= work_done;
3400 3444
3401 /* If no Tx and not enough Rx work done, exit the polling mode */ 3445 /* If no Tx and not enough Rx work done, exit the polling mode */
3402 if((!tx_cleaned && (work_done == 0)) || 3446 if ((!tx_cleaned && (work_done == 0)) ||
3403 !netif_running(adapter->netdev)) { 3447 !netif_running(adapter->netdev)) {
3404quit_polling: 3448quit_polling:
3405 netif_rx_complete(poll_dev); 3449 netif_rx_complete(poll_dev);
@@ -3431,7 +3475,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3431 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3475 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3432 3476
3433 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 3477 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3434 for(cleaned = FALSE; !cleaned; ) { 3478 for (cleaned = FALSE; !cleaned; ) {
3435 tx_desc = E1000_TX_DESC(*tx_ring, i); 3479 tx_desc = E1000_TX_DESC(*tx_ring, i);
3436 buffer_info = &tx_ring->buffer_info[i]; 3480 buffer_info = &tx_ring->buffer_info[i];
3437 cleaned = (i == eop); 3481 cleaned = (i == eop);
@@ -3442,7 +3486,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3442 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3486 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3443 memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); 3487 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3444 3488
3445 if(unlikely(++i == tx_ring->count)) i = 0; 3489 if (unlikely(++i == tx_ring->count)) i = 0;
3446 } 3490 }
3447 3491
3448#ifdef CONFIG_E1000_MQ 3492#ifdef CONFIG_E1000_MQ
@@ -3457,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3457 3501
3458 spin_lock(&tx_ring->tx_lock); 3502 spin_lock(&tx_ring->tx_lock);
3459 3503
3460 if(unlikely(cleaned && netif_queue_stopped(netdev) && 3504 if (unlikely(cleaned && netif_queue_stopped(netdev) &&
3461 netif_carrier_ok(netdev))) 3505 netif_carrier_ok(netdev)))
3462 netif_wake_queue(netdev); 3506 netif_wake_queue(netdev);
3463 3507
@@ -3519,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
3519 skb->ip_summed = CHECKSUM_NONE; 3563 skb->ip_summed = CHECKSUM_NONE;
3520 3564
3521 /* 82543 or newer only */ 3565 /* 82543 or newer only */
3522 if(unlikely(adapter->hw.mac_type < e1000_82543)) return; 3566 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
3523 /* Ignore Checksum bit is set */ 3567 /* Ignore Checksum bit is set */
3524 if(unlikely(status & E1000_RXD_STAT_IXSM)) return; 3568 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3525 /* TCP/UDP checksum error bit is set */ 3569 /* TCP/UDP checksum error bit is set */
3526 if(unlikely(errors & E1000_RXD_ERR_TCPE)) { 3570 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3527 /* let the stack verify checksum errors */ 3571 /* let the stack verify checksum errors */
3528 adapter->hw_csum_err++; 3572 adapter->hw_csum_err++;
3529 return; 3573 return;
3530 } 3574 }
3531 /* TCP/UDP Checksum has not been calculated */ 3575 /* TCP/UDP Checksum has not been calculated */
3532 if(adapter->hw.mac_type <= e1000_82547_rev_2) { 3576 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
3533 if(!(status & E1000_RXD_STAT_TCPCS)) 3577 if (!(status & E1000_RXD_STAT_TCPCS))
3534 return; 3578 return;
3535 } else { 3579 } else {
3536 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 3580 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
3537 return; 3581 return;
3538 } 3582 }
3539 /* It must be a TCP or UDP packet with a valid checksum */ 3583 /* It must be a TCP or UDP packet with a valid checksum */
@@ -3569,9 +3613,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3569{ 3613{
3570 struct net_device *netdev = adapter->netdev; 3614 struct net_device *netdev = adapter->netdev;
3571 struct pci_dev *pdev = adapter->pdev; 3615 struct pci_dev *pdev = adapter->pdev;
3572 struct e1000_rx_desc *rx_desc; 3616 struct e1000_rx_desc *rx_desc, *next_rxd;
3573 struct e1000_buffer *buffer_info; 3617 struct e1000_buffer *buffer_info, *next_buffer;
3574 struct sk_buff *skb;
3575 unsigned long flags; 3618 unsigned long flags;
3576 uint32_t length; 3619 uint32_t length;
3577 uint8_t last_byte; 3620 uint8_t last_byte;
@@ -3581,16 +3624,25 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3581 3624
3582 i = rx_ring->next_to_clean; 3625 i = rx_ring->next_to_clean;
3583 rx_desc = E1000_RX_DESC(*rx_ring, i); 3626 rx_desc = E1000_RX_DESC(*rx_ring, i);
3627 buffer_info = &rx_ring->buffer_info[i];
3584 3628
3585 while(rx_desc->status & E1000_RXD_STAT_DD) { 3629 while (rx_desc->status & E1000_RXD_STAT_DD) {
3586 buffer_info = &rx_ring->buffer_info[i]; 3630 struct sk_buff *skb, *next_skb;
3587 u8 status; 3631 u8 status;
3588#ifdef CONFIG_E1000_NAPI 3632#ifdef CONFIG_E1000_NAPI
3589 if(*work_done >= work_to_do) 3633 if (*work_done >= work_to_do)
3590 break; 3634 break;
3591 (*work_done)++; 3635 (*work_done)++;
3592#endif 3636#endif
3593 status = rx_desc->status; 3637 status = rx_desc->status;
3638 skb = buffer_info->skb;
3639 buffer_info->skb = NULL;
3640
3641 if (++i == rx_ring->count) i = 0;
3642 next_rxd = E1000_RX_DESC(*rx_ring, i);
3643 next_buffer = &rx_ring->buffer_info[i];
3644 next_skb = next_buffer->skb;
3645
3594 cleaned = TRUE; 3646 cleaned = TRUE;
3595 cleaned_count++; 3647 cleaned_count++;
3596 pci_unmap_single(pdev, 3648 pci_unmap_single(pdev,
@@ -3598,20 +3650,50 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3598 buffer_info->length, 3650 buffer_info->length,
3599 PCI_DMA_FROMDEVICE); 3651 PCI_DMA_FROMDEVICE);
3600 3652
3601 skb = buffer_info->skb;
3602 length = le16_to_cpu(rx_desc->length); 3653 length = le16_to_cpu(rx_desc->length);
3603 3654
3604 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { 3655 skb_put(skb, length);
3605 /* All receives must fit into a single buffer */ 3656
3606 E1000_DBG("%s: Receive packet consumed multiple" 3657 if (!(status & E1000_RXD_STAT_EOP)) {
3607 " buffers\n", netdev->name); 3658 if (!rx_ring->rx_skb_top) {
3608 dev_kfree_skb_irq(skb); 3659 rx_ring->rx_skb_top = skb;
3660 rx_ring->rx_skb_top->len = length;
3661 rx_ring->rx_skb_prev = skb;
3662 } else {
3663 if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
3664 rx_ring->rx_skb_prev->next = skb;
3665 skb->prev = rx_ring->rx_skb_prev;
3666 } else {
3667 skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
3668 }
3669 rx_ring->rx_skb_prev = skb;
3670 rx_ring->rx_skb_top->data_len += length;
3671 }
3609 goto next_desc; 3672 goto next_desc;
3673 } else {
3674 if (rx_ring->rx_skb_top) {
3675 if (skb_shinfo(rx_ring->rx_skb_top)
3676 ->frag_list) {
3677 rx_ring->rx_skb_prev->next = skb;
3678 skb->prev = rx_ring->rx_skb_prev;
3679 } else
3680 skb_shinfo(rx_ring->rx_skb_top)
3681 ->frag_list = skb;
3682
3683 rx_ring->rx_skb_top->data_len += length;
3684 rx_ring->rx_skb_top->len +=
3685 rx_ring->rx_skb_top->data_len;
3686
3687 skb = rx_ring->rx_skb_top;
3688 multi_descriptor = TRUE;
3689 rx_ring->rx_skb_top = NULL;
3690 rx_ring->rx_skb_prev = NULL;
3691 }
3610 } 3692 }
3611 3693
3612 if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 3694 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
3613 last_byte = *(skb->data + length - 1); 3695 last_byte = *(skb->data + length - 1);
3614 if(TBI_ACCEPT(&adapter->hw, rx_desc->status, 3696 if (TBI_ACCEPT(&adapter->hw, status,
3615 rx_desc->errors, length, last_byte)) { 3697 rx_desc->errors, length, last_byte)) {
3616 spin_lock_irqsave(&adapter->stats_lock, flags); 3698 spin_lock_irqsave(&adapter->stats_lock, flags);
3617 e1000_tbi_adjust_stats(&adapter->hw, 3699 e1000_tbi_adjust_stats(&adapter->hw,
@@ -3656,9 +3738,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3656 (uint32_t)(status) | 3738 (uint32_t)(status) |
3657 ((uint32_t)(rx_desc->errors) << 24), 3739 ((uint32_t)(rx_desc->errors) << 24),
3658 rx_desc->csum, skb); 3740 rx_desc->csum, skb);
3741
3659 skb->protocol = eth_type_trans(skb, netdev); 3742 skb->protocol = eth_type_trans(skb, netdev);
3660#ifdef CONFIG_E1000_NAPI 3743#ifdef CONFIG_E1000_NAPI
3661 if(unlikely(adapter->vlgrp && 3744 if (unlikely(adapter->vlgrp &&
3662 (status & E1000_RXD_STAT_VP))) { 3745 (status & E1000_RXD_STAT_VP))) {
3663 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3746 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3664 le16_to_cpu(rx_desc->special) & 3747 le16_to_cpu(rx_desc->special) &
@@ -3667,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3667 netif_receive_skb(skb); 3750 netif_receive_skb(skb);
3668 } 3751 }
3669#else /* CONFIG_E1000_NAPI */ 3752#else /* CONFIG_E1000_NAPI */
3670 if(unlikely(adapter->vlgrp && 3753 if (unlikely(adapter->vlgrp &&
3671 (rx_desc->status & E1000_RXD_STAT_VP))) { 3754 (status & E1000_RXD_STAT_VP))) {
3672 vlan_hwaccel_rx(skb, adapter->vlgrp, 3755 vlan_hwaccel_rx(skb, adapter->vlgrp,
3673 le16_to_cpu(rx_desc->special) & 3756 le16_to_cpu(rx_desc->special) &
3674 E1000_RXD_SPC_VLAN_MASK); 3757 E1000_RXD_SPC_VLAN_MASK);
@@ -3691,6 +3774,8 @@ next_desc:
3691 cleaned_count = 0; 3774 cleaned_count = 0;
3692 } 3775 }
3693 3776
3777 rx_desc = next_rxd;
3778 buffer_info = next_buffer;
3694 } 3779 }
3695 rx_ring->next_to_clean = i; 3780 rx_ring->next_to_clean = i;
3696 3781
@@ -3716,13 +3801,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3716 struct e1000_rx_ring *rx_ring) 3801 struct e1000_rx_ring *rx_ring)
3717#endif 3802#endif
3718{ 3803{
3719 union e1000_rx_desc_packet_split *rx_desc; 3804 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
3720 struct net_device *netdev = adapter->netdev; 3805 struct net_device *netdev = adapter->netdev;
3721 struct pci_dev *pdev = adapter->pdev; 3806 struct pci_dev *pdev = adapter->pdev;
3722 struct e1000_buffer *buffer_info; 3807 struct e1000_buffer *buffer_info, *next_buffer;
3723 struct e1000_ps_page *ps_page; 3808 struct e1000_ps_page *ps_page;
3724 struct e1000_ps_page_dma *ps_page_dma; 3809 struct e1000_ps_page_dma *ps_page_dma;
3725 struct sk_buff *skb; 3810 struct sk_buff *skb, *next_skb;
3726 unsigned int i, j; 3811 unsigned int i, j;
3727 uint32_t length, staterr; 3812 uint32_t length, staterr;
3728 int cleaned_count = 0; 3813 int cleaned_count = 0;
@@ -3731,39 +3816,44 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3731 i = rx_ring->next_to_clean; 3816 i = rx_ring->next_to_clean;
3732 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3817 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3733 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3818 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3819 buffer_info = &rx_ring->buffer_info[i];
3734 3820
3735 while(staterr & E1000_RXD_STAT_DD) { 3821 while (staterr & E1000_RXD_STAT_DD) {
3736 buffer_info = &rx_ring->buffer_info[i];
3737 ps_page = &rx_ring->ps_page[i]; 3822 ps_page = &rx_ring->ps_page[i];
3738 ps_page_dma = &rx_ring->ps_page_dma[i]; 3823 ps_page_dma = &rx_ring->ps_page_dma[i];
3739#ifdef CONFIG_E1000_NAPI 3824#ifdef CONFIG_E1000_NAPI
3740 if(unlikely(*work_done >= work_to_do)) 3825 if (unlikely(*work_done >= work_to_do))
3741 break; 3826 break;
3742 (*work_done)++; 3827 (*work_done)++;
3743#endif 3828#endif
3829 skb = buffer_info->skb;
3830
3831 if (++i == rx_ring->count) i = 0;
3832 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
3833 next_buffer = &rx_ring->buffer_info[i];
3834 next_skb = next_buffer->skb;
3835
3744 cleaned = TRUE; 3836 cleaned = TRUE;
3745 cleaned_count++; 3837 cleaned_count++;
3746 pci_unmap_single(pdev, buffer_info->dma, 3838 pci_unmap_single(pdev, buffer_info->dma,
3747 buffer_info->length, 3839 buffer_info->length,
3748 PCI_DMA_FROMDEVICE); 3840 PCI_DMA_FROMDEVICE);
3749 3841
3750 skb = buffer_info->skb; 3842 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3751
3752 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3753 E1000_DBG("%s: Packet Split buffers didn't pick up" 3843 E1000_DBG("%s: Packet Split buffers didn't pick up"
3754 " the full packet\n", netdev->name); 3844 " the full packet\n", netdev->name);
3755 dev_kfree_skb_irq(skb); 3845 dev_kfree_skb_irq(skb);
3756 goto next_desc; 3846 goto next_desc;
3757 } 3847 }
3758 3848
3759 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3849 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3760 dev_kfree_skb_irq(skb); 3850 dev_kfree_skb_irq(skb);
3761 goto next_desc; 3851 goto next_desc;
3762 } 3852 }
3763 3853
3764 length = le16_to_cpu(rx_desc->wb.middle.length0); 3854 length = le16_to_cpu(rx_desc->wb.middle.length0);
3765 3855
3766 if(unlikely(!length)) { 3856 if (unlikely(!length)) {
3767 E1000_DBG("%s: Last part of the packet spanning" 3857 E1000_DBG("%s: Last part of the packet spanning"
3768 " multiple descriptors\n", netdev->name); 3858 " multiple descriptors\n", netdev->name);
3769 dev_kfree_skb_irq(skb); 3859 dev_kfree_skb_irq(skb);
@@ -3773,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3773 /* Good Receive */ 3863 /* Good Receive */
3774 skb_put(skb, length); 3864 skb_put(skb, length);
3775 3865
3776 for(j = 0; j < adapter->rx_ps_pages; j++) { 3866 for (j = 0; j < adapter->rx_ps_pages; j++) {
3777 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 3867 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3778 break; 3868 break;
3779 3869
3780 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], 3870 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3794,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3794 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 3884 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3795 skb->protocol = eth_type_trans(skb, netdev); 3885 skb->protocol = eth_type_trans(skb, netdev);
3796 3886
3797 if(likely(rx_desc->wb.upper.header_status & 3887 if (likely(rx_desc->wb.upper.header_status &
3798 E1000_RXDPS_HDRSTAT_HDRSP)) { 3888 E1000_RXDPS_HDRSTAT_HDRSP))
3799 adapter->rx_hdr_split++; 3889 adapter->rx_hdr_split++;
3800#ifdef HAVE_RX_ZERO_COPY
3801 skb_shinfo(skb)->zero_copy = TRUE;
3802#endif
3803 }
3804#ifdef CONFIG_E1000_NAPI 3890#ifdef CONFIG_E1000_NAPI
3805 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3891 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3806 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3892 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3807 le16_to_cpu(rx_desc->wb.middle.vlan) & 3893 le16_to_cpu(rx_desc->wb.middle.vlan) &
3808 E1000_RXD_SPC_VLAN_MASK); 3894 E1000_RXD_SPC_VLAN_MASK);
@@ -3810,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3810 netif_receive_skb(skb); 3896 netif_receive_skb(skb);
3811 } 3897 }
3812#else /* CONFIG_E1000_NAPI */ 3898#else /* CONFIG_E1000_NAPI */
3813 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3899 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3814 vlan_hwaccel_rx(skb, adapter->vlgrp, 3900 vlan_hwaccel_rx(skb, adapter->vlgrp,
3815 le16_to_cpu(rx_desc->wb.middle.vlan) & 3901 le16_to_cpu(rx_desc->wb.middle.vlan) &
3816 E1000_RXD_SPC_VLAN_MASK); 3902 E1000_RXD_SPC_VLAN_MASK);
@@ -3834,6 +3920,9 @@ next_desc:
3834 cleaned_count = 0; 3920 cleaned_count = 0;
3835 } 3921 }
3836 3922
3923 rx_desc = next_rxd;
3924 buffer_info = next_buffer;
3925
3837 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3926 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3838 } 3927 }
3839 rx_ring->next_to_clean = i; 3928 rx_ring->next_to_clean = i;
@@ -3875,7 +3964,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3875 } 3964 }
3876 3965
3877 3966
3878 if(unlikely(!skb)) { 3967 if (unlikely(!skb)) {
3879 /* Better luck next round */ 3968 /* Better luck next round */
3880 adapter->alloc_rx_buff_failed++; 3969 adapter->alloc_rx_buff_failed++;
3881 break; 3970 break;
@@ -3940,20 +4029,23 @@ map_skb:
3940 rx_desc = E1000_RX_DESC(*rx_ring, i); 4029 rx_desc = E1000_RX_DESC(*rx_ring, i);
3941 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4030 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3942 4031
3943 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 4032 if (unlikely(++i == rx_ring->count))
3944 /* Force memory writes to complete before letting h/w 4033 i = 0;
3945 * know there are new descriptors to fetch. (Only
3946 * applicable for weak-ordered memory model archs,
3947 * such as IA-64). */
3948 wmb();
3949 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
3950 }
3951
3952 if(unlikely(++i == rx_ring->count)) i = 0;
3953 buffer_info = &rx_ring->buffer_info[i]; 4034 buffer_info = &rx_ring->buffer_info[i];
3954 } 4035 }
3955 4036
3956 rx_ring->next_to_use = i; 4037 if (likely(rx_ring->next_to_use != i)) {
4038 rx_ring->next_to_use = i;
4039 if (unlikely(i-- == 0))
4040 i = (rx_ring->count - 1);
4041
4042 /* Force memory writes to complete before letting h/w
4043 * know there are new descriptors to fetch. (Only
4044 * applicable for weak-ordered memory model archs,
4045 * such as IA-64). */
4046 wmb();
4047 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4048 }
3957} 4049}
3958 4050
3959/** 4051/**
@@ -3983,13 +4075,15 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3983 while (cleaned_count--) { 4075 while (cleaned_count--) {
3984 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 4076 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3985 4077
3986 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 4078 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
3987 if (j < adapter->rx_ps_pages) { 4079 if (j < adapter->rx_ps_pages) {
3988 if (likely(!ps_page->ps_page[j])) { 4080 if (likely(!ps_page->ps_page[j])) {
3989 ps_page->ps_page[j] = 4081 ps_page->ps_page[j] =
3990 alloc_page(GFP_ATOMIC); 4082 alloc_page(GFP_ATOMIC);
3991 if (unlikely(!ps_page->ps_page[j])) 4083 if (unlikely(!ps_page->ps_page[j])) {
4084 adapter->alloc_rx_buff_failed++;
3992 goto no_buffers; 4085 goto no_buffers;
4086 }
3993 ps_page_dma->ps_page_dma[j] = 4087 ps_page_dma->ps_page_dma[j] =
3994 pci_map_page(pdev, 4088 pci_map_page(pdev,
3995 ps_page->ps_page[j], 4089 ps_page->ps_page[j],
@@ -3997,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3997 PCI_DMA_FROMDEVICE); 4091 PCI_DMA_FROMDEVICE);
3998 } 4092 }
3999 /* Refresh the desc even if buffer_addrs didn't 4093 /* Refresh the desc even if buffer_addrs didn't
4000 * change because each write-back erases 4094 * change because each write-back erases
4001 * this info. 4095 * this info.
4002 */ 4096 */
4003 rx_desc->read.buffer_addr[j+1] = 4097 rx_desc->read.buffer_addr[j+1] =
@@ -4008,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4008 4102
4009 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4103 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
4010 4104
4011 if(unlikely(!skb)) 4105 if (unlikely(!skb)) {
4106 adapter->alloc_rx_buff_failed++;
4012 break; 4107 break;
4108 }
4013 4109
4014 /* Make buffer alignment 2 beyond a 16 byte boundary 4110 /* Make buffer alignment 2 beyond a 16 byte boundary
4015 * this will result in a 16 byte aligned IP header after 4111 * this will result in a 16 byte aligned IP header after
@@ -4027,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4027 4123
4028 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 4124 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4029 4125
4030 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 4126 if (unlikely(++i == rx_ring->count)) i = 0;
4031 /* Force memory writes to complete before letting h/w
4032 * know there are new descriptors to fetch. (Only
4033 * applicable for weak-ordered memory model archs,
4034 * such as IA-64). */
4035 wmb();
4036 /* Hardware increments by 16 bytes, but packet split
4037 * descriptors are 32 bytes...so we increment tail
4038 * twice as much.
4039 */
4040 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
4041 }
4042
4043 if(unlikely(++i == rx_ring->count)) i = 0;
4044 buffer_info = &rx_ring->buffer_info[i]; 4127 buffer_info = &rx_ring->buffer_info[i];
4045 ps_page = &rx_ring->ps_page[i]; 4128 ps_page = &rx_ring->ps_page[i];
4046 ps_page_dma = &rx_ring->ps_page_dma[i]; 4129 ps_page_dma = &rx_ring->ps_page_dma[i];
4047 } 4130 }
4048 4131
4049no_buffers: 4132no_buffers:
4050 rx_ring->next_to_use = i; 4133 if (likely(rx_ring->next_to_use != i)) {
4134 rx_ring->next_to_use = i;
4135 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4136
4137 /* Force memory writes to complete before letting h/w
4138 * know there are new descriptors to fetch. (Only
4139 * applicable for weak-ordered memory model archs,
4140 * such as IA-64). */
4141 wmb();
4142 /* Hardware increments by 16 bytes, but packet split
4143 * descriptors are 32 bytes...so we increment tail
4144 * twice as much.
4145 */
4146 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
4147 }
4051} 4148}
4052 4149
4053/** 4150/**
@@ -4061,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4061 uint16_t phy_status; 4158 uint16_t phy_status;
4062 uint16_t phy_ctrl; 4159 uint16_t phy_ctrl;
4063 4160
4064 if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || 4161 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
4065 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) 4162 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
4066 return; 4163 return;
4067 4164
4068 if(adapter->smartspeed == 0) { 4165 if (adapter->smartspeed == 0) {
4069 /* If Master/Slave config fault is asserted twice, 4166 /* If Master/Slave config fault is asserted twice,
4070 * we assume back-to-back */ 4167 * we assume back-to-back */
4071 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4168 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4072 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4169 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4073 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4170 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4074 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4171 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4075 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4172 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4076 if(phy_ctrl & CR_1000T_MS_ENABLE) { 4173 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4077 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4174 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4078 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, 4175 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
4079 phy_ctrl); 4176 phy_ctrl);
4080 adapter->smartspeed++; 4177 adapter->smartspeed++;
4081 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4178 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4082 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, 4179 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
4083 &phy_ctrl)) { 4180 &phy_ctrl)) {
4084 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4181 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -4088,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4088 } 4185 }
4089 } 4186 }
4090 return; 4187 return;
4091 } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4188 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4092 /* If still no link, perhaps using 2/3 pair cable */ 4189 /* If still no link, perhaps using 2/3 pair cable */
4093 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4190 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4094 phy_ctrl |= CR_1000T_MS_ENABLE; 4191 phy_ctrl |= CR_1000T_MS_ENABLE;
4095 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); 4192 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
4096 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4193 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4097 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { 4194 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
4098 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4195 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4099 MII_CR_RESTART_AUTO_NEG); 4196 MII_CR_RESTART_AUTO_NEG);
@@ -4101,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4101 } 4198 }
4102 } 4199 }
4103 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4200 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4104 if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4201 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4105 adapter->smartspeed = 0; 4202 adapter->smartspeed = 0;
4106} 4203}
4107 4204
@@ -4142,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4142 uint16_t spddplx; 4239 uint16_t spddplx;
4143 unsigned long flags; 4240 unsigned long flags;
4144 4241
4145 if(adapter->hw.media_type != e1000_media_type_copper) 4242 if (adapter->hw.media_type != e1000_media_type_copper)
4146 return -EOPNOTSUPP; 4243 return -EOPNOTSUPP;
4147 4244
4148 switch (cmd) { 4245 switch (cmd) {
@@ -4150,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4150 data->phy_id = adapter->hw.phy_addr; 4247 data->phy_id = adapter->hw.phy_addr;
4151 break; 4248 break;
4152 case SIOCGMIIREG: 4249 case SIOCGMIIREG:
4153 if(!capable(CAP_NET_ADMIN)) 4250 if (!capable(CAP_NET_ADMIN))
4154 return -EPERM; 4251 return -EPERM;
4155 spin_lock_irqsave(&adapter->stats_lock, flags); 4252 spin_lock_irqsave(&adapter->stats_lock, flags);
4156 if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, 4253 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4157 &data->val_out)) { 4254 &data->val_out)) {
4158 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4255 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4159 return -EIO; 4256 return -EIO;
@@ -4161,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4161 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4258 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4162 break; 4259 break;
4163 case SIOCSMIIREG: 4260 case SIOCSMIIREG:
4164 if(!capable(CAP_NET_ADMIN)) 4261 if (!capable(CAP_NET_ADMIN))
4165 return -EPERM; 4262 return -EPERM;
4166 if(data->reg_num & ~(0x1F)) 4263 if (data->reg_num & ~(0x1F))
4167 return -EFAULT; 4264 return -EFAULT;
4168 mii_reg = data->val_in; 4265 mii_reg = data->val_in;
4169 spin_lock_irqsave(&adapter->stats_lock, flags); 4266 spin_lock_irqsave(&adapter->stats_lock, flags);
4170 if(e1000_write_phy_reg(&adapter->hw, data->reg_num, 4267 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
4171 mii_reg)) { 4268 mii_reg)) {
4172 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4269 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4173 return -EIO; 4270 return -EIO;
4174 } 4271 }
4175 if(adapter->hw.phy_type == e1000_phy_m88) { 4272 if (adapter->hw.phy_type == e1000_phy_m88) {
4176 switch (data->reg_num) { 4273 switch (data->reg_num) {
4177 case PHY_CTRL: 4274 case PHY_CTRL:
4178 if(mii_reg & MII_CR_POWER_DOWN) 4275 if (mii_reg & MII_CR_POWER_DOWN)
4179 break; 4276 break;
4180 if(mii_reg & MII_CR_AUTO_NEG_EN) { 4277 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4181 adapter->hw.autoneg = 1; 4278 adapter->hw.autoneg = 1;
4182 adapter->hw.autoneg_advertised = 0x2F; 4279 adapter->hw.autoneg_advertised = 0x2F;
4183 } else { 4280 } else {
@@ -4192,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4192 HALF_DUPLEX; 4289 HALF_DUPLEX;
4193 retval = e1000_set_spd_dplx(adapter, 4290 retval = e1000_set_spd_dplx(adapter,
4194 spddplx); 4291 spddplx);
4195 if(retval) { 4292 if (retval) {
4196 spin_unlock_irqrestore( 4293 spin_unlock_irqrestore(
4197 &adapter->stats_lock, 4294 &adapter->stats_lock,
4198 flags); 4295 flags);
4199 return retval; 4296 return retval;
4200 } 4297 }
4201 } 4298 }
4202 if(netif_running(adapter->netdev)) { 4299 if (netif_running(adapter->netdev)) {
4203 e1000_down(adapter); 4300 e1000_down(adapter);
4204 e1000_up(adapter); 4301 e1000_up(adapter);
4205 } else 4302 } else
@@ -4207,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4207 break; 4304 break;
4208 case M88E1000_PHY_SPEC_CTRL: 4305 case M88E1000_PHY_SPEC_CTRL:
4209 case M88E1000_EXT_PHY_SPEC_CTRL: 4306 case M88E1000_EXT_PHY_SPEC_CTRL:
4210 if(e1000_phy_reset(&adapter->hw)) { 4307 if (e1000_phy_reset(&adapter->hw)) {
4211 spin_unlock_irqrestore( 4308 spin_unlock_irqrestore(
4212 &adapter->stats_lock, flags); 4309 &adapter->stats_lock, flags);
4213 return -EIO; 4310 return -EIO;
@@ -4217,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4217 } else { 4314 } else {
4218 switch (data->reg_num) { 4315 switch (data->reg_num) {
4219 case PHY_CTRL: 4316 case PHY_CTRL:
4220 if(mii_reg & MII_CR_POWER_DOWN) 4317 if (mii_reg & MII_CR_POWER_DOWN)
4221 break; 4318 break;
4222 if(netif_running(adapter->netdev)) { 4319 if (netif_running(adapter->netdev)) {
4223 e1000_down(adapter); 4320 e1000_down(adapter);
4224 e1000_up(adapter); 4321 e1000_up(adapter);
4225 } else 4322 } else
@@ -4241,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
4241 struct e1000_adapter *adapter = hw->back; 4338 struct e1000_adapter *adapter = hw->back;
4242 int ret_val = pci_set_mwi(adapter->pdev); 4339 int ret_val = pci_set_mwi(adapter->pdev);
4243 4340
4244 if(ret_val) 4341 if (ret_val)
4245 DPRINTK(PROBE, ERR, "Error in setting MWI\n"); 4342 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4246} 4343}
4247 4344
@@ -4290,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4290 e1000_irq_disable(adapter); 4387 e1000_irq_disable(adapter);
4291 adapter->vlgrp = grp; 4388 adapter->vlgrp = grp;
4292 4389
4293 if(grp) { 4390 if (grp) {
4294 /* enable VLAN tag insert/strip */ 4391 /* enable VLAN tag insert/strip */
4295 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4392 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4296 ctrl |= E1000_CTRL_VME; 4393 ctrl |= E1000_CTRL_VME;
@@ -4312,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4312 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4409 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4313 rctl &= ~E1000_RCTL_VFE; 4410 rctl &= ~E1000_RCTL_VFE;
4314 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4411 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4315 if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { 4412 if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
4316 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4413 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4414 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4318 } 4415 }
@@ -4326,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
4326{ 4423{
4327 struct e1000_adapter *adapter = netdev_priv(netdev); 4424 struct e1000_adapter *adapter = netdev_priv(netdev);
4328 uint32_t vfta, index; 4425 uint32_t vfta, index;
4329 if((adapter->hw.mng_cookie.status & 4426
4330 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4427 if ((adapter->hw.mng_cookie.status &
4331 (vid == adapter->mng_vlan_id)) 4428 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4429 (vid == adapter->mng_vlan_id))
4332 return; 4430 return;
4333 /* add VID to filter table */ 4431 /* add VID to filter table */
4334 index = (vid >> 5) & 0x7F; 4432 index = (vid >> 5) & 0x7F;
@@ -4345,13 +4443,13 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4345 4443
4346 e1000_irq_disable(adapter); 4444 e1000_irq_disable(adapter);
4347 4445
4348 if(adapter->vlgrp) 4446 if (adapter->vlgrp)
4349 adapter->vlgrp->vlan_devices[vid] = NULL; 4447 adapter->vlgrp->vlan_devices[vid] = NULL;
4350 4448
4351 e1000_irq_enable(adapter); 4449 e1000_irq_enable(adapter);
4352 4450
4353 if((adapter->hw.mng_cookie.status & 4451 if ((adapter->hw.mng_cookie.status &
4354 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4452 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4355 (vid == adapter->mng_vlan_id)) { 4453 (vid == adapter->mng_vlan_id)) {
4356 /* release control to f/w */ 4454 /* release control to f/w */
4357 e1000_release_hw_control(adapter); 4455 e1000_release_hw_control(adapter);
@@ -4370,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
4370{ 4468{
4371 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); 4469 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4372 4470
4373 if(adapter->vlgrp) { 4471 if (adapter->vlgrp) {
4374 uint16_t vid; 4472 uint16_t vid;
4375 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 4473 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4376 if(!adapter->vlgrp->vlan_devices[vid]) 4474 if (!adapter->vlgrp->vlan_devices[vid])
4377 continue; 4475 continue;
4378 e1000_vlan_rx_add_vid(adapter->netdev, vid); 4476 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4379 } 4477 }
@@ -4386,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4386 adapter->hw.autoneg = 0; 4484 adapter->hw.autoneg = 0;
4387 4485
4388 /* Fiber NICs only allow 1000 gbps Full duplex */ 4486 /* Fiber NICs only allow 1000 gbps Full duplex */
4389 if((adapter->hw.media_type == e1000_media_type_fiber) && 4487 if ((adapter->hw.media_type == e1000_media_type_fiber) &&
4390 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4488 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4391 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4489 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4392 return -EINVAL; 4490 return -EINVAL;
4393 } 4491 }
4394 4492
4395 switch(spddplx) { 4493 switch (spddplx) {
4396 case SPEED_10 + DUPLEX_HALF: 4494 case SPEED_10 + DUPLEX_HALF:
4397 adapter->hw.forced_speed_duplex = e1000_10_half; 4495 adapter->hw.forced_speed_duplex = e1000_10_half;
4398 break; 4496 break;
@@ -4418,6 +4516,54 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4418} 4516}
4419 4517
4420#ifdef CONFIG_PM 4518#ifdef CONFIG_PM
4519/* these functions save and restore 16 or 64 dwords (64-256 bytes) of config
4520 * space versus the 64 bytes that pci_[save|restore]_state handle
4521 */
4522#define PCIE_CONFIG_SPACE_LEN 256
4523#define PCI_CONFIG_SPACE_LEN 64
4524static int
4525e1000_pci_save_state(struct e1000_adapter *adapter)
4526{
4527 struct pci_dev *dev = adapter->pdev;
4528 int size;
4529 int i;
4530 if (adapter->hw.mac_type >= e1000_82571)
4531 size = PCIE_CONFIG_SPACE_LEN;
4532 else
4533 size = PCI_CONFIG_SPACE_LEN;
4534
4535 WARN_ON(adapter->config_space != NULL);
4536
4537 adapter->config_space = kmalloc(size, GFP_KERNEL);
4538 if (!adapter->config_space) {
4539 DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
4540 return -ENOMEM;
4541 }
4542 for (i = 0; i < (size / 4); i++)
4543 pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
4544 return 0;
4545}
4546
4547static void
4548e1000_pci_restore_state(struct e1000_adapter *adapter)
4549{
4550 struct pci_dev *dev = adapter->pdev;
4551 int size;
4552 int i;
4553 if (adapter->config_space == NULL)
4554 return;
4555 if (adapter->hw.mac_type >= e1000_82571)
4556 size = PCIE_CONFIG_SPACE_LEN;
4557 else
4558 size = PCI_CONFIG_SPACE_LEN;
4559 for (i = 0; i < (size / 4); i++)
4560 pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
4561 kfree(adapter->config_space);
4562 adapter->config_space = NULL;
4563 return;
4564}
4565#endif /* CONFIG_PM */
4566
4421static int 4567static int
4422e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4568e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4423{ 4569{
@@ -4429,25 +4575,33 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4429 4575
4430 netif_device_detach(netdev); 4576 netif_device_detach(netdev);
4431 4577
4432 if(netif_running(netdev)) 4578 if (netif_running(netdev))
4433 e1000_down(adapter); 4579 e1000_down(adapter);
4434 4580
4581#ifdef CONFIG_PM
4582 /* implement our own version of pci_save_state(pdev) because pci
4583 * express adapters have larger 256 byte config spaces */
4584 retval = e1000_pci_save_state(adapter);
4585 if (retval)
4586 return retval;
4587#endif
4588
4435 status = E1000_READ_REG(&adapter->hw, STATUS); 4589 status = E1000_READ_REG(&adapter->hw, STATUS);
4436 if(status & E1000_STATUS_LU) 4590 if (status & E1000_STATUS_LU)
4437 wufc &= ~E1000_WUFC_LNKC; 4591 wufc &= ~E1000_WUFC_LNKC;
4438 4592
4439 if(wufc) { 4593 if (wufc) {
4440 e1000_setup_rctl(adapter); 4594 e1000_setup_rctl(adapter);
4441 e1000_set_multi(netdev); 4595 e1000_set_multi(netdev);
4442 4596
4443 /* turn on all-multi mode if wake on multicast is enabled */ 4597 /* turn on all-multi mode if wake on multicast is enabled */
4444 if(adapter->wol & E1000_WUFC_MC) { 4598 if (adapter->wol & E1000_WUFC_MC) {
4445 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4599 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4446 rctl |= E1000_RCTL_MPE; 4600 rctl |= E1000_RCTL_MPE;
4447 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4601 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4448 } 4602 }
4449 4603
4450 if(adapter->hw.mac_type >= e1000_82540) { 4604 if (adapter->hw.mac_type >= e1000_82540) {
4451 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4605 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4452 /* advertise wake from D3Cold */ 4606 /* advertise wake from D3Cold */
4453 #define E1000_CTRL_ADVD3WUC 0x00100000 4607 #define E1000_CTRL_ADVD3WUC 0x00100000
@@ -4458,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4458 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 4612 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4459 } 4613 }
4460 4614
4461 if(adapter->hw.media_type == e1000_media_type_fiber || 4615 if (adapter->hw.media_type == e1000_media_type_fiber ||
4462 adapter->hw.media_type == e1000_media_type_internal_serdes) { 4616 adapter->hw.media_type == e1000_media_type_internal_serdes) {
4463 /* keep the laser running in D3 */ 4617 /* keep the laser running in D3 */
4464 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4618 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4488,12 +4642,10 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4488 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); 4642 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4489 } 4643 }
4490 4644
4491 pci_save_state(pdev); 4645 if (adapter->hw.mac_type >= e1000_82540 &&
4492
4493 if(adapter->hw.mac_type >= e1000_82540 &&
4494 adapter->hw.media_type == e1000_media_type_copper) { 4646 adapter->hw.media_type == e1000_media_type_copper) {
4495 manc = E1000_READ_REG(&adapter->hw, MANC); 4647 manc = E1000_READ_REG(&adapter->hw, MANC);
4496 if(manc & E1000_MANC_SMBUS_EN) { 4648 if (manc & E1000_MANC_SMBUS_EN) {
4497 manc |= E1000_MANC_ARP_EN; 4649 manc |= E1000_MANC_ARP_EN;
4498 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4650 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4499 retval = pci_enable_wake(pdev, PCI_D3hot, 1); 4651 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
@@ -4518,6 +4670,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4518 return 0; 4670 return 0;
4519} 4671}
4520 4672
4673#ifdef CONFIG_PM
4521static int 4674static int
4522e1000_resume(struct pci_dev *pdev) 4675e1000_resume(struct pci_dev *pdev)
4523{ 4676{
@@ -4529,6 +4682,7 @@ e1000_resume(struct pci_dev *pdev)
4529 retval = pci_set_power_state(pdev, PCI_D0); 4682 retval = pci_set_power_state(pdev, PCI_D0);
4530 if (retval) 4683 if (retval)
4531 DPRINTK(PROBE, ERR, "Error in setting power state\n"); 4684 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4685 e1000_pci_restore_state(adapter);
4532 ret_val = pci_enable_device(pdev); 4686 ret_val = pci_enable_device(pdev);
4533 pci_set_master(pdev); 4687 pci_set_master(pdev);
4534 4688
@@ -4542,12 +4696,12 @@ e1000_resume(struct pci_dev *pdev)
4542 e1000_reset(adapter); 4696 e1000_reset(adapter);
4543 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 4697 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
4544 4698
4545 if(netif_running(netdev)) 4699 if (netif_running(netdev))
4546 e1000_up(adapter); 4700 e1000_up(adapter);
4547 4701
4548 netif_device_attach(netdev); 4702 netif_device_attach(netdev);
4549 4703
4550 if(adapter->hw.mac_type >= e1000_82540 && 4704 if (adapter->hw.mac_type >= e1000_82540 &&
4551 adapter->hw.media_type == e1000_media_type_copper) { 4705 adapter->hw.media_type == e1000_media_type_copper) {
4552 manc = E1000_READ_REG(&adapter->hw, MANC); 4706 manc = E1000_READ_REG(&adapter->hw, MANC);
4553 manc &= ~(E1000_MANC_ARP_EN); 4707 manc &= ~(E1000_MANC_ARP_EN);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index aac64de6143..9790db974dc 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -47,7 +47,7 @@
47 BUG(); \ 47 BUG(); \
48 } else { \ 48 } else { \
49 msleep(x); \ 49 msleep(x); \
50 } } while(0) 50 } } while (0)
51 51
52/* Some workarounds require millisecond delays and are run during interrupt 52/* Some workarounds require millisecond delays and are run during interrupt
53 * context. Most notably, when establishing link, the phy may need tweaking 53 * context. Most notably, when establishing link, the phy may need tweaking
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 0a7918c6255..3768d83cd57 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -227,7 +227,7 @@ static int __devinit
227e1000_validate_option(int *value, struct e1000_option *opt, 227e1000_validate_option(int *value, struct e1000_option *opt,
228 struct e1000_adapter *adapter) 228 struct e1000_adapter *adapter)
229{ 229{
230 if(*value == OPTION_UNSET) { 230 if (*value == OPTION_UNSET) {
231 *value = opt->def; 231 *value = opt->def;
232 return 0; 232 return 0;
233 } 233 }
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
244 } 244 }
245 break; 245 break;
246 case range_option: 246 case range_option:
247 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 247 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
248 DPRINTK(PROBE, INFO, 248 DPRINTK(PROBE, INFO,
249 "%s set to %i\n", opt->name, *value); 249 "%s set to %i\n", opt->name, *value);
250 return 0; 250 return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
254 int i; 254 int i;
255 struct e1000_opt_list *ent; 255 struct e1000_opt_list *ent;
256 256
257 for(i = 0; i < opt->arg.l.nr; i++) { 257 for (i = 0; i < opt->arg.l.nr; i++) {
258 ent = &opt->arg.l.p[i]; 258 ent = &opt->arg.l.p[i];
259 if(*value == ent->i) { 259 if (*value == ent->i) {
260 if(ent->str[0] != '\0') 260 if (ent->str[0] != '\0')
261 DPRINTK(PROBE, INFO, "%s\n", ent->str); 261 DPRINTK(PROBE, INFO, "%s\n", ent->str);
262 return 0; 262 return 0;
263 } 263 }
@@ -291,7 +291,7 @@ void __devinit
291e1000_check_options(struct e1000_adapter *adapter) 291e1000_check_options(struct e1000_adapter *adapter)
292{ 292{
293 int bd = adapter->bd_number; 293 int bd = adapter->bd_number;
294 if(bd >= E1000_MAX_NIC) { 294 if (bd >= E1000_MAX_NIC) {
295 DPRINTK(PROBE, NOTICE, 295 DPRINTK(PROBE, NOTICE,
296 "Warning: no configuration for board #%i\n", bd); 296 "Warning: no configuration for board #%i\n", bd);
297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); 297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,7 +315,7 @@ e1000_check_options(struct e1000_adapter *adapter)
315 if (num_TxDescriptors > bd) { 315 if (num_TxDescriptors > bd) {
316 tx_ring->count = TxDescriptors[bd]; 316 tx_ring->count = TxDescriptors[bd];
317 e1000_validate_option(&tx_ring->count, &opt, adapter); 317 e1000_validate_option(&tx_ring->count, &opt, adapter);
318 E1000_ROUNDUP(tx_ring->count, 318 E1000_ROUNDUP(tx_ring->count,
319 REQ_TX_DESCRIPTOR_MULTIPLE); 319 REQ_TX_DESCRIPTOR_MULTIPLE);
320 } else { 320 } else {
321 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
@@ -341,7 +341,7 @@ e1000_check_options(struct e1000_adapter *adapter)
341 if (num_RxDescriptors > bd) { 341 if (num_RxDescriptors > bd) {
342 rx_ring->count = RxDescriptors[bd]; 342 rx_ring->count = RxDescriptors[bd];
343 e1000_validate_option(&rx_ring->count, &opt, adapter); 343 e1000_validate_option(&rx_ring->count, &opt, adapter);
344 E1000_ROUNDUP(rx_ring->count, 344 E1000_ROUNDUP(rx_ring->count,
345 REQ_RX_DESCRIPTOR_MULTIPLE); 345 REQ_RX_DESCRIPTOR_MULTIPLE);
346 } else { 346 } else {
347 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
403 403
404 if (num_TxIntDelay > bd) { 404 if (num_TxIntDelay > bd) {
405 adapter->tx_int_delay = TxIntDelay[bd]; 405 adapter->tx_int_delay = TxIntDelay[bd];
406 e1000_validate_option(&adapter->tx_int_delay, &opt, 406 e1000_validate_option(&adapter->tx_int_delay, &opt,
407 adapter); 407 adapter);
408 } else { 408 } else {
409 adapter->tx_int_delay = opt.def; 409 adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
421 421
422 if (num_TxAbsIntDelay > bd) { 422 if (num_TxAbsIntDelay > bd) {
423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; 423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
425 adapter); 425 adapter);
426 } else { 426 } else {
427 adapter->tx_abs_int_delay = opt.def; 427 adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
439 439
440 if (num_RxIntDelay > bd) { 440 if (num_RxIntDelay > bd) {
441 adapter->rx_int_delay = RxIntDelay[bd]; 441 adapter->rx_int_delay = RxIntDelay[bd];
442 e1000_validate_option(&adapter->rx_int_delay, &opt, 442 e1000_validate_option(&adapter->rx_int_delay, &opt,
443 adapter); 443 adapter);
444 } else { 444 } else {
445 adapter->rx_int_delay = opt.def; 445 adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
457 457
458 if (num_RxAbsIntDelay > bd) { 458 if (num_RxAbsIntDelay > bd) {
459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; 459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
461 adapter); 461 adapter);
462 } else { 462 } else {
463 adapter->rx_abs_int_delay = opt.def; 463 adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
475 475
476 if (num_InterruptThrottleRate > bd) { 476 if (num_InterruptThrottleRate > bd) {
477 adapter->itr = InterruptThrottleRate[bd]; 477 adapter->itr = InterruptThrottleRate[bd];
478 switch(adapter->itr) { 478 switch (adapter->itr) {
479 case 0: 479 case 0:
480 DPRINTK(PROBE, INFO, "%s turned off\n", 480 DPRINTK(PROBE, INFO, "%s turned off\n",
481 opt.name); 481 opt.name);
482 break; 482 break;
483 case 1: 483 case 1:
484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
485 opt.name); 485 opt.name);
486 break; 486 break;
487 default: 487 default:
488 e1000_validate_option(&adapter->itr, &opt, 488 e1000_validate_option(&adapter->itr, &opt,
489 adapter); 489 adapter);
490 break; 490 break;
491 } 491 }
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
494 } 494 }
495 } 495 }
496 496
497 switch(adapter->hw.media_type) { 497 switch (adapter->hw.media_type) {
498 case e1000_media_type_fiber: 498 case e1000_media_type_fiber:
499 case e1000_media_type_internal_serdes: 499 case e1000_media_type_internal_serdes:
500 e1000_check_fiber_options(adapter); 500 e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
518e1000_check_fiber_options(struct e1000_adapter *adapter) 518e1000_check_fiber_options(struct e1000_adapter *adapter)
519{ 519{
520 int bd = adapter->bd_number; 520 int bd = adapter->bd_number;
521 if(num_Speed > bd) { 521 if (num_Speed > bd) {
522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
523 "parameter ignored\n"); 523 "parameter ignored\n");
524 } 524 }
525 525
526 if(num_Duplex > bd) { 526 if (num_Duplex > bd) {
527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
528 "parameter ignored\n"); 528 "parameter ignored\n");
529 } 529 }
530 530
531 if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 531 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
533 "not valid for fiber adapters, " 533 "not valid for fiber adapters, "
534 "parameter ignored\n"); 534 "parameter ignored\n");
@@ -598,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
598 } 598 }
599 } 599 }
600 600
601 if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 601 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
602 DPRINTK(PROBE, INFO, 602 DPRINTK(PROBE, INFO,
603 "AutoNeg specified along with Speed or Duplex, " 603 "AutoNeg specified along with Speed or Duplex, "
604 "parameter ignored\n"); 604 "parameter ignored\n");
@@ -659,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
659 switch (speed + dplx) { 659 switch (speed + dplx) {
660 case 0: 660 case 0:
661 adapter->hw.autoneg = adapter->fc_autoneg = 1; 661 adapter->hw.autoneg = adapter->fc_autoneg = 1;
662 if((num_Speed > bd) && (speed != 0 || dplx != 0)) 662 if ((num_Speed > bd) && (speed != 0 || dplx != 0))
663 DPRINTK(PROBE, INFO, 663 DPRINTK(PROBE, INFO,
664 "Speed and duplex autonegotiation enabled\n"); 664 "Speed and duplex autonegotiation enabled\n");
665 break; 665 break;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index d9ce8c54941..bc36edff205 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2595,15 +2595,11 @@ static int __init serial8250_init(void)
2595 if (ret) 2595 if (ret)
2596 goto out; 2596 goto out;
2597 2597
2598 ret = platform_driver_register(&serial8250_isa_driver);
2599 if (ret)
2600 goto unreg_uart_drv;
2601
2602 serial8250_isa_devs = platform_device_alloc("serial8250", 2598 serial8250_isa_devs = platform_device_alloc("serial8250",
2603 PLAT8250_DEV_LEGACY); 2599 PLAT8250_DEV_LEGACY);
2604 if (!serial8250_isa_devs) { 2600 if (!serial8250_isa_devs) {
2605 ret = -ENOMEM; 2601 ret = -ENOMEM;
2606 goto unreg_plat_drv; 2602 goto unreg_uart_drv;
2607 } 2603 }
2608 2604
2609 ret = platform_device_add(serial8250_isa_devs); 2605 ret = platform_device_add(serial8250_isa_devs);
@@ -2612,12 +2608,13 @@ static int __init serial8250_init(void)
2612 2608
2613 serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev); 2609 serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
2614 2610
2615 goto out; 2611 ret = platform_driver_register(&serial8250_isa_driver);
2612 if (ret == 0)
2613 goto out;
2616 2614
2615 platform_device_del(serial8250_isa_devs);
2617 put_dev: 2616 put_dev:
2618 platform_device_put(serial8250_isa_devs); 2617 platform_device_put(serial8250_isa_devs);
2619 unreg_plat_drv:
2620 platform_driver_unregister(&serial8250_isa_driver);
2621 unreg_uart_drv: 2618 unreg_uart_drv:
2622 uart_unregister_driver(&serial8250_reg); 2619 uart_unregister_driver(&serial8250_reg);
2623 out: 2620 out:
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 589fb076654..2a912153321 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -940,6 +940,7 @@ enum pci_board_num_t {
940 pbn_b2_bt_2_921600, 940 pbn_b2_bt_2_921600,
941 pbn_b2_bt_4_921600, 941 pbn_b2_bt_4_921600,
942 942
943 pbn_b3_2_115200,
943 pbn_b3_4_115200, 944 pbn_b3_4_115200,
944 pbn_b3_8_115200, 945 pbn_b3_8_115200,
945 946
@@ -1311,6 +1312,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1311 .uart_offset = 8, 1312 .uart_offset = 8,
1312 }, 1313 },
1313 1314
1315 [pbn_b3_2_115200] = {
1316 .flags = FL_BASE3,
1317 .num_ports = 2,
1318 .base_baud = 115200,
1319 .uart_offset = 8,
1320 },
1314 [pbn_b3_4_115200] = { 1321 [pbn_b3_4_115200] = {
1315 .flags = FL_BASE3, 1322 .flags = FL_BASE3,
1316 .num_ports = 4, 1323 .num_ports = 4,
@@ -2272,6 +2279,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2279 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2273 pbn_nec_nile4 }, 2280 pbn_nec_nile4 },
2274 2281
2282 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
2283 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2284 pbn_b3_2_115200 },
2275 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4, 2285 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4,
2276 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2286 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2277 pbn_b3_4_115200 }, 2287 pbn_b3_4_115200 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 5e7199f7b59..9fd1925de36 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -301,7 +301,7 @@ config SERIAL_AT91_TTYAT
301 depends on SERIAL_AT91=y 301 depends on SERIAL_AT91=y
302 help 302 help
303 Say Y here if you wish to have the five internal AT91RM9200 UARTs 303 Say Y here if you wish to have the five internal AT91RM9200 UARTs
304 appear as /dev/ttyAT0-4 (major 240, minor 0-4) instead of the 304 appear as /dev/ttyAT0-4 (major 204, minor 154-158) instead of the
305 normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if 305 normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if
306 you also want other UARTs, such as external 8250/16C550 compatible 306 you also want other UARTs, such as external 8250/16C550 compatible
307 UARTs. 307 UARTs.
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
index 0e206063d68..2113feb75c3 100644
--- a/drivers/serial/at91_serial.c
+++ b/drivers/serial/at91_serial.c
@@ -222,8 +222,6 @@ static void at91_rx_chars(struct uart_port *port, struct pt_regs *regs)
222 while (status & (AT91_US_RXRDY)) { 222 while (status & (AT91_US_RXRDY)) {
223 ch = UART_GET_CHAR(port); 223 ch = UART_GET_CHAR(port);
224 224
225 if (tty->flip.count >= TTY_FLIPBUF_SIZE)
226 goto ignore_char;
227 port->icount.rx++; 225 port->icount.rx++;
228 226
229 flg = TTY_NORMAL; 227 flg = TTY_NORMAL;
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 5468e5a767e..43e67d6c29d 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -6,7 +6,7 @@
6 * driver for that. 6 * driver for that.
7 * 7 *
8 * 8 *
9 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 9 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of version 2 of the GNU General Public License 12 * under the terms of version 2 of the GNU General Public License
@@ -829,8 +829,8 @@ static int __init sn_sal_module_init(void)
829 misc.name = DEVICE_NAME_DYNAMIC; 829 misc.name = DEVICE_NAME_DYNAMIC;
830 retval = misc_register(&misc); 830 retval = misc_register(&misc);
831 if (retval != 0) { 831 if (retval != 0) {
832 printk 832 printk(KERN_WARNING "Failed to register console "
833 ("Failed to register console device using misc_register.\n"); 833 "device using misc_register.\n");
834 return -ENODEV; 834 return -ENODEV;
835 } 835 }
836 sal_console_uart.major = MISC_MAJOR; 836 sal_console_uart.major = MISC_MAJOR;
@@ -942,88 +942,75 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
942{ 942{
943 unsigned long flags = 0; 943 unsigned long flags = 0;
944 struct sn_cons_port *port = &sal_console_port; 944 struct sn_cons_port *port = &sal_console_port;
945#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
946 static int stole_lock = 0; 945 static int stole_lock = 0;
947#endif
948 946
949 BUG_ON(!port->sc_is_asynch); 947 BUG_ON(!port->sc_is_asynch);
950 948
951 /* We can't look at the xmit buffer if we're not registered with serial core 949 /* We can't look at the xmit buffer if we're not registered with serial core
952 * yet. So only do the fancy recovery after registering 950 * yet. So only do the fancy recovery after registering
953 */ 951 */
954 if (port->sc_port.info) { 952 if (!port->sc_port.info) {
955 953 /* Not yet registered with serial core - simple case */
956 /* somebody really wants this output, might be an 954 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
957 * oops, kdb, panic, etc. make sure they get it. */ 955 return;
958#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 956 }
959 if (spin_is_locked(&port->sc_port.lock)) {
960 int lhead = port->sc_port.info->xmit.head;
961 int ltail = port->sc_port.info->xmit.tail;
962 int counter, got_lock = 0;
963 957
964 /* 958 /* somebody really wants this output, might be an
965 * We attempt to determine if someone has died with the 959 * oops, kdb, panic, etc. make sure they get it. */
966 * lock. We wait ~20 secs after the head and tail ptrs 960 if (spin_is_locked(&port->sc_port.lock)) {
967 * stop moving and assume the lock holder is not functional 961 int lhead = port->sc_port.info->xmit.head;
968 * and plow ahead. If the lock is freed within the time out 962 int ltail = port->sc_port.info->xmit.tail;
969 * period we re-get the lock and go ahead normally. We also 963 int counter, got_lock = 0;
970 * remember if we have plowed ahead so that we don't have 964
971 * to wait out the time out period again - the asumption 965 /*
972 * is that we will time out again. 966 * We attempt to determine if someone has died with the
973 */ 967 * lock. We wait ~20 secs after the head and tail ptrs
968 * stop moving and assume the lock holder is not functional
969 * and plow ahead. If the lock is freed within the time out
970 * period we re-get the lock and go ahead normally. We also
971 * remember if we have plowed ahead so that we don't have
972 * to wait out the time out period again - the asumption
973 * is that we will time out again.
974 */
974 975
975 for (counter = 0; counter < 150; mdelay(125), counter++) { 976 for (counter = 0; counter < 150; mdelay(125), counter++) {
976 if (!spin_is_locked(&port->sc_port.lock) 977 if (!spin_is_locked(&port->sc_port.lock)
977 || stole_lock) { 978 || stole_lock) {
978 if (!stole_lock) { 979 if (!stole_lock) {
979 spin_lock_irqsave(&port-> 980 spin_lock_irqsave(&port->sc_port.lock,
980 sc_port.lock, 981 flags);
981 flags); 982 got_lock = 1;
982 got_lock = 1;
983 }
984 break;
985 } else {
986 /* still locked */
987 if ((lhead !=
988 port->sc_port.info->xmit.head)
989 || (ltail !=
990 port->sc_port.info->xmit.
991 tail)) {
992 lhead =
993 port->sc_port.info->xmit.
994 head;
995 ltail =
996 port->sc_port.info->xmit.
997 tail;
998 counter = 0;
999 }
1000 } 983 }
1001 } 984 break;
1002 /* flush anything in the serial core xmit buffer, raw */
1003 sn_transmit_chars(port, 1);
1004 if (got_lock) {
1005 spin_unlock_irqrestore(&port->sc_port.lock,
1006 flags);
1007 stole_lock = 0;
1008 } else { 985 } else {
1009 /* fell thru */ 986 /* still locked */
1010 stole_lock = 1; 987 if ((lhead != port->sc_port.info->xmit.head)
988 || (ltail !=
989 port->sc_port.info->xmit.tail)) {
990 lhead =
991 port->sc_port.info->xmit.head;
992 ltail =
993 port->sc_port.info->xmit.tail;
994 counter = 0;
995 }
1011 } 996 }
1012 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); 997 }
1013 } else { 998 /* flush anything in the serial core xmit buffer, raw */
1014 stole_lock = 0; 999 sn_transmit_chars(port, 1);
1015#endif 1000 if (got_lock) {
1016 spin_lock_irqsave(&port->sc_port.lock, flags);
1017 sn_transmit_chars(port, 1);
1018 spin_unlock_irqrestore(&port->sc_port.lock, flags); 1001 spin_unlock_irqrestore(&port->sc_port.lock, flags);
1019 1002 stole_lock = 0;
1020 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); 1003 } else {
1021#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 1004 /* fell thru */
1005 stole_lock = 1;
1022 } 1006 }
1023#endif 1007 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
1024 } 1008 } else {
1025 else { 1009 stole_lock = 0;
1026 /* Not yet registered with serial core - simple case */ 1010 spin_lock_irqsave(&port->sc_port.lock, flags);
1011 sn_transmit_chars(port, 1);
1012 spin_unlock_irqrestore(&port->sc_port.lock, flags);
1013
1027 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); 1014 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
1028 } 1015 }
1029} 1016}
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 5fc4a62173d..fa4ae94243c 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -34,6 +34,7 @@ sunserial_console_termios(struct console *con)
34 char *mode_prop = "ttyX-mode"; 34 char *mode_prop = "ttyX-mode";
35 char *cd_prop = "ttyX-ignore-cd"; 35 char *cd_prop = "ttyX-ignore-cd";
36 char *dtr_prop = "ttyX-rts-dtr-off"; 36 char *dtr_prop = "ttyX-rts-dtr-off";
37 char *ssp_console_modes_prop = "ssp-console-modes";
37 int baud, bits, stop, cflag; 38 int baud, bits, stop, cflag;
38 char parity; 39 char parity;
39 int carrier = 0; 40 int carrier = 0;
@@ -43,14 +44,39 @@ sunserial_console_termios(struct console *con)
43 if (!serial_console) 44 if (!serial_console)
44 return; 45 return;
45 46
46 if (serial_console == 1) { 47 switch (serial_console) {
48 case PROMDEV_OTTYA:
47 mode_prop[3] = 'a'; 49 mode_prop[3] = 'a';
48 cd_prop[3] = 'a'; 50 cd_prop[3] = 'a';
49 dtr_prop[3] = 'a'; 51 dtr_prop[3] = 'a';
50 } else { 52 break;
53
54 case PROMDEV_OTTYB:
51 mode_prop[3] = 'b'; 55 mode_prop[3] = 'b';
52 cd_prop[3] = 'b'; 56 cd_prop[3] = 'b';
53 dtr_prop[3] = 'b'; 57 dtr_prop[3] = 'b';
58 break;
59
60 case PROMDEV_ORSC:
61
62 nd = prom_pathtoinode("rsc");
63 if (!nd) {
64 strcpy(mode, "115200,8,n,1,-");
65 goto no_options;
66 }
67
68 if (!prom_node_has_property(nd, ssp_console_modes_prop)) {
69 strcpy(mode, "115200,8,n,1,-");
70 goto no_options;
71 }
72
73 memset(mode, 0, sizeof(mode));
74 prom_getstring(nd, ssp_console_modes_prop, mode, sizeof(mode));
75 goto no_options;
76
77 default:
78 strcpy(mode, "9600,8,n,1,-");
79 goto no_options;
54 } 80 }
55 81
56 topnd = prom_getchild(prom_root_node); 82 topnd = prom_getchild(prom_root_node);
@@ -110,6 +136,10 @@ no_options:
110 case 9600: cflag |= B9600; break; 136 case 9600: cflag |= B9600; break;
111 case 19200: cflag |= B19200; break; 137 case 19200: cflag |= B19200; break;
112 case 38400: cflag |= B38400; break; 138 case 38400: cflag |= B38400; break;
139 case 57600: cflag |= B57600; break;
140 case 115200: cflag |= B115200; break;
141 case 230400: cflag |= B230400; break;
142 case 460800: cflag |= B460800; break;
113 default: baud = 9600; cflag |= B9600; break; 143 default: baud = 9600; cflag |= B9600; break;
114 } 144 }
115 145
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 7e773ff76c6..8bcaebcc0ad 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -897,9 +897,6 @@ static int sunsab_console_setup(struct console *con, char *options)
897 897
898 sunserial_console_termios(con); 898 sunserial_console_termios(con);
899 899
900 /* Firmware console speed is limited to 150-->38400 baud so
901 * this hackish cflag thing is OK.
902 */
903 switch (con->cflag & CBAUD) { 900 switch (con->cflag & CBAUD) {
904 case B150: baud = 150; break; 901 case B150: baud = 150; break;
905 case B300: baud = 300; break; 902 case B300: baud = 300; break;
@@ -910,6 +907,10 @@ static int sunsab_console_setup(struct console *con, char *options)
910 default: case B9600: baud = 9600; break; 907 default: case B9600: baud = 9600; break;
911 case B19200: baud = 19200; break; 908 case B19200: baud = 19200; break;
912 case B38400: baud = 38400; break; 909 case B38400: baud = 38400; break;
910 case B57600: baud = 57600; break;
911 case B115200: baud = 115200; break;
912 case B230400: baud = 230400; break;
913 case B460800: baud = 460800; break;
913 }; 914 };
914 915
915 /* 916 /*
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index 3d023089707..2f4ce43f7b6 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_9P_FS) := 9p2000.o
8 conv.o \ 8 conv.o \
9 vfs_super.o \ 9 vfs_super.o \
10 vfs_inode.o \ 10 vfs_inode.o \
11 vfs_addr.o \
11 vfs_file.o \ 12 vfs_file.o \
12 vfs_dir.o \ 13 vfs_dir.o \
13 vfs_dentry.o \ 14 vfs_dentry.o \
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index c78502ad00e..69cf2905dc9 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -39,6 +39,7 @@
39 */ 39 */
40 40
41extern struct file_system_type v9fs_fs_type; 41extern struct file_system_type v9fs_fs_type;
42extern struct address_space_operations v9fs_addr_operations;
42extern struct file_operations v9fs_file_operations; 43extern struct file_operations v9fs_file_operations;
43extern struct file_operations v9fs_dir_operations; 44extern struct file_operations v9fs_dir_operations;
44extern struct dentry_operations v9fs_dentry_operations; 45extern struct dentry_operations v9fs_dentry_operations;
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
new file mode 100644
index 00000000000..8100fb5171b
--- /dev/null
+++ b/fs/9p/vfs_addr.c
@@ -0,0 +1,109 @@
1/*
2 * linux/fs/9p/vfs_addr.c
3 *
4 * This file contians vfs address (mmap) ops for 9P2000.
5 *
6 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
7 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to:
21 * Free Software Foundation
22 * 51 Franklin Street, Fifth Floor
23 * Boston, MA 02111-1301 USA
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/errno.h>
29#include <linux/fs.h>
30#include <linux/file.h>
31#include <linux/stat.h>
32#include <linux/string.h>
33#include <linux/smp_lock.h>
34#include <linux/inet.h>
35#include <linux/version.h>
36#include <linux/pagemap.h>
37#include <linux/idr.h>
38
39#include "debug.h"
40#include "v9fs.h"
41#include "9p.h"
42#include "v9fs_vfs.h"
43#include "fid.h"
44
45/**
46 * v9fs_vfs_readpage - read an entire page in from 9P
47 *
48 * @file: file being read
49 * @page: structure to page
50 *
51 */
52
53static int v9fs_vfs_readpage(struct file *filp, struct page *page)
54{
55 char *buffer = NULL;
56 int retval = -EIO;
57 loff_t offset = page_offset(page);
58 int count = PAGE_CACHE_SIZE;
59 struct inode *inode = filp->f_dentry->d_inode;
60 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
61 int rsize = v9ses->maxdata - V9FS_IOHDRSZ;
62 struct v9fs_fid *v9f = filp->private_data;
63 struct v9fs_fcall *fcall = NULL;
64 int fid = v9f->fid;
65 int total = 0;
66 int result = 0;
67
68 buffer = kmap(page);
69 do {
70 if (count < rsize)
71 rsize = count;
72
73 result = v9fs_t_read(v9ses, fid, offset, rsize, &fcall);
74
75 if (result < 0) {
76 printk(KERN_ERR "v9fs_t_read returned %d\n",
77 result);
78
79 kfree(fcall);
80 goto UnmapAndUnlock;
81 } else
82 offset += result;
83
84 memcpy(buffer, fcall->params.rread.data, result);
85
86 count -= result;
87 buffer += result;
88 total += result;
89
90 kfree(fcall);
91
92 if (result < rsize)
93 break;
94 } while (count);
95
96 memset(buffer, 0, count);
97 flush_dcache_page(page);
98 SetPageUptodate(page);
99 retval = 0;
100
101UnmapAndUnlock:
102 kunmap(page);
103 unlock_page(page);
104 return retval;
105}
106
107struct address_space_operations v9fs_addr_operations = {
108 .readpage = v9fs_vfs_readpage,
109};
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 6852f0eb96e..c7e14d91721 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -289,6 +289,9 @@ v9fs_file_write(struct file *filp, const char __user * data,
289 total += result; 289 total += result;
290 } while (count); 290 } while (count);
291 291
292 if(inode->i_mapping->nrpages)
293 invalidate_inode_pages2(inode->i_mapping);
294
292 return total; 295 return total;
293} 296}
294 297
@@ -299,4 +302,5 @@ struct file_operations v9fs_file_operations = {
299 .open = v9fs_file_open, 302 .open = v9fs_file_open,
300 .release = v9fs_dir_release, 303 .release = v9fs_dir_release,
301 .lock = v9fs_file_lock, 304 .lock = v9fs_file_lock,
305 .mmap = generic_file_mmap,
302}; 306};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index a17b2885428..91f552454c7 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -177,6 +177,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
177 inode->i_blocks = 0; 177 inode->i_blocks = 0;
178 inode->i_rdev = 0; 178 inode->i_rdev = 0;
179 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 179 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
180 inode->i_mapping->a_ops = &v9fs_addr_operations;
180 181
181 switch (mode & S_IFMT) { 182 switch (mode & S_IFMT) {
182 case S_IFIFO: 183 case S_IFIFO:
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 943ef9b8224..d335015473a 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,11 @@
1Version 1.40
2------------
3Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance
4of readpages by eliminating one extra memcpy. Allow update of file size
5from remote server even if file is open for write as long as mount is
6directio. Recognize share mode security and send NTLM encrypted password
7on tree connect if share mode negotiated.
8
1Version 1.39 9Version 1.39
2------------ 10------------
3Defer close of a file handle slightly if pending writes depend on that handle 11Defer close of a file handle slightly if pending writes depend on that handle
@@ -7,6 +15,8 @@ Fix SFU style symlinks and mknod needed for servers which do not support the
7CIFS Unix Extensions. Fix setfacl/getfacl on bigendian. Timeout negative 15CIFS Unix Extensions. Fix setfacl/getfacl on bigendian. Timeout negative
8dentries so files that the client sees as deleted but that later get created 16dentries so files that the client sees as deleted but that later get created
9on the server will be recognized. Add client side permission check on setattr. 17on the server will be recognized. Add client side permission check on setattr.
18Timeout stuck requests better (where server has never responded or sent corrupt
19responses)
10 20
11Version 1.38 21Version 1.38
12------------ 22------------
diff --git a/fs/cifs/README b/fs/cifs/README
index e5d09a2fc7a..b0070d1b149 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -436,7 +436,17 @@ A partial list of the supported mount options follows:
436 SFU does). In the future the bottom 9 bits of the mode 436 SFU does). In the future the bottom 9 bits of the mode
437 mode also will be emulated using queries of the security 437 mode also will be emulated using queries of the security
438 descriptor (ACL). 438 descriptor (ACL).
439 439sec Security mode. Allowed values are:
440 none attempt to connection as a null user (no name)
441 krb5 Use Kerberos version 5 authentication
442 krb5i Use Kerberos authentication and packet signing
443 ntlm Use NTLM password hashing (default)
444 ntlmi Use NTLM password hashing with signing (if
445 /proc/fs/cifs/PacketSigningEnabled on or if
446 server requires signing also can be the default)
447 ntlmv2 Use NTLMv2 password hashing
448 ntlmv2i Use NTLMv2 password hashing with packet signing
449
440The mount.cifs mount helper also accepts a few mount options before -o 450The mount.cifs mount helper also accepts a few mount options before -o
441including: 451including:
442 452
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 22a444a3fe4..f4124a32bef 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -219,6 +219,10 @@ cifs_stats_write(struct file *file, const char __user *buffer,
219 219
220 if (c == '1' || c == 'y' || c == 'Y' || c == '0') { 220 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
221 read_lock(&GlobalSMBSeslock); 221 read_lock(&GlobalSMBSeslock);
222#ifdef CONFIG_CIFS_STATS2
223 atomic_set(&totBufAllocCount, 0);
224 atomic_set(&totSmBufAllocCount, 0);
225#endif /* CONFIG_CIFS_STATS2 */
222 list_for_each(tmp, &GlobalTreeConnectionList) { 226 list_for_each(tmp, &GlobalTreeConnectionList) {
223 tcon = list_entry(tmp, struct cifsTconInfo, 227 tcon = list_entry(tmp, struct cifsTconInfo,
224 cifsConnectionList); 228 cifsConnectionList);
@@ -276,6 +280,14 @@ cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
276 smBufAllocCount.counter,cifs_min_small); 280 smBufAllocCount.counter,cifs_min_small);
277 length += item_length; 281 length += item_length;
278 buf += item_length; 282 buf += item_length;
283#ifdef CONFIG_CIFS_STATS2
284 item_length = sprintf(buf, "Total Large %d Small %d Allocations\n",
285 atomic_read(&totBufAllocCount),
286 atomic_read(&totSmBufAllocCount));
287 length += item_length;
288 buf += item_length;
289#endif /* CONFIG_CIFS_STATS2 */
290
279 item_length = 291 item_length =
280 sprintf(buf,"Operations (MIDs): %d\n", 292 sprintf(buf,"Operations (MIDs): %d\n",
281 midCount.counter); 293 midCount.counter);
@@ -389,8 +401,8 @@ static read_proc_t ntlmv2_enabled_read;
389static write_proc_t ntlmv2_enabled_write; 401static write_proc_t ntlmv2_enabled_write;
390static read_proc_t packet_signing_enabled_read; 402static read_proc_t packet_signing_enabled_read;
391static write_proc_t packet_signing_enabled_write; 403static write_proc_t packet_signing_enabled_write;
392static read_proc_t quotaEnabled_read; 404static read_proc_t experimEnabled_read;
393static write_proc_t quotaEnabled_write; 405static write_proc_t experimEnabled_write;
394static read_proc_t linuxExtensionsEnabled_read; 406static read_proc_t linuxExtensionsEnabled_read;
395static write_proc_t linuxExtensionsEnabled_write; 407static write_proc_t linuxExtensionsEnabled_write;
396 408
@@ -430,9 +442,9 @@ cifs_proc_init(void)
430 pde->write_proc = oplockEnabled_write; 442 pde->write_proc = oplockEnabled_write;
431 443
432 pde = create_proc_read_entry("Experimental", 0, proc_fs_cifs, 444 pde = create_proc_read_entry("Experimental", 0, proc_fs_cifs,
433 quotaEnabled_read, NULL); 445 experimEnabled_read, NULL);
434 if (pde) 446 if (pde)
435 pde->write_proc = quotaEnabled_write; 447 pde->write_proc = experimEnabled_write;
436 448
437 pde = create_proc_read_entry("LinuxExtensionsEnabled", 0, proc_fs_cifs, 449 pde = create_proc_read_entry("LinuxExtensionsEnabled", 0, proc_fs_cifs,
438 linuxExtensionsEnabled_read, NULL); 450 linuxExtensionsEnabled_read, NULL);
@@ -574,14 +586,13 @@ oplockEnabled_write(struct file *file, const char __user *buffer,
574} 586}
575 587
576static int 588static int
577quotaEnabled_read(char *page, char **start, off_t off, 589experimEnabled_read(char *page, char **start, off_t off,
578 int count, int *eof, void *data) 590 int count, int *eof, void *data)
579{ 591{
580 int len; 592 int len;
581 593
582 len = sprintf(page, "%d\n", experimEnabled); 594 len = sprintf(page, "%d\n", experimEnabled);
583/* could also check if quotas are enabled in kernel 595
584 as a whole first */
585 len -= off; 596 len -= off;
586 *start = page + off; 597 *start = page + off;
587 598
@@ -596,21 +607,23 @@ quotaEnabled_read(char *page, char **start, off_t off,
596 return len; 607 return len;
597} 608}
598static int 609static int
599quotaEnabled_write(struct file *file, const char __user *buffer, 610experimEnabled_write(struct file *file, const char __user *buffer,
600 unsigned long count, void *data) 611 unsigned long count, void *data)
601{ 612{
602 char c; 613 char c;
603 int rc; 614 int rc;
604 615
605 rc = get_user(c, buffer); 616 rc = get_user(c, buffer);
606 if (rc) 617 if (rc)
607 return rc; 618 return rc;
608 if (c == '0' || c == 'n' || c == 'N') 619 if (c == '0' || c == 'n' || c == 'N')
609 experimEnabled = 0; 620 experimEnabled = 0;
610 else if (c == '1' || c == 'y' || c == 'Y') 621 else if (c == '1' || c == 'y' || c == 'Y')
611 experimEnabled = 1; 622 experimEnabled = 1;
623 else if (c == '2')
624 experimEnabled = 2;
612 625
613 return count; 626 return count;
614} 627}
615 628
616static int 629static int
@@ -620,8 +633,6 @@ linuxExtensionsEnabled_read(char *page, char **start, off_t off,
620 int len; 633 int len;
621 634
622 len = sprintf(page, "%d\n", linuxExtEnabled); 635 len = sprintf(page, "%d\n", linuxExtEnabled);
623/* could also check if quotas are enabled in kernel
624 as a whole first */
625 len -= off; 636 len -= off;
626 *start = page + off; 637 *start = page + off;
627 638
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index f799f6f0e72..ad58eb0c4d6 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -24,9 +24,10 @@
24#define CIFS_MOUNT_DIRECT_IO 8 /* do not write nor read through page cache */ 24#define CIFS_MOUNT_DIRECT_IO 8 /* do not write nor read through page cache */
25#define CIFS_MOUNT_NO_XATTR 0x10 /* if set - disable xattr support */ 25#define CIFS_MOUNT_NO_XATTR 0x10 /* if set - disable xattr support */
26#define CIFS_MOUNT_MAP_SPECIAL_CHR 0x20 /* remap illegal chars in filenames */ 26#define CIFS_MOUNT_MAP_SPECIAL_CHR 0x20 /* remap illegal chars in filenames */
27#define CIFS_MOUNT_POSIX_PATHS 0x40 /* Negotiate posix pathnames if possible. */ 27#define CIFS_MOUNT_POSIX_PATHS 0x40 /* Negotiate posix pathnames if possible. */
28#define CIFS_MOUNT_UNX_EMUL 0x80 /* Network compat with SFUnix emulation */ 28#define CIFS_MOUNT_UNX_EMUL 0x80 /* Network compat with SFUnix emulation */
29#define CIFS_MOUNT_NO_BRL 0x100 /* No sending byte range locks to srv */ 29#define CIFS_MOUNT_NO_BRL 0x100 /* No sending byte range locks to srv */
30#define CIFS_MOUNT_CIFS_ACL 0x200 /* send ACL requests to non-POSIX srv */
30 31
31struct cifs_sb_info { 32struct cifs_sb_info {
32 struct cifsTconInfo *tcon; /* primary mount */ 33 struct cifsTconInfo *tcon; /* primary mount */
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
new file mode 100644
index 00000000000..d0776ac2b80
--- /dev/null
+++ b/fs/cifs/cifsacl.h
@@ -0,0 +1,38 @@
1/*
2 * fs/cifs/cifsacl.h
3 *
4 * Copyright (c) International Business Machines Corp., 2005
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef _CIFSACL_H
23#define _CIFSACL_H
24
25struct cifs_sid {
26 __u8 revision; /* revision level */
27 __u8 num_subauths;
28 __u8 authority[6];
29 __u32 sub_auth[4];
30 /* next sub_auth if any ... */
31} __attribute__((packed));
32
33/* everyone */
34extern const struct cifs_sid sid_everyone;
35/* group users */
36extern const struct cifs_sid sid_user;
37
38#endif /* _CIFSACL_H */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index fe2bb7c4c91..a2c24858d40 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/cifsencrypt.c 2 * fs/cifs/cifsencrypt.c
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2003 4 * Copyright (C) International Business Machines Corp., 2005
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
@@ -82,6 +82,59 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
82 return rc; 82 return rc;
83} 83}
84 84
85static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
86 const char * key, char * signature)
87{
88 struct MD5Context context;
89
90 if((iov == NULL) || (signature == NULL))
91 return -EINVAL;
92
93 MD5Init(&context);
94 MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
95
96/* MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); */ /* BB FIXME BB */
97
98 MD5Final(signature,&context);
99
100 return -EOPNOTSUPP;
101/* return 0; */
102}
103
104
105int cifs_sign_smb2(struct kvec * iov, int n_vec, struct TCP_Server_Info *server,
106 __u32 * pexpected_response_sequence_number)
107{
108 int rc = 0;
109 char smb_signature[20];
110 struct smb_hdr * cifs_pdu = iov[0].iov_base;
111
112 if((cifs_pdu == NULL) || (server == NULL))
113 return -EINVAL;
114
115 if((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
116 return rc;
117
118 spin_lock(&GlobalMid_Lock);
119 cifs_pdu->Signature.Sequence.SequenceNumber =
120 cpu_to_le32(server->sequence_number);
121 cifs_pdu->Signature.Sequence.Reserved = 0;
122
123 *pexpected_response_sequence_number = server->sequence_number++;
124 server->sequence_number++;
125 spin_unlock(&GlobalMid_Lock);
126
127 rc = cifs_calc_signature2(iov, n_vec, server->mac_signing_key,
128 smb_signature);
129 if(rc)
130 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
131 else
132 memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
133
134 return rc;
135
136}
137
85int cifs_verify_signature(struct smb_hdr * cifs_pdu, const char * mac_key, 138int cifs_verify_signature(struct smb_hdr * cifs_pdu, const char * mac_key,
86 __u32 expected_sequence_number) 139 __u32 expected_sequence_number)
87{ 140{
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index e10213b7541..79eeccd0437 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -513,6 +513,17 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const char __user *buf,
513 return written; 513 return written;
514} 514}
515 515
516static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
517{
518 /* origin == SEEK_END => we must revalidate the cached file length */
519 if (origin == 2) {
520 int retval = cifs_revalidate(file->f_dentry);
521 if (retval < 0)
522 return (loff_t)retval;
523 }
524 return remote_llseek(file, offset, origin);
525}
526
516static struct file_system_type cifs_fs_type = { 527static struct file_system_type cifs_fs_type = {
517 .owner = THIS_MODULE, 528 .owner = THIS_MODULE,
518 .name = "cifs", 529 .name = "cifs",
@@ -586,6 +597,7 @@ struct file_operations cifs_file_ops = {
586 .flush = cifs_flush, 597 .flush = cifs_flush,
587 .mmap = cifs_file_mmap, 598 .mmap = cifs_file_mmap,
588 .sendfile = generic_file_sendfile, 599 .sendfile = generic_file_sendfile,
600 .llseek = cifs_llseek,
589#ifdef CONFIG_CIFS_POSIX 601#ifdef CONFIG_CIFS_POSIX
590 .ioctl = cifs_ioctl, 602 .ioctl = cifs_ioctl,
591#endif /* CONFIG_CIFS_POSIX */ 603#endif /* CONFIG_CIFS_POSIX */
@@ -609,7 +621,7 @@ struct file_operations cifs_file_direct_ops = {
609#ifdef CONFIG_CIFS_POSIX 621#ifdef CONFIG_CIFS_POSIX
610 .ioctl = cifs_ioctl, 622 .ioctl = cifs_ioctl,
611#endif /* CONFIG_CIFS_POSIX */ 623#endif /* CONFIG_CIFS_POSIX */
612 624 .llseek = cifs_llseek,
613#ifdef CONFIG_CIFS_EXPERIMENTAL 625#ifdef CONFIG_CIFS_EXPERIMENTAL
614 .dir_notify = cifs_dir_notify, 626 .dir_notify = cifs_dir_notify,
615#endif /* CONFIG_CIFS_EXPERIMENTAL */ 627#endif /* CONFIG_CIFS_EXPERIMENTAL */
@@ -627,6 +639,7 @@ struct file_operations cifs_file_nobrl_ops = {
627 .flush = cifs_flush, 639 .flush = cifs_flush,
628 .mmap = cifs_file_mmap, 640 .mmap = cifs_file_mmap,
629 .sendfile = generic_file_sendfile, 641 .sendfile = generic_file_sendfile,
642 .llseek = cifs_llseek,
630#ifdef CONFIG_CIFS_POSIX 643#ifdef CONFIG_CIFS_POSIX
631 .ioctl = cifs_ioctl, 644 .ioctl = cifs_ioctl,
632#endif /* CONFIG_CIFS_POSIX */ 645#endif /* CONFIG_CIFS_POSIX */
@@ -649,7 +662,7 @@ struct file_operations cifs_file_direct_nobrl_ops = {
649#ifdef CONFIG_CIFS_POSIX 662#ifdef CONFIG_CIFS_POSIX
650 .ioctl = cifs_ioctl, 663 .ioctl = cifs_ioctl,
651#endif /* CONFIG_CIFS_POSIX */ 664#endif /* CONFIG_CIFS_POSIX */
652 665 .llseek = cifs_llseek,
653#ifdef CONFIG_CIFS_EXPERIMENTAL 666#ifdef CONFIG_CIFS_EXPERIMENTAL
654 .dir_notify = cifs_dir_notify, 667 .dir_notify = cifs_dir_notify,
655#endif /* CONFIG_CIFS_EXPERIMENTAL */ 668#endif /* CONFIG_CIFS_EXPERIMENTAL */
@@ -733,7 +746,7 @@ cifs_init_request_bufs(void)
733 kmem_cache_destroy(cifs_req_cachep); 746 kmem_cache_destroy(cifs_req_cachep);
734 return -ENOMEM; 747 return -ENOMEM;
735 } 748 }
736 /* 256 (MAX_CIFS_HDR_SIZE bytes is enough for most SMB responses and 749 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
737 almost all handle based requests (but not write response, nor is it 750 almost all handle based requests (but not write response, nor is it
738 sufficient for path based requests). A smaller size would have 751 sufficient for path based requests). A smaller size would have
739 been more efficient (compacting multiple slab items on one 4k page) 752 been more efficient (compacting multiple slab items on one 4k page)
@@ -742,7 +755,8 @@ cifs_init_request_bufs(void)
742 efficient to alloc 1 per page off the slab compared to 17K (5page) 755 efficient to alloc 1 per page off the slab compared to 17K (5page)
743 alloc of large cifs buffers even when page debugging is on */ 756 alloc of large cifs buffers even when page debugging is on */
744 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", 757 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
745 MAX_CIFS_HDR_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 758 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
759 NULL, NULL);
746 if (cifs_sm_req_cachep == NULL) { 760 if (cifs_sm_req_cachep == NULL) {
747 mempool_destroy(cifs_req_poolp); 761 mempool_destroy(cifs_req_poolp);
748 kmem_cache_destroy(cifs_req_cachep); 762 kmem_cache_destroy(cifs_req_cachep);
@@ -954,6 +968,12 @@ init_cifs(void)
954 atomic_set(&tconInfoReconnectCount, 0); 968 atomic_set(&tconInfoReconnectCount, 0);
955 969
956 atomic_set(&bufAllocCount, 0); 970 atomic_set(&bufAllocCount, 0);
971 atomic_set(&smBufAllocCount, 0);
972#ifdef CONFIG_CIFS_STATS2
973 atomic_set(&totBufAllocCount, 0);
974 atomic_set(&totSmBufAllocCount, 0);
975#endif /* CONFIG_CIFS_STATS2 */
976
957 atomic_set(&midCount, 0); 977 atomic_set(&midCount, 0);
958 GlobalCurrentXid = 0; 978 GlobalCurrentXid = 0;
959 GlobalTotalActiveXid = 0; 979 GlobalTotalActiveXid = 0;
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 9ec40e0e54f..821a8eb2255 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -99,5 +99,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
99extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 99extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
100extern int cifs_ioctl (struct inode * inode, struct file * filep, 100extern int cifs_ioctl (struct inode * inode, struct file * filep,
101 unsigned int command, unsigned long arg); 101 unsigned int command, unsigned long arg);
102#define CIFS_VERSION "1.39" 102#define CIFS_VERSION "1.40"
103#endif /* _CIFSFS_H */ 103#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1ba08f8c5bc..7bed27601ce 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -233,6 +233,8 @@ struct cifsTconInfo {
233 atomic_t num_hardlinks; 233 atomic_t num_hardlinks;
234 atomic_t num_symlinks; 234 atomic_t num_symlinks;
235 atomic_t num_locks; 235 atomic_t num_locks;
236 atomic_t num_acl_get;
237 atomic_t num_acl_set;
236#ifdef CONFIG_CIFS_STATS2 238#ifdef CONFIG_CIFS_STATS2
237 unsigned long long time_writes; 239 unsigned long long time_writes;
238 unsigned long long time_reads; 240 unsigned long long time_reads;
@@ -285,6 +287,7 @@ struct cifs_search_info {
285 unsigned endOfSearch:1; 287 unsigned endOfSearch:1;
286 unsigned emptyDir:1; 288 unsigned emptyDir:1;
287 unsigned unicode:1; 289 unsigned unicode:1;
290 unsigned smallBuf:1; /* so we know which buf_release function to call */
288}; 291};
289 292
290struct cifsFileInfo { 293struct cifsFileInfo {
@@ -420,7 +423,12 @@ struct dir_notify_req {
420#define MID_RESPONSE_RECEIVED 4 423#define MID_RESPONSE_RECEIVED 4
421#define MID_RETRY_NEEDED 8 /* session closed while this request out */ 424#define MID_RETRY_NEEDED 8 /* session closed while this request out */
422#define MID_NO_RESP_NEEDED 0x10 425#define MID_NO_RESP_NEEDED 0x10
423#define MID_SMALL_BUFFER 0x20 /* 112 byte response buffer instead of 4K */ 426
427/* Types of response buffer returned from SendReceive2 */
428#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
429#define CIFS_SMALL_BUFFER 1
430#define CIFS_LARGE_BUFFER 2
431#define CIFS_IOVEC 4 /* array of response buffers */
424 432
425/* 433/*
426 ***************************************************************** 434 *****************************************************************
@@ -505,8 +513,12 @@ GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
505GLOBAL_EXTERN atomic_t tconInfoReconnectCount; 513GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
506 514
507/* Various Debug counters to remove someday (BB) */ 515/* Various Debug counters to remove someday (BB) */
508GLOBAL_EXTERN atomic_t bufAllocCount; 516GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
509GLOBAL_EXTERN atomic_t smBufAllocCount; 517#ifdef CONFIG_CIFS_STATS2
518GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
519GLOBAL_EXTERN atomic_t totSmBufAllocCount;
520#endif
521GLOBAL_EXTERN atomic_t smBufAllocCount;
510GLOBAL_EXTERN atomic_t midCount; 522GLOBAL_EXTERN atomic_t midCount;
511 523
512/* Misc globals */ 524/* Misc globals */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 33e1859fd2f..cc2471094ca 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/cifspdu.h 2 * fs/cifs/cifspdu.h
3 * 3 *
4 * Copyright (c) International Business Machines Corp., 2002 4 * Copyright (c) International Business Machines Corp., 2002,2005
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
@@ -80,7 +80,11 @@
80#define NT_TRANSACT_GET_USER_QUOTA 0x07 80#define NT_TRANSACT_GET_USER_QUOTA 0x07
81#define NT_TRANSACT_SET_USER_QUOTA 0x08 81#define NT_TRANSACT_SET_USER_QUOTA 0x08
82 82
83#define MAX_CIFS_HDR_SIZE 256 /* is future chained NTCreateXReadX bigger? */ 83#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
84/* future chained NTCreateXReadX bigger, but for time being NTCreateX biggest */
85/* among the requests (NTCreateX response is bigger with wct of 34) */
86#define MAX_CIFS_HDR_SIZE 0x58 /* 4 len + 32 hdr + (2*24 wct) + 2 bct + 2 pad */
87#define CIFS_SMALL_PATH 120 /* allows for (448-88)/3 */
84 88
85/* internal cifs vfs structures */ 89/* internal cifs vfs structures */
86/***************************************************************** 90/*****************************************************************
@@ -524,7 +528,7 @@ typedef union smb_com_session_setup_andx {
524 /* STRING PrimaryDomain */ 528 /* STRING PrimaryDomain */
525 /* STRING NativeOS */ 529 /* STRING NativeOS */
526 /* STRING NativeLanMan */ 530 /* STRING NativeLanMan */
527 } __attribute__((packed)) old_req; /* pre-NTLM (LANMAN2.1) request format */ 531 } __attribute__((packed)) old_req; /* pre-NTLM (LANMAN2.1) req format */
528 532
529 struct { /* default (NTLM) response format */ 533 struct { /* default (NTLM) response format */
530 struct smb_hdr hdr; /* wct = 3 */ 534 struct smb_hdr hdr; /* wct = 3 */
@@ -536,7 +540,7 @@ typedef union smb_com_session_setup_andx {
536 unsigned char NativeOS[1]; /* followed by */ 540 unsigned char NativeOS[1]; /* followed by */
537/* unsigned char * NativeLanMan; */ 541/* unsigned char * NativeLanMan; */
538/* unsigned char * PrimaryDomain; */ 542/* unsigned char * PrimaryDomain; */
539 } __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response format */ 543 } __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
540} __attribute__((packed)) SESSION_SETUP_ANDX; 544} __attribute__((packed)) SESSION_SETUP_ANDX;
541 545
542#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux" 546#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux"
@@ -1003,10 +1007,49 @@ typedef struct smb_com_setattr_rsp {
1003 1007
1004/* empty wct response to setattr */ 1008/* empty wct response to setattr */
1005 1009
1006/***************************************************/ 1010/*******************************************************/
1007/* NT Transact structure defintions follow */ 1011/* NT Transact structure defintions follow */
1008/* Currently only ioctl and notify are implemented */ 1012/* Currently only ioctl, acl (get security descriptor) */
1009/***************************************************/ 1013/* and notify are implemented */
1014/*******************************************************/
1015typedef struct smb_com_ntransact_req {
1016 struct smb_hdr hdr; /* wct >= 19 */
1017 __u8 MaxSetupCount;
1018 __u16 Reserved;
1019 __le32 TotalParameterCount;
1020 __le32 TotalDataCount;
1021 __le32 MaxParameterCount;
1022 __le32 MaxDataCount;
1023 __le32 ParameterCount;
1024 __le32 ParameterOffset;
1025 __le32 DataCount;
1026 __le32 DataOffset;
1027 __u8 SetupCount; /* four setup words follow subcommand */
1028 /* SNIA spec incorrectly included spurious pad here */
1029 __le16 SubCommand; /* 2 = IOCTL/FSCTL */
1030 /* SetupCount words follow then */
1031 __le16 ByteCount;
1032 __u8 Pad[3];
1033 __u8 Parms[0];
1034} __attribute__((packed)) NTRANSACT_REQ;
1035
1036typedef struct smb_com_ntransact_rsp {
1037 struct smb_hdr hdr; /* wct = 18 */
1038 __u8 Reserved[3];
1039 __le32 TotalParameterCount;
1040 __le32 TotalDataCount;
1041 __le32 ParameterCount;
1042 __le32 ParameterOffset;
1043 __le32 ParameterDisplacement;
1044 __le32 DataCount;
1045 __le32 DataOffset;
1046 __le32 DataDisplacement;
1047 __u8 SetupCount; /* 0 */
1048 __u16 ByteCount;
1049 /* __u8 Pad[3]; */
1050 /* parms and data follow */
1051} __attribute__((packed)) NTRANSACT_RSP;
1052
1010typedef struct smb_com_transaction_ioctl_req { 1053typedef struct smb_com_transaction_ioctl_req {
1011 struct smb_hdr hdr; /* wct = 23 */ 1054 struct smb_hdr hdr; /* wct = 23 */
1012 __u8 MaxSetupCount; 1055 __u8 MaxSetupCount;
@@ -1021,11 +1064,11 @@ typedef struct smb_com_transaction_ioctl_req {
1021 __le32 DataOffset; 1064 __le32 DataOffset;
1022 __u8 SetupCount; /* four setup words follow subcommand */ 1065 __u8 SetupCount; /* four setup words follow subcommand */
1023 /* SNIA spec incorrectly included spurious pad here */ 1066 /* SNIA spec incorrectly included spurious pad here */
1024 __le16 SubCommand;/* 2 = IOCTL/FSCTL */ 1067 __le16 SubCommand; /* 2 = IOCTL/FSCTL */
1025 __le32 FunctionCode; 1068 __le32 FunctionCode;
1026 __u16 Fid; 1069 __u16 Fid;
1027 __u8 IsFsctl; /* 1 = File System Control, 0 = device control (IOCTL)*/ 1070 __u8 IsFsctl; /* 1 = File System Control 0 = device control (IOCTL) */
1028 __u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS share)*/ 1071 __u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
1029 __le16 ByteCount; 1072 __le16 ByteCount;
1030 __u8 Pad[3]; 1073 __u8 Pad[3];
1031 __u8 Data[1]; 1074 __u8 Data[1];
@@ -1045,9 +1088,35 @@ typedef struct smb_com_transaction_ioctl_rsp {
1045 __u8 SetupCount; /* 1 */ 1088 __u8 SetupCount; /* 1 */
1046 __le16 ReturnedDataLen; 1089 __le16 ReturnedDataLen;
1047 __u16 ByteCount; 1090 __u16 ByteCount;
1048 __u8 Pad[3];
1049} __attribute__((packed)) TRANSACT_IOCTL_RSP; 1091} __attribute__((packed)) TRANSACT_IOCTL_RSP;
1050 1092
1093#define CIFS_ACL_OWNER 1
1094#define CIFS_ACL_GROUP 2
1095#define CIFS_ACL_DACL 4
1096#define CIFS_ACL_SACL 8
1097
1098typedef struct smb_com_transaction_qsec_req {
1099 struct smb_hdr hdr; /* wct = 19 */
1100 __u8 MaxSetupCount;
1101 __u16 Reserved;
1102 __le32 TotalParameterCount;
1103 __le32 TotalDataCount;
1104 __le32 MaxParameterCount;
1105 __le32 MaxDataCount;
1106 __le32 ParameterCount;
1107 __le32 ParameterOffset;
1108 __le32 DataCount;
1109 __le32 DataOffset;
1110 __u8 SetupCount; /* no setup words follow subcommand */
1111 /* SNIA spec incorrectly included spurious pad here */
1112 __le16 SubCommand; /* 6 = QUERY_SECURITY_DESC */
1113 __le16 ByteCount; /* bcc = 3 + 8 */
1114 __u8 Pad[3];
1115 __u16 Fid;
1116 __u16 Reserved2;
1117 __le32 AclFlags;
1118} __attribute__((packed)) QUERY_SEC_DESC_REQ;
1119
1051typedef struct smb_com_transaction_change_notify_req { 1120typedef struct smb_com_transaction_change_notify_req {
1052 struct smb_hdr hdr; /* wct = 23 */ 1121 struct smb_hdr hdr; /* wct = 23 */
1053 __u8 MaxSetupCount; 1122 __u8 MaxSetupCount;
@@ -1068,10 +1137,12 @@ typedef struct smb_com_transaction_change_notify_req {
1068 __u8 WatchTree; /* 1 = Monitor subdirectories */ 1137 __u8 WatchTree; /* 1 = Monitor subdirectories */
1069 __u8 Reserved2; 1138 __u8 Reserved2;
1070 __le16 ByteCount; 1139 __le16 ByteCount;
1071/* __u8 Pad[3];*/ 1140/* __u8 Pad[3];*/
1072/* __u8 Data[1];*/ 1141/* __u8 Data[1];*/
1073} __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ; 1142} __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ;
1074 1143
1144/* BB eventually change to use generic ntransact rsp struct
1145 and validation routine */
1075typedef struct smb_com_transaction_change_notify_rsp { 1146typedef struct smb_com_transaction_change_notify_rsp {
1076 struct smb_hdr hdr; /* wct = 18 */ 1147 struct smb_hdr hdr; /* wct = 18 */
1077 __u8 Reserved[3]; 1148 __u8 Reserved[3];
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1b73f4f4c5c..3c03aadaff0 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -48,8 +48,8 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
48 struct smb_hdr * /* out */ , 48 struct smb_hdr * /* out */ ,
49 int * /* bytes returned */ , const int long_op); 49 int * /* bytes returned */ , const int long_op);
50extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, 50extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
51 struct kvec *, int /* nvec */, 51 struct kvec *, int /* nvec to send */,
52 int * /* bytes returned */ , const int long_op); 52 int * /* type of buf returned */ , const int long_op);
53extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid); 53extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid);
54extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length); 54extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length);
55extern int is_valid_oplock_break(struct smb_hdr *smb); 55extern int is_valid_oplock_break(struct smb_hdr *smb);
@@ -93,11 +93,12 @@ extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
93 const struct nls_table *); 93 const struct nls_table *);
94 94
95extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, 95extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
96 const char *searchName, const struct nls_table *nls_codepage, 96 const char *searchName, const struct nls_table *nls_codepage,
97 __u16 *searchHandle, struct cifs_search_info * psrch_inf, int map, const char dirsep); 97 __u16 *searchHandle, struct cifs_search_info * psrch_inf,
98 int map, const char dirsep);
98 99
99extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, 100extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
100 __u16 searchHandle, struct cifs_search_info * psrch_inf); 101 __u16 searchHandle, struct cifs_search_info * psrch_inf);
101 102
102extern int CIFSFindClose(const int, struct cifsTconInfo *tcon, 103extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
103 const __u16 search_handle); 104 const __u16 search_handle);
@@ -230,19 +231,18 @@ extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
230 const int smb_file_id); 231 const int smb_file_id);
231 232
232extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, 233extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
233 const int netfid, unsigned int count, 234 const int netfid, unsigned int count,
234 const __u64 lseek, unsigned int *nbytes, char **buf); 235 const __u64 lseek, unsigned int *nbytes, char **buf,
236 int * return_buf_type);
235extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, 237extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
236 const int netfid, const unsigned int count, 238 const int netfid, const unsigned int count,
237 const __u64 lseek, unsigned int *nbytes, 239 const __u64 lseek, unsigned int *nbytes,
238 const char *buf, const char __user *ubuf, 240 const char *buf, const char __user *ubuf,
239 const int long_op); 241 const int long_op);
240#ifdef CONFIG_CIFS_EXPERIMENTAL
241extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, 242extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
242 const int netfid, const unsigned int count, 243 const int netfid, const unsigned int count,
243 const __u64 offset, unsigned int *nbytes, 244 const __u64 offset, unsigned int *nbytes,
244 struct kvec *iov, const int nvec, const int long_op); 245 struct kvec *iov, const int nvec, const int long_op);
245#endif /* CONFIG_CIFS_EXPERIMENTAL */
246extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon, 246extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
247 const unsigned char *searchName, __u64 * inode_number, 247 const unsigned char *searchName, __u64 * inode_number,
248 const struct nls_table *nls_codepage, 248 const struct nls_table *nls_codepage,
@@ -269,6 +269,8 @@ extern void tconInfoFree(struct cifsTconInfo *);
269extern int cifs_reconnect(struct TCP_Server_Info *server); 269extern int cifs_reconnect(struct TCP_Server_Info *server);
270 270
271extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *,__u32 *); 271extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *,__u32 *);
272extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
273 __u32 *);
272extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key, 274extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key,
273 __u32 expected_sequence_number); 275 __u32 expected_sequence_number);
274extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass); 276extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
@@ -297,6 +299,9 @@ extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
297 const char *fileName, const char * ea_name, 299 const char *fileName, const char * ea_name,
298 const void * ea_value, const __u16 ea_value_len, 300 const void * ea_value, const __u16 ea_value_len,
299 const struct nls_table *nls_codepage, int remap_special_chars); 301 const struct nls_table *nls_codepage, int remap_special_chars);
302extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon,
303 __u16 fid, char *acl_inf, const int buflen,
304 const int acl_type /* ACCESS vs. DEFAULT */);
300extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon, 305extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
301 const unsigned char *searchName, 306 const unsigned char *searchName,
302 char *acl_inf, const int buflen,const int acl_type, 307 char *acl_inf, const int buflen,const int acl_type,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 6867e556d37..217323b0c89 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -37,6 +37,7 @@
37#include "cifsproto.h" 37#include "cifsproto.h"
38#include "cifs_unicode.h" 38#include "cifs_unicode.h"
39#include "cifs_debug.h" 39#include "cifs_debug.h"
40#include "cifsacl.h"
40 41
41#ifdef CONFIG_CIFS_POSIX 42#ifdef CONFIG_CIFS_POSIX
42static struct { 43static struct {
@@ -372,8 +373,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
372 rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, 373 rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
373 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 374 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
374 if (rc == 0) { 375 if (rc == 0) {
375 server->secMode = pSMBr->SecurityMode; 376 server->secMode = pSMBr->SecurityMode;
376 server->secType = NTLM; /* BB override default for 377 if((server->secMode & SECMODE_USER) == 0)
378 cFYI(1,("share mode security"));
379 server->secType = NTLM; /* BB override default for
377 NTLMv2 or kerberos v5 */ 380 NTLMv2 or kerberos v5 */
378 /* one byte - no need to convert this or EncryptionKeyLen 381 /* one byte - no need to convert this or EncryptionKeyLen
379 from little endian */ 382 from little endian */
@@ -383,7 +386,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
383 min(le32_to_cpu(pSMBr->MaxBufferSize), 386 min(le32_to_cpu(pSMBr->MaxBufferSize),
384 (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); 387 (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
385 server->maxRw = le32_to_cpu(pSMBr->MaxRawSize); 388 server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
386 cFYI(0, ("Max buf = %d ", ses->server->maxBuf)); 389 cFYI(0, ("Max buf = %d", ses->server->maxBuf));
387 GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); 390 GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
388 server->capabilities = le32_to_cpu(pSMBr->Capabilities); 391 server->capabilities = le32_to_cpu(pSMBr->Capabilities);
389 server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone); 392 server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);
@@ -411,8 +414,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
411 (server->server_GUID, 414 (server->server_GUID,
412 pSMBr->u.extended_response. 415 pSMBr->u.extended_response.
413 GUID, 16) != 0) { 416 GUID, 16) != 0) {
414 cFYI(1, 417 cFYI(1, ("server UID changed"));
415 ("UID of server does not match previous connection to same ip address"));
416 memcpy(server-> 418 memcpy(server->
417 server_GUID, 419 server_GUID,
418 pSMBr->u. 420 pSMBr->u.
@@ -958,21 +960,19 @@ openRetry:
958 return rc; 960 return rc;
959} 961}
960 962
961/* If no buffer passed in, then caller wants to do the copy
962 as in the case of readpages so the SMB buffer must be
963 freed by the caller */
964
965int 963int
966CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, 964CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
967 const int netfid, const unsigned int count, 965 const int netfid, const unsigned int count,
968 const __u64 lseek, unsigned int *nbytes, char **buf) 966 const __u64 lseek, unsigned int *nbytes, char **buf,
967 int * pbuf_type)
969{ 968{
970 int rc = -EACCES; 969 int rc = -EACCES;
971 READ_REQ *pSMB = NULL; 970 READ_REQ *pSMB = NULL;
972 READ_RSP *pSMBr = NULL; 971 READ_RSP *pSMBr = NULL;
973 char *pReadData = NULL; 972 char *pReadData = NULL;
974 int bytes_returned;
975 int wct; 973 int wct;
974 int resp_buf_type = 0;
975 struct kvec iov[1];
976 976
977 cFYI(1,("Reading %d bytes on fid %d",count,netfid)); 977 cFYI(1,("Reading %d bytes on fid %d",count,netfid));
978 if(tcon->ses->capabilities & CAP_LARGE_FILES) 978 if(tcon->ses->capabilities & CAP_LARGE_FILES)
@@ -981,8 +981,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
981 wct = 10; /* old style read */ 981 wct = 10; /* old style read */
982 982
983 *nbytes = 0; 983 *nbytes = 0;
984 rc = smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB, 984 rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB);
985 (void **) &pSMBr);
986 if (rc) 985 if (rc)
987 return rc; 986 return rc;
988 987
@@ -990,13 +989,13 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
990 if (tcon->ses->server == NULL) 989 if (tcon->ses->server == NULL)
991 return -ECONNABORTED; 990 return -ECONNABORTED;
992 991
993 pSMB->AndXCommand = 0xFF; /* none */ 992 pSMB->AndXCommand = 0xFF; /* none */
994 pSMB->Fid = netfid; 993 pSMB->Fid = netfid;
995 pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF); 994 pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF);
996 if(wct == 12) 995 if(wct == 12)
997 pSMB->OffsetHigh = cpu_to_le32(lseek >> 32); 996 pSMB->OffsetHigh = cpu_to_le32(lseek >> 32);
998 else if((lseek >> 32) > 0) /* can not handle this big offset for old */ 997 else if((lseek >> 32) > 0) /* can not handle this big offset for old */
999 return -EIO; 998 return -EIO;
1000 999
1001 pSMB->Remaining = 0; 1000 pSMB->Remaining = 0;
1002 pSMB->MaxCount = cpu_to_le16(count & 0xFFFF); 1001 pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
@@ -1005,14 +1004,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
1005 pSMB->ByteCount = 0; /* no need to do le conversion since 0 */ 1004 pSMB->ByteCount = 0; /* no need to do le conversion since 0 */
1006 else { 1005 else {
1007 /* old style read */ 1006 /* old style read */
1008 struct smb_com_readx_req * pSMBW = 1007 struct smb_com_readx_req * pSMBW =
1009 (struct smb_com_readx_req *)pSMB; 1008 (struct smb_com_readx_req *)pSMB;
1010 pSMBW->ByteCount = 0; 1009 pSMBW->ByteCount = 0;
1011 } 1010 }
1012 1011
1013 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 1012 iov[0].iov_base = (char *)pSMB;
1014 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 1013 iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
1014 rc = SendReceive2(xid, tcon->ses, iov,
1015 1 /* num iovecs */,
1016 &resp_buf_type, 0);
1015 cifs_stats_inc(&tcon->num_reads); 1017 cifs_stats_inc(&tcon->num_reads);
1018 pSMBr = (READ_RSP *)iov[0].iov_base;
1016 if (rc) { 1019 if (rc) {
1017 cERROR(1, ("Send error in read = %d", rc)); 1020 cERROR(1, ("Send error in read = %d", rc));
1018 } else { 1021 } else {
@@ -1022,33 +1025,43 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
1022 *nbytes = data_length; 1025 *nbytes = data_length;
1023 1026
1024 /*check that DataLength would not go beyond end of SMB */ 1027 /*check that DataLength would not go beyond end of SMB */
1025 if ((data_length > CIFSMaxBufSize) 1028 if ((data_length > CIFSMaxBufSize)
1026 || (data_length > count)) { 1029 || (data_length > count)) {
1027 cFYI(1,("bad length %d for count %d",data_length,count)); 1030 cFYI(1,("bad length %d for count %d",data_length,count));
1028 rc = -EIO; 1031 rc = -EIO;
1029 *nbytes = 0; 1032 *nbytes = 0;
1030 } else { 1033 } else {
1031 pReadData = 1034 pReadData = (char *) (&pSMBr->hdr.Protocol) +
1032 (char *) (&pSMBr->hdr.Protocol) +
1033 le16_to_cpu(pSMBr->DataOffset); 1035 le16_to_cpu(pSMBr->DataOffset);
1034/* if(rc = copy_to_user(buf, pReadData, data_length)) { 1036/* if(rc = copy_to_user(buf, pReadData, data_length)) {
1035 cERROR(1,("Faulting on read rc = %d",rc)); 1037 cERROR(1,("Faulting on read rc = %d",rc));
1036 rc = -EFAULT; 1038 rc = -EFAULT;
1037 }*/ /* can not use copy_to_user when using page cache*/ 1039 }*/ /* can not use copy_to_user when using page cache*/
1038 if(*buf) 1040 if(*buf)
1039 memcpy(*buf,pReadData,data_length); 1041 memcpy(*buf,pReadData,data_length);
1040 } 1042 }
1041 } 1043 }
1042 if(*buf)
1043 cifs_buf_release(pSMB);
1044 else
1045 *buf = (char *)pSMB;
1046 1044
1047 /* Note: On -EAGAIN error only caller can retry on handle based calls 1045 cifs_small_buf_release(pSMB);
1046 if(*buf) {
1047 if(resp_buf_type == CIFS_SMALL_BUFFER)
1048 cifs_small_buf_release(iov[0].iov_base);
1049 else if(resp_buf_type == CIFS_LARGE_BUFFER)
1050 cifs_buf_release(iov[0].iov_base);
1051 } else /* return buffer to caller to free */ /* BB FIXME how do we tell caller if it is not a large buffer */ {
1052 *buf = iov[0].iov_base;
1053 if(resp_buf_type == CIFS_SMALL_BUFFER)
1054 *pbuf_type = CIFS_SMALL_BUFFER;
1055 else if(resp_buf_type == CIFS_LARGE_BUFFER)
1056 *pbuf_type = CIFS_LARGE_BUFFER;
1057 }
1058
1059 /* Note: On -EAGAIN error only caller can retry on handle based calls
1048 since file handle passed in no longer valid */ 1060 since file handle passed in no longer valid */
1049 return rc; 1061 return rc;
1050} 1062}
1051 1063
1064
1052int 1065int
1053CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, 1066CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
1054 const int netfid, const unsigned int count, 1067 const int netfid, const unsigned int count,
@@ -1155,7 +1168,6 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
1155 return rc; 1168 return rc;
1156} 1169}
1157 1170
1158#ifdef CONFIG_CIFS_EXPERIMENTAL
1159int 1171int
1160CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, 1172CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
1161 const int netfid, const unsigned int count, 1173 const int netfid, const unsigned int count,
@@ -1164,10 +1176,10 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
1164{ 1176{
1165 int rc = -EACCES; 1177 int rc = -EACCES;
1166 WRITE_REQ *pSMB = NULL; 1178 WRITE_REQ *pSMB = NULL;
1167 int bytes_returned, wct; 1179 int wct;
1168 int smb_hdr_len; 1180 int smb_hdr_len;
1181 int resp_buf_type = 0;
1169 1182
1170 /* BB removeme BB */
1171 cFYI(1,("write2 at %lld %d bytes", (long long)offset, count)); 1183 cFYI(1,("write2 at %lld %d bytes", (long long)offset, count));
1172 1184
1173 if(tcon->ses->capabilities & CAP_LARGE_FILES) 1185 if(tcon->ses->capabilities & CAP_LARGE_FILES)
@@ -1210,22 +1222,34 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
1210 pSMBW->ByteCount = cpu_to_le16(count + 5); 1222 pSMBW->ByteCount = cpu_to_le16(count + 5);
1211 } 1223 }
1212 iov[0].iov_base = pSMB; 1224 iov[0].iov_base = pSMB;
1213 iov[0].iov_len = smb_hdr_len + 4; 1225 if(wct == 14)
1226 iov[0].iov_len = smb_hdr_len + 4;
1227 else /* wct == 12 pad bigger by four bytes */
1228 iov[0].iov_len = smb_hdr_len + 8;
1229
1214 1230
1215 rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &bytes_returned, 1231 rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
1216 long_op); 1232 long_op);
1217 cifs_stats_inc(&tcon->num_writes); 1233 cifs_stats_inc(&tcon->num_writes);
1218 if (rc) { 1234 if (rc) {
1219 cFYI(1, ("Send error Write2 = %d", rc)); 1235 cFYI(1, ("Send error Write2 = %d", rc));
1220 *nbytes = 0; 1236 *nbytes = 0;
1237 } else if(resp_buf_type == 0) {
1238 /* presumably this can not happen, but best to be safe */
1239 rc = -EIO;
1240 *nbytes = 0;
1221 } else { 1241 } else {
1222 WRITE_RSP * pSMBr = (WRITE_RSP *)pSMB; 1242 WRITE_RSP * pSMBr = (WRITE_RSP *)iov[0].iov_base;
1223 *nbytes = le16_to_cpu(pSMBr->CountHigh); 1243 *nbytes = le16_to_cpu(pSMBr->CountHigh);
1224 *nbytes = (*nbytes) << 16; 1244 *nbytes = (*nbytes) << 16;
1225 *nbytes += le16_to_cpu(pSMBr->Count); 1245 *nbytes += le16_to_cpu(pSMBr->Count);
1226 } 1246 }
1227 1247
1228 cifs_small_buf_release(pSMB); 1248 cifs_small_buf_release(pSMB);
1249 if(resp_buf_type == CIFS_SMALL_BUFFER)
1250 cifs_small_buf_release(iov[0].iov_base);
1251 else if(resp_buf_type == CIFS_LARGE_BUFFER)
1252 cifs_buf_release(iov[0].iov_base);
1229 1253
1230 /* Note: On -EAGAIN error only caller can retry on handle based calls 1254 /* Note: On -EAGAIN error only caller can retry on handle based calls
1231 since file handle passed in no longer valid */ 1255 since file handle passed in no longer valid */
@@ -1234,8 +1258,6 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
1234} 1258}
1235 1259
1236 1260
1237#endif /* CIFS_EXPERIMENTAL */
1238
1239int 1261int
1240CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, 1262CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
1241 const __u16 smb_file_id, const __u64 len, 1263 const __u16 smb_file_id, const __u64 len,
@@ -1906,6 +1928,90 @@ querySymLinkRetry:
1906 return rc; 1928 return rc;
1907} 1929}
1908 1930
1931/* Initialize NT TRANSACT SMB into small smb request buffer.
1932 This assumes that all NT TRANSACTS that we init here have
1933 total parm and data under about 400 bytes (to fit in small cifs
1934 buffer size), which is the case so far, it easily fits. NB:
1935 Setup words themselves and ByteCount
1936 MaxSetupCount (size of returned setup area) and
1937 MaxParameterCount (returned parms size) must be set by caller */
1938static int
1939smb_init_ntransact(const __u16 sub_command, const int setup_count,
1940 const int parm_len, struct cifsTconInfo *tcon,
1941 void ** ret_buf)
1942{
1943 int rc;
1944 __u32 temp_offset;
1945 struct smb_com_ntransact_req * pSMB;
1946
1947 rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
1948 (void **)&pSMB);
1949 if (rc)
1950 return rc;
1951 *ret_buf = (void *)pSMB;
1952 pSMB->Reserved = 0;
1953 pSMB->TotalParameterCount = cpu_to_le32(parm_len);
1954 pSMB->TotalDataCount = 0;
1955 pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
1956 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
1957 pSMB->ParameterCount = pSMB->TotalParameterCount;
1958 pSMB->DataCount = pSMB->TotalDataCount;
1959 temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
1960 (setup_count * 2) - 4 /* for rfc1001 length itself */;
1961 pSMB->ParameterOffset = cpu_to_le32(temp_offset);
1962 pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
1963 pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
1964 pSMB->SubCommand = cpu_to_le16(sub_command);
1965 return 0;
1966}
1967
1968static int
1969validate_ntransact(char * buf, char ** ppparm, char ** ppdata,
1970 int * pdatalen, int * pparmlen)
1971{
1972 char * end_of_smb;
1973 __u32 data_count, data_offset, parm_count, parm_offset;
1974 struct smb_com_ntransact_rsp * pSMBr;
1975
1976 if(buf == NULL)
1977 return -EINVAL;
1978
1979 pSMBr = (struct smb_com_ntransact_rsp *)buf;
1980
1981 /* ByteCount was converted from little endian in SendReceive */
1982 end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount +
1983 (char *)&pSMBr->ByteCount;
1984
1985
1986 data_offset = le32_to_cpu(pSMBr->DataOffset);
1987 data_count = le32_to_cpu(pSMBr->DataCount);
1988 parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
1989 parm_count = le32_to_cpu(pSMBr->ParameterCount);
1990
1991 *ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
1992 *ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
1993
1994 /* should we also check that parm and data areas do not overlap? */
1995 if(*ppparm > end_of_smb) {
1996 cFYI(1,("parms start after end of smb"));
1997 return -EINVAL;
1998 } else if(parm_count + *ppparm > end_of_smb) {
1999 cFYI(1,("parm end after end of smb"));
2000 return -EINVAL;
2001 } else if(*ppdata > end_of_smb) {
2002 cFYI(1,("data starts after end of smb"));
2003 return -EINVAL;
2004 } else if(data_count + *ppdata > end_of_smb) {
2005 cFYI(1,("data %p + count %d (%p) ends after end of smb %p start %p",
2006 *ppdata, data_count, (data_count + *ppdata), end_of_smb, pSMBr)); /* BB FIXME */
2007 return -EINVAL;
2008 } else if(parm_count + data_count > pSMBr->ByteCount) {
2009 cFYI(1,("parm count and data count larger than SMB"));
2010 return -EINVAL;
2011 }
2012 return 0;
2013}
2014
1909int 2015int
1910CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, 2016CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
1911 const unsigned char *searchName, 2017 const unsigned char *searchName,
@@ -1928,7 +2034,8 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
1928 pSMB->TotalDataCount = 0; 2034 pSMB->TotalDataCount = 0;
1929 pSMB->MaxParameterCount = cpu_to_le32(2); 2035 pSMB->MaxParameterCount = cpu_to_le32(2);
1930 /* BB find exact data count max from sess structure BB */ 2036 /* BB find exact data count max from sess structure BB */
1931 pSMB->MaxDataCount = cpu_to_le32(4000); 2037 pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
2038 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
1932 pSMB->MaxSetupCount = 4; 2039 pSMB->MaxSetupCount = 4;
1933 pSMB->Reserved = 0; 2040 pSMB->Reserved = 0;
1934 pSMB->ParameterOffset = 0; 2041 pSMB->ParameterOffset = 0;
@@ -1955,7 +2062,9 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
1955 rc = -EIO; /* bad smb */ 2062 rc = -EIO; /* bad smb */
1956 else { 2063 else {
1957 if(data_count && (data_count < 2048)) { 2064 if(data_count && (data_count < 2048)) {
1958 char * end_of_smb = pSMBr->ByteCount + (char *)&pSMBr->ByteCount; 2065 char * end_of_smb = 2 /* sizeof byte count */ +
2066 pSMBr->ByteCount +
2067 (char *)&pSMBr->ByteCount;
1959 2068
1960 struct reparse_data * reparse_buf = (struct reparse_data *) 2069 struct reparse_data * reparse_buf = (struct reparse_data *)
1961 ((char *)&pSMBr->hdr.Protocol + data_offset); 2070 ((char *)&pSMBr->hdr.Protocol + data_offset);
@@ -2199,6 +2308,7 @@ queryAclRetry:
2199 2308
2200 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 2309 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
2201 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 2310 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
2311 cifs_stats_inc(&tcon->num_acl_get);
2202 if (rc) { 2312 if (rc) {
2203 cFYI(1, ("Send error in Query POSIX ACL = %d", rc)); 2313 cFYI(1, ("Send error in Query POSIX ACL = %d", rc));
2204 } else { 2314 } else {
@@ -2386,6 +2496,92 @@ GetExtAttrOut:
2386 2496
2387#endif /* CONFIG_POSIX */ 2497#endif /* CONFIG_POSIX */
2388 2498
2499
2500/* security id for everyone */
2501const struct cifs_sid sid_everyone = {1, 1, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0}};
2502/* group users */
2503const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {32, 545, 0, 0}};
2504
2505/* Convert CIFS ACL to POSIX form */
2506static int parse_sec_desc(struct cifs_sid * psec_desc, int acl_len)
2507{
2508 return 0;
2509}
2510
2511/* Get Security Descriptor (by handle) from remote server for a file or dir */
2512int
2513CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
2514 /* BB fix up return info */ char *acl_inf, const int buflen,
2515 const int acl_type /* ACCESS/DEFAULT not sure implication */)
2516{
2517 int rc = 0;
2518 int buf_type = 0;
2519 QUERY_SEC_DESC_REQ * pSMB;
2520 struct kvec iov[1];
2521
2522 cFYI(1, ("GetCifsACL"));
2523
2524 rc = smb_init_ntransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0,
2525 8 /* parm len */, tcon, (void **) &pSMB);
2526 if (rc)
2527 return rc;
2528
2529 pSMB->MaxParameterCount = cpu_to_le32(4);
2530 /* BB TEST with big acls that might need to be e.g. larger than 16K */
2531 pSMB->MaxSetupCount = 0;
2532 pSMB->Fid = fid; /* file handle always le */
2533 pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
2534 CIFS_ACL_DACL);
2535 pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
2536 pSMB->hdr.smb_buf_length += 11;
2537 iov[0].iov_base = (char *)pSMB;
2538 iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
2539
2540 rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0);
2541 cifs_stats_inc(&tcon->num_acl_get);
2542 if (rc) {
2543 cFYI(1, ("Send error in QuerySecDesc = %d", rc));
2544 } else { /* decode response */
2545 struct cifs_sid * psec_desc;
2546 __le32 * parm;
2547 int parm_len;
2548 int data_len;
2549 int acl_len;
2550 struct smb_com_ntransact_rsp * pSMBr;
2551
2552/* validate_nttransact */
2553 rc = validate_ntransact(iov[0].iov_base, (char **)&parm,
2554 (char **)&psec_desc,
2555 &parm_len, &data_len);
2556
2557 if(rc)
2558 goto qsec_out;
2559 pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base;
2560
2561 cERROR(1,("smb %p parm %p data %p",pSMBr,parm,psec_desc)); /* BB removeme BB */
2562
2563 if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
2564 rc = -EIO; /* bad smb */
2565 goto qsec_out;
2566 }
2567
2568/* BB check that data area is minimum length and as big as acl_len */
2569
2570 acl_len = le32_to_cpu(*(__le32 *)parm);
2571 /* BB check if(acl_len > bufsize) */
2572
2573 parse_sec_desc(psec_desc, acl_len);
2574 }
2575qsec_out:
2576 if(buf_type == CIFS_SMALL_BUFFER)
2577 cifs_small_buf_release(iov[0].iov_base);
2578 else if(buf_type == CIFS_LARGE_BUFFER)
2579 cifs_buf_release(iov[0].iov_base);
2580 cifs_small_buf_release(pSMB);
2581 return rc;
2582}
2583
2584
2389/* Legacy Query Path Information call for lookup to old servers such 2585/* Legacy Query Path Information call for lookup to old servers such
2390 as Win9x/WinME */ 2586 as Win9x/WinME */
2391int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon, 2587int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
@@ -4284,7 +4480,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
4284{ 4480{
4285 int rc = 0; 4481 int rc = 0;
4286 struct smb_com_transaction_change_notify_req * pSMB = NULL; 4482 struct smb_com_transaction_change_notify_req * pSMB = NULL;
4287 struct smb_com_transaction_change_notify_rsp * pSMBr = NULL; 4483 struct smb_com_ntransaction_change_notify_rsp * pSMBr = NULL;
4288 struct dir_notify_req *dnotify_req; 4484 struct dir_notify_req *dnotify_req;
4289 int bytes_returned; 4485 int bytes_returned;
4290 4486
@@ -4299,6 +4495,10 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
4299 pSMB->MaxParameterCount = cpu_to_le32(2); 4495 pSMB->MaxParameterCount = cpu_to_le32(2);
4300 /* BB find exact data count max from sess structure BB */ 4496 /* BB find exact data count max from sess structure BB */
4301 pSMB->MaxDataCount = 0; /* same in little endian or be */ 4497 pSMB->MaxDataCount = 0; /* same in little endian or be */
4498/* BB VERIFY verify which is correct for above BB */
4499 pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
4500 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
4501
4302 pSMB->MaxSetupCount = 4; 4502 pSMB->MaxSetupCount = 4;
4303 pSMB->Reserved = 0; 4503 pSMB->Reserved = 0;
4304 pSMB->ParameterOffset = 0; 4504 pSMB->ParameterOffset = 0;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index c467de85761..88f60aa5205 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -76,12 +76,19 @@ struct smb_vol {
76 unsigned setuids:1; 76 unsigned setuids:1;
77 unsigned noperm:1; 77 unsigned noperm:1;
78 unsigned no_psx_acl:1; /* set if posix acl support should be disabled */ 78 unsigned no_psx_acl:1; /* set if posix acl support should be disabled */
79 unsigned cifs_acl:1;
79 unsigned no_xattr:1; /* set if xattr (EA) support should be disabled*/ 80 unsigned no_xattr:1; /* set if xattr (EA) support should be disabled*/
80 unsigned server_ino:1; /* use inode numbers from server ie UniqueId */ 81 unsigned server_ino:1; /* use inode numbers from server ie UniqueId */
81 unsigned direct_io:1; 82 unsigned direct_io:1;
82 unsigned remap:1; /* set to remap seven reserved chars in filenames */ 83 unsigned remap:1; /* set to remap seven reserved chars in filenames */
83 unsigned posix_paths:1; /* unset to not ask for posix pathnames. */ 84 unsigned posix_paths:1; /* unset to not ask for posix pathnames. */
84 unsigned sfu_emul:1; 85 unsigned sfu_emul:1;
86 unsigned krb5:1;
87 unsigned ntlm:1;
88 unsigned ntlmv2:1;
89 unsigned nullauth:1; /* attempt to authenticate with null user */
90 unsigned sign:1;
91 unsigned seal:1; /* encrypt */
85 unsigned nocase; /* request case insensitive filenames */ 92 unsigned nocase; /* request case insensitive filenames */
86 unsigned nobrl; /* disable sending byte range locks to srv */ 93 unsigned nobrl; /* disable sending byte range locks to srv */
87 unsigned int rsize; 94 unsigned int rsize;
@@ -508,7 +515,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
508 /* else length ok */ 515 /* else length ok */
509 reconnect = 0; 516 reconnect = 0;
510 517
511 if(pdu_length > MAX_CIFS_HDR_SIZE - 4) { 518 if(pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
512 isLargeBuf = TRUE; 519 isLargeBuf = TRUE;
513 memcpy(bigbuf, smallbuf, 4); 520 memcpy(bigbuf, smallbuf, 4);
514 smb_buffer = bigbuf; 521 smb_buffer = bigbuf;
@@ -777,7 +784,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
777 784
778 /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ 785 /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
779 vol->rw = TRUE; 786 vol->rw = TRUE;
780 787 vol->ntlm = TRUE;
781 /* default is always to request posix paths. */ 788 /* default is always to request posix paths. */
782 vol->posix_paths = 1; 789 vol->posix_paths = 1;
783 790
@@ -903,6 +910,39 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
903 printk(KERN_WARNING "CIFS: ip address too long\n"); 910 printk(KERN_WARNING "CIFS: ip address too long\n");
904 return 1; 911 return 1;
905 } 912 }
913 } else if (strnicmp(data, "sec", 3) == 0) {
914 if (!value || !*value) {
915 cERROR(1,("no security value specified"));
916 continue;
917 } else if (strnicmp(value, "krb5i", 5) == 0) {
918 vol->sign = 1;
919 vol->krb5 = 1;
920 } else if (strnicmp(value, "krb5p", 5) == 0) {
921 /* vol->seal = 1;
922 vol->krb5 = 1; */
923 cERROR(1,("Krb5 cifs privacy not supported"));
924 return 1;
925 } else if (strnicmp(value, "krb5", 4) == 0) {
926 vol->krb5 = 1;
927 } else if (strnicmp(value, "ntlmv2i", 7) == 0) {
928 vol->ntlmv2 = 1;
929 vol->sign = 1;
930 } else if (strnicmp(value, "ntlmv2", 6) == 0) {
931 vol->ntlmv2 = 1;
932 } else if (strnicmp(value, "ntlmi", 5) == 0) {
933 vol->ntlm = 1;
934 vol->sign = 1;
935 } else if (strnicmp(value, "ntlm", 4) == 0) {
936 /* ntlm is default so can be turned off too */
937 vol->ntlm = 1;
938 } else if (strnicmp(value, "nontlm", 6) == 0) {
939 vol->ntlm = 0;
940 } else if (strnicmp(value, "none", 4) == 0) {
941 vol->nullauth = 1;
942 } else {
943 cERROR(1,("bad security option: %s", value));
944 return 1;
945 }
906 } else if ((strnicmp(data, "unc", 3) == 0) 946 } else if ((strnicmp(data, "unc", 3) == 0)
907 || (strnicmp(data, "target", 6) == 0) 947 || (strnicmp(data, "target", 6) == 0)
908 || (strnicmp(data, "path", 4) == 0)) { 948 || (strnicmp(data, "path", 4) == 0)) {
@@ -1120,6 +1160,10 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
1120 vol->server_ino = 1; 1160 vol->server_ino = 1;
1121 } else if (strnicmp(data, "noserverino",9) == 0) { 1161 } else if (strnicmp(data, "noserverino",9) == 0) {
1122 vol->server_ino = 0; 1162 vol->server_ino = 0;
1163 } else if (strnicmp(data, "cifsacl",7) == 0) {
1164 vol->cifs_acl = 1;
1165 } else if (strnicmp(data, "nocifsacl", 9) == 0) {
1166 vol->cifs_acl = 0;
1123 } else if (strnicmp(data, "acl",3) == 0) { 1167 } else if (strnicmp(data, "acl",3) == 0) {
1124 vol->no_psx_acl = 0; 1168 vol->no_psx_acl = 0;
1125 } else if (strnicmp(data, "noacl",5) == 0) { 1169 } else if (strnicmp(data, "noacl",5) == 0) {
@@ -1546,7 +1590,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1546 cFYI(1, ("Username: %s ", volume_info.username)); 1590 cFYI(1, ("Username: %s ", volume_info.username));
1547 1591
1548 } else { 1592 } else {
1549 cifserror("No username specified "); 1593 cifserror("No username specified");
1550 /* In userspace mount helper we can get user name from alternate 1594 /* In userspace mount helper we can get user name from alternate
1551 locations such as env variables and files on disk */ 1595 locations such as env variables and files on disk */
1552 kfree(volume_info.UNC); 1596 kfree(volume_info.UNC);
@@ -1587,7 +1631,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1587 return -EINVAL; 1631 return -EINVAL;
1588 } else /* which servers DFS root would we conect to */ { 1632 } else /* which servers DFS root would we conect to */ {
1589 cERROR(1, 1633 cERROR(1,
1590 ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified ")); 1634 ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified"));
1591 kfree(volume_info.UNC); 1635 kfree(volume_info.UNC);
1592 kfree(volume_info.password); 1636 kfree(volume_info.password);
1593 FreeXid(xid); 1637 FreeXid(xid);
@@ -1626,7 +1670,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1626 1670
1627 1671
1628 if (srvTcp) { 1672 if (srvTcp) {
1629 cFYI(1, ("Existing tcp session with server found ")); 1673 cFYI(1, ("Existing tcp session with server found"));
1630 } else { /* create socket */ 1674 } else { /* create socket */
1631 if(volume_info.port) 1675 if(volume_info.port)
1632 sin_server.sin_port = htons(volume_info.port); 1676 sin_server.sin_port = htons(volume_info.port);
@@ -1689,11 +1733,11 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1689 1733
1690 if (existingCifsSes) { 1734 if (existingCifsSes) {
1691 pSesInfo = existingCifsSes; 1735 pSesInfo = existingCifsSes;
1692 cFYI(1, ("Existing smb sess found ")); 1736 cFYI(1, ("Existing smb sess found"));
1693 kfree(volume_info.password); 1737 kfree(volume_info.password);
1694 /* volume_info.UNC freed at end of function */ 1738 /* volume_info.UNC freed at end of function */
1695 } else if (!rc) { 1739 } else if (!rc) {
1696 cFYI(1, ("Existing smb sess not found ")); 1740 cFYI(1, ("Existing smb sess not found"));
1697 pSesInfo = sesInfoAlloc(); 1741 pSesInfo = sesInfoAlloc();
1698 if (pSesInfo == NULL) 1742 if (pSesInfo == NULL)
1699 rc = -ENOMEM; 1743 rc = -ENOMEM;
@@ -1751,7 +1795,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1751 cifs_sb->mnt_gid = volume_info.linux_gid; 1795 cifs_sb->mnt_gid = volume_info.linux_gid;
1752 cifs_sb->mnt_file_mode = volume_info.file_mode; 1796 cifs_sb->mnt_file_mode = volume_info.file_mode;
1753 cifs_sb->mnt_dir_mode = volume_info.dir_mode; 1797 cifs_sb->mnt_dir_mode = volume_info.dir_mode;
1754 cFYI(1,("file mode: 0x%x dir mode: 0x%x",cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode)); 1798 cFYI(1,("file mode: 0x%x dir mode: 0x%x",
1799 cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
1755 1800
1756 if(volume_info.noperm) 1801 if(volume_info.noperm)
1757 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; 1802 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -1767,6 +1812,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1767 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; 1812 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
1768 if(volume_info.nobrl) 1813 if(volume_info.nobrl)
1769 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; 1814 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
1815 if(volume_info.cifs_acl)
1816 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
1770 1817
1771 if(volume_info.direct_io) { 1818 if(volume_info.direct_io) {
1772 cFYI(1,("mounting share using direct i/o")); 1819 cFYI(1,("mounting share using direct i/o"));
@@ -1777,7 +1824,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1777 find_unc(sin_server.sin_addr.s_addr, volume_info.UNC, 1824 find_unc(sin_server.sin_addr.s_addr, volume_info.UNC,
1778 volume_info.username); 1825 volume_info.username);
1779 if (tcon) { 1826 if (tcon) {
1780 cFYI(1, ("Found match on UNC path ")); 1827 cFYI(1, ("Found match on UNC path"));
1781 /* we can have only one retry value for a connection 1828 /* we can have only one retry value for a connection
1782 to a share so for resources mounted more than once 1829 to a share so for resources mounted more than once
1783 to the same server share the last value passed in 1830 to the same server share the last value passed in
@@ -1926,7 +1973,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
1926 __u32 capabilities; 1973 __u32 capabilities;
1927 __u16 count; 1974 __u16 count;
1928 1975
1929 cFYI(1, ("In sesssetup ")); 1976 cFYI(1, ("In sesssetup"));
1930 if(ses == NULL) 1977 if(ses == NULL)
1931 return -EINVAL; 1978 return -EINVAL;
1932 user = ses->userName; 1979 user = ses->userName;
@@ -3202,9 +3249,26 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3202 3249
3203 pSMB->AndXCommand = 0xFF; 3250 pSMB->AndXCommand = 0xFF;
3204 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); 3251 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
3205 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
3206 bcc_ptr = &pSMB->Password[0]; 3252 bcc_ptr = &pSMB->Password[0];
3207 bcc_ptr++; /* skip password */ 3253 if((ses->server->secMode) & SECMODE_USER) {
3254 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
3255 bcc_ptr++; /* skip password */
3256 } else {
3257 pSMB->PasswordLength = cpu_to_le16(CIFS_SESSION_KEY_SIZE);
3258 /* BB FIXME add code to fail this if NTLMv2 or Kerberos
3259 specified as required (when that support is added to
3260 the vfs in the future) as only NTLM or the much
3261 weaker LANMAN (which we do not send) is accepted
3262 by Samba (not sure whether other servers allow
3263 NTLMv2 password here) */
3264 SMBNTencrypt(ses->password,
3265 ses->server->cryptKey,
3266 bcc_ptr);
3267
3268 bcc_ptr += CIFS_SESSION_KEY_SIZE;
3269 *bcc_ptr = 0;
3270 bcc_ptr++; /* align */
3271 }
3208 3272
3209 if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 3273 if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
3210 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 3274 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -3222,7 +3286,6 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3222 bcc_ptr += 2 * length; /* convert num of 16 bit words to bytes */ 3286 bcc_ptr += 2 * length; /* convert num of 16 bit words to bytes */
3223 bcc_ptr += 2; /* skip trailing null */ 3287 bcc_ptr += 2; /* skip trailing null */
3224 } else { /* ASCII */ 3288 } else { /* ASCII */
3225
3226 strcpy(bcc_ptr, tree); 3289 strcpy(bcc_ptr, tree);
3227 bcc_ptr += strlen(tree) + 1; 3290 bcc_ptr += strlen(tree) + 1;
3228 } 3291 }
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 32cc96cafa3..fed55e3c53d 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * vfs operations that deal with dentries 4 * vfs operations that deal with dentries
5 * 5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003 6 * Copyright (C) International Business Machines Corp., 2002,2005
7 * Author(s): Steve French (sfrench@us.ibm.com) 7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * 8 *
9 * This library is free software; you can redistribute it and/or modify 9 * This library is free software; you can redistribute it and/or modify
@@ -200,8 +200,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
200 (oplock & CIFS_CREATE_ACTION)) 200 (oplock & CIFS_CREATE_ACTION))
201 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 201 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
202 CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode, 202 CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
203 (__u64)current->euid, 203 (__u64)current->fsuid,
204 (__u64)current->egid, 204 (__u64)current->fsgid,
205 0 /* dev */, 205 0 /* dev */,
206 cifs_sb->local_nls, 206 cifs_sb->local_nls,
207 cifs_sb->mnt_cifs_flags & 207 cifs_sb->mnt_cifs_flags &
@@ -325,7 +325,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
325 else if (pTcon->ses->capabilities & CAP_UNIX) { 325 else if (pTcon->ses->capabilities & CAP_UNIX) {
326 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 326 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
327 rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path, 327 rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
328 mode,(__u64)current->euid,(__u64)current->egid, 328 mode,(__u64)current->fsuid,(__u64)current->fsgid,
329 device_number, cifs_sb->local_nls, 329 device_number, cifs_sb->local_nls,
330 cifs_sb->mnt_cifs_flags & 330 cifs_sb->mnt_cifs_flags &
331 CIFS_MOUNT_MAP_SPECIAL_CHR); 331 CIFS_MOUNT_MAP_SPECIAL_CHR);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5ade53d7bca..77c990f0cb9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -553,13 +553,13 @@ int cifs_closedir(struct inode *inode, struct file *file)
553 } 553 }
554 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start; 554 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
555 if (ptmp) { 555 if (ptmp) {
556 /* BB removeme BB */ cFYI(1, ("freeing smb buf in srch struct in closedir")); 556 cFYI(1, ("closedir free smb buf in srch struct"));
557 pCFileStruct->srch_inf.ntwrk_buf_start = NULL; 557 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
558 cifs_buf_release(ptmp); 558 cifs_buf_release(ptmp);
559 } 559 }
560 ptmp = pCFileStruct->search_resume_name; 560 ptmp = pCFileStruct->search_resume_name;
561 if (ptmp) { 561 if (ptmp) {
562 /* BB removeme BB */ cFYI(1, ("freeing resume name in closedir")); 562 cFYI(1, ("closedir free resume name"));
563 pCFileStruct->search_resume_name = NULL; 563 pCFileStruct->search_resume_name = NULL;
564 kfree(ptmp); 564 kfree(ptmp);
565 } 565 }
@@ -868,10 +868,9 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
868 if (rc != 0) 868 if (rc != 0)
869 break; 869 break;
870 } 870 }
871#ifdef CONFIG_CIFS_EXPERIMENTAL
872 /* BB FIXME We can not sign across two buffers yet */ 871 /* BB FIXME We can not sign across two buffers yet */
873 if((experimEnabled) && ((pTcon->ses->server->secMode & 872 if((pTcon->ses->server->secMode &
874 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0)) { 873 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0) {
875 struct kvec iov[2]; 874 struct kvec iov[2];
876 unsigned int len; 875 unsigned int len;
877 876
@@ -887,7 +886,6 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
887 iov, 1, long_op); 886 iov, 1, long_op);
888 } else 887 } else
889 /* BB FIXME fixup indentation of line below */ 888 /* BB FIXME fixup indentation of line below */
890#endif
891 rc = CIFSSMBWrite(xid, pTcon, 889 rc = CIFSSMBWrite(xid, pTcon,
892 open_file->netfid, 890 open_file->netfid,
893 min_t(const int, cifs_sb->wsize, 891 min_t(const int, cifs_sb->wsize,
@@ -1024,7 +1022,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1024 return rc; 1022 return rc;
1025} 1023}
1026 1024
1027#ifdef CONFIG_CIFS_EXPERIMENTAL
1028static int cifs_writepages(struct address_space *mapping, 1025static int cifs_writepages(struct address_space *mapping,
1029 struct writeback_control *wbc) 1026 struct writeback_control *wbc)
1030{ 1027{
@@ -1227,7 +1224,6 @@ retry:
1227 1224
1228 return rc; 1225 return rc;
1229} 1226}
1230#endif
1231 1227
1232static int cifs_writepage(struct page* page, struct writeback_control *wbc) 1228static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1233{ 1229{
@@ -1426,6 +1422,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
1426 rc = -EAGAIN; 1422 rc = -EAGAIN;
1427 smb_read_data = NULL; 1423 smb_read_data = NULL;
1428 while (rc == -EAGAIN) { 1424 while (rc == -EAGAIN) {
1425 int buf_type = CIFS_NO_BUFFER;
1429 if ((open_file->invalidHandle) && 1426 if ((open_file->invalidHandle) &&
1430 (!open_file->closePend)) { 1427 (!open_file->closePend)) {
1431 rc = cifs_reopen_file(file->f_dentry->d_inode, 1428 rc = cifs_reopen_file(file->f_dentry->d_inode,
@@ -1434,20 +1431,22 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
1434 break; 1431 break;
1435 } 1432 }
1436 rc = CIFSSMBRead(xid, pTcon, 1433 rc = CIFSSMBRead(xid, pTcon,
1437 open_file->netfid, 1434 open_file->netfid,
1438 current_read_size, *poffset, 1435 current_read_size, *poffset,
1439 &bytes_read, &smb_read_data); 1436 &bytes_read, &smb_read_data,
1437 &buf_type);
1440 pSMBr = (struct smb_com_read_rsp *)smb_read_data; 1438 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1441 if (copy_to_user(current_offset, 1439 if (copy_to_user(current_offset,
1442 smb_read_data + 4 /* RFC1001 hdr */ 1440 smb_read_data + 4 /* RFC1001 hdr */
1443 + le16_to_cpu(pSMBr->DataOffset), 1441 + le16_to_cpu(pSMBr->DataOffset),
1444 bytes_read)) { 1442 bytes_read)) {
1445 rc = -EFAULT; 1443 rc = -EFAULT;
1446 FreeXid(xid); 1444 }
1447 return rc;
1448 }
1449 if (smb_read_data) { 1445 if (smb_read_data) {
1450 cifs_buf_release(smb_read_data); 1446 if(buf_type == CIFS_SMALL_BUFFER)
1447 cifs_small_buf_release(smb_read_data);
1448 else if(buf_type == CIFS_LARGE_BUFFER)
1449 cifs_buf_release(smb_read_data);
1451 smb_read_data = NULL; 1450 smb_read_data = NULL;
1452 } 1451 }
1453 } 1452 }
@@ -1480,6 +1479,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1480 int xid; 1479 int xid;
1481 char *current_offset; 1480 char *current_offset;
1482 struct cifsFileInfo *open_file; 1481 struct cifsFileInfo *open_file;
1482 int buf_type = CIFS_NO_BUFFER;
1483 1483
1484 xid = GetXid(); 1484 xid = GetXid();
1485 cifs_sb = CIFS_SB(file->f_dentry->d_sb); 1485 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
@@ -1516,9 +1516,10 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1516 break; 1516 break;
1517 } 1517 }
1518 rc = CIFSSMBRead(xid, pTcon, 1518 rc = CIFSSMBRead(xid, pTcon,
1519 open_file->netfid, 1519 open_file->netfid,
1520 current_read_size, *poffset, 1520 current_read_size, *poffset,
1521 &bytes_read, &current_offset); 1521 &bytes_read, &current_offset,
1522 &buf_type);
1522 } 1523 }
1523 if (rc || (bytes_read == 0)) { 1524 if (rc || (bytes_read == 0)) {
1524 if (total_read) { 1525 if (total_read) {
@@ -1616,6 +1617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
1616 struct smb_com_read_rsp *pSMBr; 1617 struct smb_com_read_rsp *pSMBr;
1617 struct pagevec lru_pvec; 1618 struct pagevec lru_pvec;
1618 struct cifsFileInfo *open_file; 1619 struct cifsFileInfo *open_file;
1620 int buf_type = CIFS_NO_BUFFER;
1619 1621
1620 xid = GetXid(); 1622 xid = GetXid();
1621 if (file->private_data == NULL) { 1623 if (file->private_data == NULL) {
@@ -1672,14 +1674,17 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
1672 } 1674 }
1673 1675
1674 rc = CIFSSMBRead(xid, pTcon, 1676 rc = CIFSSMBRead(xid, pTcon,
1675 open_file->netfid, 1677 open_file->netfid,
1676 read_size, offset, 1678 read_size, offset,
1677 &bytes_read, &smb_read_data); 1679 &bytes_read, &smb_read_data,
1678 1680 &buf_type);
1679 /* BB more RC checks ? */ 1681 /* BB more RC checks ? */
1680 if (rc== -EAGAIN) { 1682 if (rc== -EAGAIN) {
1681 if (smb_read_data) { 1683 if (smb_read_data) {
1682 cifs_buf_release(smb_read_data); 1684 if(buf_type == CIFS_SMALL_BUFFER)
1685 cifs_small_buf_release(smb_read_data);
1686 else if(buf_type == CIFS_LARGE_BUFFER)
1687 cifs_buf_release(smb_read_data);
1683 smb_read_data = NULL; 1688 smb_read_data = NULL;
1684 } 1689 }
1685 } 1690 }
@@ -1736,7 +1741,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
1736 break; 1741 break;
1737 } 1742 }
1738 if (smb_read_data) { 1743 if (smb_read_data) {
1739 cifs_buf_release(smb_read_data); 1744 if(buf_type == CIFS_SMALL_BUFFER)
1745 cifs_small_buf_release(smb_read_data);
1746 else if(buf_type == CIFS_LARGE_BUFFER)
1747 cifs_buf_release(smb_read_data);
1740 smb_read_data = NULL; 1748 smb_read_data = NULL;
1741 } 1749 }
1742 bytes_read = 0; 1750 bytes_read = 0;
@@ -1746,7 +1754,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
1746 1754
1747/* need to free smb_read_data buf before exit */ 1755/* need to free smb_read_data buf before exit */
1748 if (smb_read_data) { 1756 if (smb_read_data) {
1749 cifs_buf_release(smb_read_data); 1757 if(buf_type == CIFS_SMALL_BUFFER)
1758 cifs_small_buf_release(smb_read_data);
1759 else if(buf_type == CIFS_LARGE_BUFFER)
1760 cifs_buf_release(smb_read_data);
1750 smb_read_data = NULL; 1761 smb_read_data = NULL;
1751 } 1762 }
1752 1763
@@ -1825,10 +1836,20 @@ int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1825 open_file = find_writable_file(cifsInode); 1836 open_file = find_writable_file(cifsInode);
1826 1837
1827 if(open_file) { 1838 if(open_file) {
1839 struct cifs_sb_info *cifs_sb;
1840
1828 /* there is not actually a write pending so let 1841 /* there is not actually a write pending so let
1829 this handle go free and allow it to 1842 this handle go free and allow it to
1830 be closable if needed */ 1843 be closable if needed */
1831 atomic_dec(&open_file->wrtPending); 1844 atomic_dec(&open_file->wrtPending);
1845
1846 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1847 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1848 /* since no page cache to corrupt on directio
1849 we can change size safely */
1850 return 1;
1851 }
1852
1832 return 0; 1853 return 0;
1833 } else 1854 } else
1834 return 1; 1855 return 1;
@@ -1873,9 +1894,7 @@ struct address_space_operations cifs_addr_ops = {
1873 .readpage = cifs_readpage, 1894 .readpage = cifs_readpage,
1874 .readpages = cifs_readpages, 1895 .readpages = cifs_readpages,
1875 .writepage = cifs_writepage, 1896 .writepage = cifs_writepage,
1876#ifdef CONFIG_CIFS_EXPERIMENTAL
1877 .writepages = cifs_writepages, 1897 .writepages = cifs_writepages,
1878#endif
1879 .prepare_write = cifs_prepare_write, 1898 .prepare_write = cifs_prepare_write,
1880 .commit_write = cifs_commit_write, 1899 .commit_write = cifs_commit_write,
1881 .set_page_dirty = __set_page_dirty_nobuffers, 1900 .set_page_dirty = __set_page_dirty_nobuffers,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 3ebce9430f4..59359911f48 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -229,11 +229,12 @@ static int decode_sfu_inode(struct inode * inode, __u64 size,
229 cifs_sb->mnt_cifs_flags & 229 cifs_sb->mnt_cifs_flags &
230 CIFS_MOUNT_MAP_SPECIAL_CHR); 230 CIFS_MOUNT_MAP_SPECIAL_CHR);
231 if (rc==0) { 231 if (rc==0) {
232 int buf_type = CIFS_NO_BUFFER;
232 /* Read header */ 233 /* Read header */
233 rc = CIFSSMBRead(xid, pTcon, 234 rc = CIFSSMBRead(xid, pTcon,
234 netfid, 235 netfid,
235 24 /* length */, 0 /* offset */, 236 24 /* length */, 0 /* offset */,
236 &bytes_read, &pbuf); 237 &bytes_read, &pbuf, &buf_type);
237 if((rc == 0) && (bytes_read >= 8)) { 238 if((rc == 0) && (bytes_read >= 8)) {
238 if(memcmp("IntxBLK", pbuf, 8) == 0) { 239 if(memcmp("IntxBLK", pbuf, 8) == 0) {
239 cFYI(1,("Block device")); 240 cFYI(1,("Block device"));
@@ -267,7 +268,7 @@ static int decode_sfu_inode(struct inode * inode, __u64 size,
267 } else { 268 } else {
268 inode->i_mode |= S_IFREG; /* then it is a file */ 269 inode->i_mode |= S_IFREG; /* then it is a file */
269 rc = -EOPNOTSUPP; /* or some unknown SFU type */ 270 rc = -EOPNOTSUPP; /* or some unknown SFU type */
270 } 271 }
271 CIFSSMBClose(xid, pTcon, netfid); 272 CIFSSMBClose(xid, pTcon, netfid);
272 } 273 }
273 return rc; 274 return rc;
@@ -750,8 +751,8 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
750 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 751 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
751 CIFSSMBUnixSetPerms(xid, pTcon, full_path, 752 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
752 mode, 753 mode,
753 (__u64)current->euid, 754 (__u64)current->fsuid,
754 (__u64)current->egid, 755 (__u64)current->fsgid,
755 0 /* dev_t */, 756 0 /* dev_t */,
756 cifs_sb->local_nls, 757 cifs_sb->local_nls,
757 cifs_sb->mnt_cifs_flags & 758 cifs_sb->mnt_cifs_flags &
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 94baf6c8ecb..812c6bb0fe3 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/misc.c 2 * fs/cifs/misc.c
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2004 4 * Copyright (C) International Business Machines Corp., 2002,2005
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
@@ -161,6 +161,9 @@ cifs_buf_get(void)
161 if (ret_buf) { 161 if (ret_buf) {
162 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3); 162 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
163 atomic_inc(&bufAllocCount); 163 atomic_inc(&bufAllocCount);
164#ifdef CONFIG_CIFS_STATS2
165 atomic_inc(&totBufAllocCount);
166#endif /* CONFIG_CIFS_STATS2 */
164 } 167 }
165 168
166 return ret_buf; 169 return ret_buf;
@@ -195,6 +198,10 @@ cifs_small_buf_get(void)
195 /* No need to clear memory here, cleared in header assemble */ 198 /* No need to clear memory here, cleared in header assemble */
196 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ 199 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
197 atomic_inc(&smBufAllocCount); 200 atomic_inc(&smBufAllocCount);
201#ifdef CONFIG_CIFS_STATS2
202 atomic_inc(&totSmBufAllocCount);
203#endif /* CONFIG_CIFS_STATS2 */
204
198 } 205 }
199 return ret_buf; 206 return ret_buf;
200} 207}
@@ -292,7 +299,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
292 struct cifsSesInfo * ses; 299 struct cifsSesInfo * ses;
293 char *temp = (char *) buffer; 300 char *temp = (char *) buffer;
294 301
295 memset(temp,0,MAX_CIFS_HDR_SIZE); 302 memset(temp,0,256); /* bigger than MAX_CIFS_HDR_SIZE */
296 303
297 buffer->smb_buf_length = 304 buffer->smb_buf_length =
298 (2 * word_count) + sizeof (struct smb_hdr) - 305 (2 * word_count) + sizeof (struct smb_hdr) -
@@ -348,12 +355,12 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
348 /* BB Add support for establishing new tCon and SMB Session */ 355 /* BB Add support for establishing new tCon and SMB Session */
349 /* with userid/password pairs found on the smb session */ 356 /* with userid/password pairs found on the smb session */
350 /* for other target tcp/ip addresses BB */ 357 /* for other target tcp/ip addresses BB */
351 if(current->uid != treeCon->ses->linux_uid) { 358 if(current->fsuid != treeCon->ses->linux_uid) {
352 cFYI(1,("Multiuser mode and UID did not match tcon uid ")); 359 cFYI(1,("Multiuser mode and UID did not match tcon uid"));
353 read_lock(&GlobalSMBSeslock); 360 read_lock(&GlobalSMBSeslock);
354 list_for_each(temp_item, &GlobalSMBSessionList) { 361 list_for_each(temp_item, &GlobalSMBSessionList) {
355 ses = list_entry(temp_item, struct cifsSesInfo, cifsSessionList); 362 ses = list_entry(temp_item, struct cifsSesInfo, cifsSessionList);
356 if(ses->linux_uid == current->uid) { 363 if(ses->linux_uid == current->fsuid) {
357 if(ses->server == treeCon->ses->server) { 364 if(ses->server == treeCon->ses->server) {
358 cFYI(1,("found matching uid substitute right smb_uid")); 365 cFYI(1,("found matching uid substitute right smb_uid"));
359 buffer->Uid = ses->Suid; 366 buffer->Uid = ses->Suid;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 9bdaaecae36..288cc048d37 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -214,8 +214,7 @@ static void fill_in_inode(struct inode *tmp_inode,
214 tmp_inode->i_fop = &cifs_file_nobrl_ops; 214 tmp_inode->i_fop = &cifs_file_nobrl_ops;
215 else 215 else
216 tmp_inode->i_fop = &cifs_file_ops; 216 tmp_inode->i_fop = &cifs_file_ops;
217 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 217
218 tmp_inode->i_fop->lock = NULL;
219 tmp_inode->i_data.a_ops = &cifs_addr_ops; 218 tmp_inode->i_data.a_ops = &cifs_addr_ops;
220 if((cifs_sb->tcon) && (cifs_sb->tcon->ses) && 219 if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
221 (cifs_sb->tcon->ses->server->maxBuf < 220 (cifs_sb->tcon->ses->server->maxBuf <
@@ -327,12 +326,18 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
327 if (S_ISREG(tmp_inode->i_mode)) { 326 if (S_ISREG(tmp_inode->i_mode)) {
328 cFYI(1, ("File inode")); 327 cFYI(1, ("File inode"));
329 tmp_inode->i_op = &cifs_file_inode_ops; 328 tmp_inode->i_op = &cifs_file_inode_ops;
330 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) 329
331 tmp_inode->i_fop = &cifs_file_direct_ops; 330 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
331 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
332 tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
333 else
334 tmp_inode->i_fop = &cifs_file_direct_ops;
335
336 } else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
337 tmp_inode->i_fop = &cifs_file_nobrl_ops;
332 else 338 else
333 tmp_inode->i_fop = &cifs_file_ops; 339 tmp_inode->i_fop = &cifs_file_ops;
334 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 340
335 tmp_inode->i_fop->lock = NULL;
336 tmp_inode->i_data.a_ops = &cifs_addr_ops; 341 tmp_inode->i_data.a_ops = &cifs_addr_ops;
337 if((cifs_sb->tcon) && (cifs_sb->tcon->ses) && 342 if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
338 (cifs_sb->tcon->ses->server->maxBuf < 343 (cifs_sb->tcon->ses->server->maxBuf <
diff --git a/fs/cifs/rfc1002pdu.h b/fs/cifs/rfc1002pdu.h
index 9222033cad8..aede606132a 100644
--- a/fs/cifs/rfc1002pdu.h
+++ b/fs/cifs/rfc1002pdu.h
@@ -24,11 +24,11 @@
24/* NB: unlike smb/cifs packets, the RFC1002 structures are big endian */ 24/* NB: unlike smb/cifs packets, the RFC1002 structures are big endian */
25 25
26 /* RFC 1002 session packet types */ 26 /* RFC 1002 session packet types */
27#define RFC1002_SESSION_MESASAGE 0x00 27#define RFC1002_SESSION_MESSAGE 0x00
28#define RFC1002_SESSION_REQUEST 0x81 28#define RFC1002_SESSION_REQUEST 0x81
29#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82 29#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
30#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83 30#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
31#define RFC1002_RETARGET_SESSION_RESPONSE 0x83 31#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
32#define RFC1002_SESSION_KEEP_ALIVE 0x85 32#define RFC1002_SESSION_KEEP_ALIVE 0x85
33 33
34 /* RFC 1002 flags (only one defined */ 34 /* RFC 1002 flags (only one defined */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f8871196098..7b98792150e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -206,7 +206,6 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
206 return rc; 206 return rc;
207} 207}
208 208
209#ifdef CONFIG_CIFS_EXPERIMENTAL
210static int 209static int
211smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec, 210smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
212 struct sockaddr *sin) 211 struct sockaddr *sin)
@@ -299,7 +298,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
299 298
300int 299int
301SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, 300SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
302 struct kvec *iov, int n_vec, int *pbytes_returned, 301 struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
303 const int long_op) 302 const int long_op)
304{ 303{
305 int rc = 0; 304 int rc = 0;
@@ -307,6 +306,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
307 unsigned long timeout; 306 unsigned long timeout;
308 struct mid_q_entry *midQ; 307 struct mid_q_entry *midQ;
309 struct smb_hdr *in_buf = iov[0].iov_base; 308 struct smb_hdr *in_buf = iov[0].iov_base;
309
310 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
310 311
311 if (ses == NULL) { 312 if (ses == NULL) {
312 cERROR(1,("Null smb session")); 313 cERROR(1,("Null smb session"));
@@ -392,8 +393,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
392 return -ENOMEM; 393 return -ENOMEM;
393 } 394 }
394 395
395/* BB FIXME */ 396 rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
396/* rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); */
397 397
398 midQ->midState = MID_REQUEST_SUBMITTED; 398 midQ->midState = MID_REQUEST_SUBMITTED;
399#ifdef CONFIG_CIFS_STATS2 399#ifdef CONFIG_CIFS_STATS2
@@ -489,21 +489,23 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
489 receive_len, xid)); 489 receive_len, xid));
490 rc = -EIO; 490 rc = -EIO;
491 } else { /* rcvd frame is ok */ 491 } else { /* rcvd frame is ok */
492
493 if (midQ->resp_buf && 492 if (midQ->resp_buf &&
494 (midQ->midState == MID_RESPONSE_RECEIVED)) { 493 (midQ->midState == MID_RESPONSE_RECEIVED)) {
495 in_buf->smb_buf_length = receive_len;
496 /* BB verify that length would not overrun small buf */
497 memcpy((char *)in_buf + 4,
498 (char *)midQ->resp_buf + 4,
499 receive_len);
500 494
501 dump_smb(in_buf, 80); 495 iov[0].iov_base = (char *)midQ->resp_buf;
496 if(midQ->largeBuf)
497 *pRespBufType = CIFS_LARGE_BUFFER;
498 else
499 *pRespBufType = CIFS_SMALL_BUFFER;
500 iov[0].iov_len = receive_len + 4;
501 iov[1].iov_len = 0;
502
503 dump_smb(midQ->resp_buf, 80);
502 /* convert the length into a more usable form */ 504 /* convert the length into a more usable form */
503 if((receive_len > 24) && 505 if((receive_len > 24) &&
504 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 506 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
505 SECMODE_SIGN_ENABLED))) { 507 SECMODE_SIGN_ENABLED))) {
506 rc = cifs_verify_signature(in_buf, 508 rc = cifs_verify_signature(midQ->resp_buf,
507 ses->server->mac_signing_key, 509 ses->server->mac_signing_key,
508 midQ->sequence_number+1); 510 midQ->sequence_number+1);
509 if(rc) { 511 if(rc) {
@@ -512,18 +514,19 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
512 } 514 }
513 } 515 }
514 516
515 *pbytes_returned = in_buf->smb_buf_length;
516
517 /* BB special case reconnect tid and uid here? */ 517 /* BB special case reconnect tid and uid here? */
518 /* BB special case Errbadpassword and pwdexpired here */ 518 /* BB special case Errbadpassword and pwdexpired here */
519 rc = map_smb_to_linux_error(in_buf); 519 rc = map_smb_to_linux_error(midQ->resp_buf);
520 520
521 /* convert ByteCount if necessary */ 521 /* convert ByteCount if necessary */
522 if (receive_len >= 522 if (receive_len >=
523 sizeof (struct smb_hdr) - 523 sizeof (struct smb_hdr) -
524 4 /* do not count RFC1001 header */ + 524 4 /* do not count RFC1001 header */ +
525 (2 * in_buf->WordCount) + 2 /* bcc */ ) 525 (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
526 BCC(in_buf) = le16_to_cpu(BCC_LE(in_buf)); 526 BCC(midQ->resp_buf) =
527 le16_to_cpu(BCC_LE(midQ->resp_buf));
528 midQ->resp_buf = NULL; /* mark it so will not be freed
529 by DeleteMidQEntry */
527 } else { 530 } else {
528 rc = -EIO; 531 rc = -EIO;
529 cFYI(1,("Bad MID state?")); 532 cFYI(1,("Bad MID state?"));
@@ -549,7 +552,6 @@ out_unlock2:
549 552
550 return rc; 553 return rc;
551} 554}
552#endif /* CIFS_EXPERIMENTAL */
553 555
554int 556int
555SendReceive(const unsigned int xid, struct cifsSesInfo *ses, 557SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
@@ -790,7 +792,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
790 BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); 792 BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
791 } else { 793 } else {
792 rc = -EIO; 794 rc = -EIO;
793 cERROR(1,("Bad MID state? ")); 795 cERROR(1,("Bad MID state?"));
794 } 796 }
795 } 797 }
796cifs_no_response_exit: 798cifs_no_response_exit:
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index f375f87c7db..777e3363c2a 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -254,7 +254,8 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
254 rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value, 254 rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
255 buf_size, cifs_sb->local_nls, 255 buf_size, cifs_sb->local_nls,
256 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 256 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
257 } else if(strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,strlen(POSIX_ACL_XATTR_ACCESS)) == 0) { 257 } else if(strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,
258 strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
258#ifdef CONFIG_CIFS_POSIX 259#ifdef CONFIG_CIFS_POSIX
259 if(sb->s_flags & MS_POSIXACL) 260 if(sb->s_flags & MS_POSIXACL)
260 rc = CIFSSMBGetPosixACL(xid, pTcon, full_path, 261 rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
@@ -262,10 +263,27 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
262 cifs_sb->local_nls, 263 cifs_sb->local_nls,
263 cifs_sb->mnt_cifs_flags & 264 cifs_sb->mnt_cifs_flags &
264 CIFS_MOUNT_MAP_SPECIAL_CHR); 265 CIFS_MOUNT_MAP_SPECIAL_CHR);
266/* else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
267 __u16 fid;
268 int oplock = FALSE;
269 rc = CIFSSMBOpen(xid, pTcon, full_path,
270 FILE_OPEN, GENERIC_READ, 0, &fid,
271 &oplock, NULL, cifs_sb->local_nls,
272 cifs_sb->mnt_cifs_flags &
273 CIFS_MOUNT_MAP_SPECIAL_CHR);
274 if(rc == 0) {
275 rc = CIFSSMBGetCIFSACL(xid, pTcon, fid,
276 ea_value, buf_size,
277 ACL_TYPE_ACCESS);
278 CIFSSMBClose(xid, pTcon, fid)
279 }
280 } */ /* BB enable after fixing up return data */
281
265#else 282#else
266 cFYI(1,("query POSIX ACL not supported yet")); 283 cFYI(1,("query POSIX ACL not supported yet"));
267#endif /* CONFIG_CIFS_POSIX */ 284#endif /* CONFIG_CIFS_POSIX */
268 } else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) { 285 } else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,
286 strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
269#ifdef CONFIG_CIFS_POSIX 287#ifdef CONFIG_CIFS_POSIX
270 if(sb->s_flags & MS_POSIXACL) 288 if(sb->s_flags & MS_POSIXACL)
271 rc = CIFSSMBGetPosixACL(xid, pTcon, full_path, 289 rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
diff --git a/fs/compat.c b/fs/compat.c
index 2468ac1df2f..ff0bafcff72 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -53,6 +53,8 @@
53#include <asm/mmu_context.h> 53#include <asm/mmu_context.h>
54#include <asm/ioctls.h> 54#include <asm/ioctls.h>
55 55
56extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
57
56/* 58/*
57 * Not all architectures have sys_utime, so implement this in terms 59 * Not all architectures have sys_utime, so implement this in terms
58 * of sys_utimes. 60 * of sys_utimes.
@@ -68,10 +70,10 @@ asmlinkage long compat_sys_utime(char __user *filename, struct compat_utimbuf __
68 tv[0].tv_usec = 0; 70 tv[0].tv_usec = 0;
69 tv[1].tv_usec = 0; 71 tv[1].tv_usec = 0;
70 } 72 }
71 return do_utimes(filename, t ? tv : NULL); 73 return do_utimes(AT_FDCWD, filename, t ? tv : NULL);
72} 74}
73 75
74asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval __user *t) 76asmlinkage long compat_sys_futimesat(int dfd, char __user *filename, struct compat_timeval __user *t)
75{ 77{
76 struct timeval tv[2]; 78 struct timeval tv[2];
77 79
@@ -82,14 +84,19 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
82 get_user(tv[1].tv_usec, &t[1].tv_usec)) 84 get_user(tv[1].tv_usec, &t[1].tv_usec))
83 return -EFAULT; 85 return -EFAULT;
84 } 86 }
85 return do_utimes(filename, t ? tv : NULL); 87 return do_utimes(dfd, filename, t ? tv : NULL);
88}
89
90asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval __user *t)
91{
92 return compat_sys_futimesat(AT_FDCWD, filename, t);
86} 93}
87 94
88asmlinkage long compat_sys_newstat(char __user * filename, 95asmlinkage long compat_sys_newstat(char __user * filename,
89 struct compat_stat __user *statbuf) 96 struct compat_stat __user *statbuf)
90{ 97{
91 struct kstat stat; 98 struct kstat stat;
92 int error = vfs_stat(filename, &stat); 99 int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
93 100
94 if (!error) 101 if (!error)
95 error = cp_compat_stat(&stat, statbuf); 102 error = cp_compat_stat(&stat, statbuf);
@@ -100,10 +107,31 @@ asmlinkage long compat_sys_newlstat(char __user * filename,
100 struct compat_stat __user *statbuf) 107 struct compat_stat __user *statbuf)
101{ 108{
102 struct kstat stat; 109 struct kstat stat;
103 int error = vfs_lstat(filename, &stat); 110 int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
111
112 if (!error)
113 error = cp_compat_stat(&stat, statbuf);
114 return error;
115}
116
117asmlinkage long compat_sys_newfstatat(int dfd, char __user *filename,
118 struct compat_stat __user *statbuf, int flag)
119{
120 struct kstat stat;
121 int error = -EINVAL;
122
123 if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
124 goto out;
125
126 if (flag & AT_SYMLINK_NOFOLLOW)
127 error = vfs_lstat_fd(dfd, filename, &stat);
128 else
129 error = vfs_stat_fd(dfd, filename, &stat);
104 130
105 if (!error) 131 if (!error)
106 error = cp_compat_stat(&stat, statbuf); 132 error = cp_compat_stat(&stat, statbuf);
133
134out:
107 return error; 135 return error;
108} 136}
109 137
@@ -1290,7 +1318,17 @@ out:
1290asmlinkage long 1318asmlinkage long
1291compat_sys_open(const char __user *filename, int flags, int mode) 1319compat_sys_open(const char __user *filename, int flags, int mode)
1292{ 1320{
1293 return do_sys_open(filename, flags, mode); 1321 return do_sys_open(AT_FDCWD, filename, flags, mode);
1322}
1323
1324/*
1325 * Exactly like fs/open.c:sys_openat(), except that it doesn't set the
1326 * O_LARGEFILE flag.
1327 */
1328asmlinkage long
1329compat_sys_openat(int dfd, const char __user *filename, int flags, int mode)
1330{
1331 return do_sys_open(dfd, filename, flags, mode);
1294} 1332}
1295 1333
1296/* 1334/*
@@ -1621,36 +1659,14 @@ static void select_bits_free(void *bits, int size)
1621#define MAX_SELECT_SECONDS \ 1659#define MAX_SELECT_SECONDS \
1622 ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) 1660 ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
1623 1661
1624asmlinkage long 1662int compat_core_sys_select(int n, compat_ulong_t __user *inp,
1625compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, 1663 compat_ulong_t __user *outp, compat_ulong_t __user *exp, s64 *timeout)
1626 compat_ulong_t __user *exp, struct compat_timeval __user *tvp)
1627{ 1664{
1628 fd_set_bits fds; 1665 fd_set_bits fds;
1629 char *bits; 1666 char *bits;
1630 long timeout;
1631 int size, max_fdset, ret = -EINVAL; 1667 int size, max_fdset, ret = -EINVAL;
1632 struct fdtable *fdt; 1668 struct fdtable *fdt;
1633 1669
1634 timeout = MAX_SCHEDULE_TIMEOUT;
1635 if (tvp) {
1636 time_t sec, usec;
1637
1638 if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
1639 || __get_user(sec, &tvp->tv_sec)
1640 || __get_user(usec, &tvp->tv_usec)) {
1641 ret = -EFAULT;
1642 goto out_nofds;
1643 }
1644
1645 if (sec < 0 || usec < 0)
1646 goto out_nofds;
1647
1648 if ((unsigned long) sec < MAX_SELECT_SECONDS) {
1649 timeout = ROUND_UP(usec, 1000000/HZ);
1650 timeout += sec * (unsigned long) HZ;
1651 }
1652 }
1653
1654 if (n < 0) 1670 if (n < 0)
1655 goto out_nofds; 1671 goto out_nofds;
1656 1672
@@ -1687,19 +1703,7 @@ compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp
1687 zero_fd_set(n, fds.res_out); 1703 zero_fd_set(n, fds.res_out);
1688 zero_fd_set(n, fds.res_ex); 1704 zero_fd_set(n, fds.res_ex);
1689 1705
1690 ret = do_select(n, &fds, &timeout); 1706 ret = do_select(n, &fds, timeout);
1691
1692 if (tvp && !(current->personality & STICKY_TIMEOUTS)) {
1693 time_t sec = 0, usec = 0;
1694 if (timeout) {
1695 sec = timeout / HZ;
1696 usec = timeout % HZ;
1697 usec *= (1000000/HZ);
1698 }
1699 if (put_user(sec, &tvp->tv_sec) ||
1700 put_user(usec, &tvp->tv_usec))
1701 ret = -EFAULT;
1702 }
1703 1707
1704 if (ret < 0) 1708 if (ret < 0)
1705 goto out; 1709 goto out;
@@ -1720,6 +1724,224 @@ out_nofds:
1720 return ret; 1724 return ret;
1721} 1725}
1722 1726
1727asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
1728 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1729 struct compat_timeval __user *tvp)
1730{
1731 s64 timeout = -1;
1732 struct compat_timeval tv;
1733 int ret;
1734
1735 if (tvp) {
1736 if (copy_from_user(&tv, tvp, sizeof(tv)))
1737 return -EFAULT;
1738
1739 if (tv.tv_sec < 0 || tv.tv_usec < 0)
1740 return -EINVAL;
1741
1742 /* Cast to u64 to make GCC stop complaining */
1743 if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
1744 timeout = -1; /* infinite */
1745 else {
1746 timeout = ROUND_UP(tv.tv_usec, 1000000/HZ);
1747 timeout += tv.tv_sec * HZ;
1748 }
1749 }
1750
1751 ret = compat_core_sys_select(n, inp, outp, exp, &timeout);
1752
1753 if (tvp) {
1754 if (current->personality & STICKY_TIMEOUTS)
1755 goto sticky;
1756 tv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
1757 tv.tv_sec = timeout;
1758 if (copy_to_user(tvp, &tv, sizeof(tv))) {
1759sticky:
1760 /*
1761 * If an application puts its timeval in read-only
1762 * memory, we don't want the Linux-specific update to
1763 * the timeval to cause a fault after the select has
1764 * completed successfully. However, because we're not
1765 * updating the timeval, we can't restart the system
1766 * call.
1767 */
1768 if (ret == -ERESTARTNOHAND)
1769 ret = -EINTR;
1770 }
1771 }
1772
1773 return ret;
1774}
1775
1776#ifdef TIF_RESTORE_SIGMASK
1777asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
1778 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1779 struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
1780 compat_size_t sigsetsize)
1781{
1782 compat_sigset_t ss32;
1783 sigset_t ksigmask, sigsaved;
1784 long timeout = MAX_SCHEDULE_TIMEOUT;
1785 struct compat_timespec ts;
1786 int ret;
1787
1788 if (tsp) {
1789 if (copy_from_user(&ts, tsp, sizeof(ts)))
1790 return -EFAULT;
1791
1792 if (ts.tv_sec < 0 || ts.tv_nsec < 0)
1793 return -EINVAL;
1794 }
1795
1796 if (sigmask) {
1797 if (sigsetsize != sizeof(compat_sigset_t))
1798 return -EINVAL;
1799 if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
1800 return -EFAULT;
1801 sigset_from_compat(&ksigmask, &ss32);
1802
1803 sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
1804 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
1805 }
1806
1807 do {
1808 if (tsp) {
1809 if ((unsigned long)ts.tv_sec < MAX_SELECT_SECONDS) {
1810 timeout = ROUND_UP(ts.tv_nsec, 1000000000/HZ);
1811 timeout += ts.tv_sec * (unsigned long)HZ;
1812 ts.tv_sec = 0;
1813 ts.tv_nsec = 0;
1814 } else {
1815 ts.tv_sec -= MAX_SELECT_SECONDS;
1816 timeout = MAX_SELECT_SECONDS * HZ;
1817 }
1818 }
1819
1820 ret = compat_core_sys_select(n, inp, outp, exp, &timeout);
1821
1822 } while (!ret && !timeout && tsp && (ts.tv_sec || ts.tv_nsec));
1823
1824 if (tsp && !(current->personality & STICKY_TIMEOUTS)) {
1825 ts.tv_sec += timeout / HZ;
1826 ts.tv_nsec += (timeout % HZ) * (1000000000/HZ);
1827 if (ts.tv_nsec >= 1000000000) {
1828 ts.tv_sec++;
1829 ts.tv_nsec -= 1000000000;
1830 }
1831 (void)copy_to_user(tsp, &ts, sizeof(ts));
1832 }
1833
1834 if (ret == -ERESTARTNOHAND) {
1835 /*
1836 * Don't restore the signal mask yet. Let do_signal() deliver
1837 * the signal on the way back to userspace, before the signal
1838 * mask is restored.
1839 */
1840 if (sigmask) {
1841 memcpy(&current->saved_sigmask, &sigsaved,
1842 sizeof(sigsaved));
1843 set_thread_flag(TIF_RESTORE_SIGMASK);
1844 }
1845 } else if (sigmask)
1846 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1847
1848 return ret;
1849}
1850
1851asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
1852 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1853 struct compat_timespec __user *tsp, void __user *sig)
1854{
1855 compat_size_t sigsetsize = 0;
1856 compat_uptr_t up = 0;
1857
1858 if (sig) {
1859 if (!access_ok(VERIFY_READ, sig,
1860 sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
1861 __get_user(up, (compat_uptr_t __user *)sig) ||
1862 __get_user(sigsetsize,
1863 (compat_size_t __user *)(sig+sizeof(up))))
1864 return -EFAULT;
1865 }
1866 return compat_sys_pselect7(n, inp, outp, exp, tsp, compat_ptr(up),
1867 sigsetsize);
1868}
1869
1870asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
1871 unsigned int nfds, struct compat_timespec __user *tsp,
1872 const compat_sigset_t __user *sigmask, compat_size_t sigsetsize)
1873{
1874 compat_sigset_t ss32;
1875 sigset_t ksigmask, sigsaved;
1876 struct compat_timespec ts;
1877 s64 timeout = -1;
1878 int ret;
1879
1880 if (tsp) {
1881 if (copy_from_user(&ts, tsp, sizeof(ts)))
1882 return -EFAULT;
1883
1884 /* We assume that ts.tv_sec is always lower than
1885 the number of seconds that can be expressed in
1886 an s64. Otherwise the compiler bitches at us */
1887 timeout = ROUND_UP(ts.tv_nsec, 1000000000/HZ);
1888 timeout += ts.tv_sec * HZ;
1889 }
1890
1891 if (sigmask) {
1892 if (sigsetsize |= sizeof(compat_sigset_t))
1893 return -EINVAL;
1894 if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
1895 return -EFAULT;
1896 sigset_from_compat(&ksigmask, &ss32);
1897
1898 sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
1899 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
1900 }
1901
1902 ret = do_sys_poll(ufds, nfds, &timeout);
1903
1904 /* We can restart this syscall, usually */
1905 if (ret == -EINTR) {
1906 /*
1907 * Don't restore the signal mask yet. Let do_signal() deliver
1908 * the signal on the way back to userspace, before the signal
1909 * mask is restored.
1910 */
1911 if (sigmask) {
1912 memcpy(&current->saved_sigmask, &sigsaved,
1913 sizeof(sigsaved));
1914 set_thread_flag(TIF_RESTORE_SIGMASK);
1915 }
1916 ret = -ERESTARTNOHAND;
1917 } else if (sigmask)
1918 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1919
1920 if (tsp && timeout >= 0) {
1921 if (current->personality & STICKY_TIMEOUTS)
1922 goto sticky;
1923 /* Yes, we know it's actually an s64, but it's also positive. */
1924 ts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000;
1925 ts.tv_sec = timeout;
1926 if (copy_to_user(tsp, &ts, sizeof(ts))) {
1927sticky:
1928 /*
1929 * If an application puts its timeval in read-only
1930 * memory, we don't want the Linux-specific update to
1931 * the timeval to cause a fault after the select has
1932 * completed successfully. However, because we're not
1933 * updating the timeval, we can't restart the system
1934 * call.
1935 */
1936 if (ret == -ERESTARTNOHAND && timeout >= 0)
1937 ret = -EINTR;
1938 }
1939 }
1940
1941 return ret;
1942}
1943#endif /* TIF_RESTORE_SIGMASK */
1944
1723#if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE) 1945#if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
1724/* Stuff for NFS server syscalls... */ 1946/* Stuff for NFS server syscalls... */
1725struct compat_nfsctl_svc { 1947struct compat_nfsctl_svc {
diff --git a/fs/exec.c b/fs/exec.c
index 62b40af68cc..055378d2513 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -477,7 +477,7 @@ struct file *open_exec(const char *name)
477 int err; 477 int err;
478 struct file *file; 478 struct file *file;
479 479
480 err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ); 480 err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ);
481 file = ERR_PTR(err); 481 file = ERR_PTR(err);
482 482
483 if (!err) { 483 if (!err) {
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 5bfe40085fb..b06b54f1bbb 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -11,6 +11,33 @@ struct export_operations export_op_default;
11 11
12#define dprintk(fmt, args...) do{}while(0) 12#define dprintk(fmt, args...) do{}while(0)
13 13
14static struct dentry *
15find_acceptable_alias(struct dentry *result,
16 int (*acceptable)(void *context, struct dentry *dentry),
17 void *context)
18{
19 struct dentry *dentry, *toput = NULL;
20
21 spin_lock(&dcache_lock);
22 list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) {
23 dget_locked(dentry);
24 spin_unlock(&dcache_lock);
25 if (toput)
26 dput(toput);
27 if (dentry != result && acceptable(context, dentry)) {
28 dput(result);
29 return dentry;
30 }
31 spin_lock(&dcache_lock);
32 toput = dentry;
33 }
34 spin_unlock(&dcache_lock);
35
36 if (toput)
37 dput(toput);
38 return NULL;
39}
40
14/** 41/**
15 * find_exported_dentry - helper routine to implement export_operations->decode_fh 42 * find_exported_dentry - helper routine to implement export_operations->decode_fh
16 * @sb: The &super_block identifying the filesystem 43 * @sb: The &super_block identifying the filesystem
@@ -52,8 +79,7 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
52 struct dentry *target_dir; 79 struct dentry *target_dir;
53 int err; 80 int err;
54 struct export_operations *nops = sb->s_export_op; 81 struct export_operations *nops = sb->s_export_op;
55 struct list_head *le, *head; 82 struct dentry *alias;
56 struct dentry *toput = NULL;
57 int noprogress; 83 int noprogress;
58 char nbuf[NAME_MAX+1]; 84 char nbuf[NAME_MAX+1];
59 85
@@ -79,27 +105,10 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
79 /* there is no other dentry, so fail */ 105 /* there is no other dentry, so fail */
80 goto err_result; 106 goto err_result;
81 } 107 }
82 /* try any other aliases */ 108
83 spin_lock(&dcache_lock); 109 alias = find_acceptable_alias(result, acceptable, context);
84 head = &result->d_inode->i_dentry; 110 if (alias)
85 list_for_each(le, head) { 111 return alias;
86 struct dentry *dentry = list_entry(le, struct dentry, d_alias);
87 dget_locked(dentry);
88 spin_unlock(&dcache_lock);
89 if (toput)
90 dput(toput);
91 toput = NULL;
92 if (dentry != result &&
93 acceptable(context, dentry)) {
94 dput(result);
95 return dentry;
96 }
97 spin_lock(&dcache_lock);
98 toput = dentry;
99 }
100 spin_unlock(&dcache_lock);
101 if (toput)
102 dput(toput);
103 } 112 }
104 113
105 /* It's a directory, or we are required to confirm the file's 114 /* It's a directory, or we are required to confirm the file's
@@ -258,26 +267,10 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
258 /* now result is properly connected, it is our best bet */ 267 /* now result is properly connected, it is our best bet */
259 if (acceptable(context, result)) 268 if (acceptable(context, result))
260 return result; 269 return result;
261 /* one last try of the aliases.. */ 270
262 spin_lock(&dcache_lock); 271 alias = find_acceptable_alias(result, acceptable, context);
263 toput = NULL; 272 if (alias)
264 head = &result->d_inode->i_dentry; 273 return alias;
265 list_for_each(le, head) {
266 struct dentry *dentry = list_entry(le, struct dentry, d_alias);
267 dget_locked(dentry);
268 spin_unlock(&dcache_lock);
269 if (toput) dput(toput);
270 if (dentry != result &&
271 acceptable(context, dentry)) {
272 dput(result);
273 return dentry;
274 }
275 spin_lock(&dcache_lock);
276 toput = dentry;
277 }
278 spin_unlock(&dcache_lock);
279 if (toput)
280 dput(toput);
281 274
282 /* drat - I just cannot find anything acceptable */ 275 /* drat - I just cannot find anything acceptable */
283 dput(result); 276 dput(result);
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 89450ae3222..f13f1494d4f 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -64,7 +64,6 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
64 else 64 else
65 e = rec - 1; 65 e = rec - 1;
66 } while (b <= e); 66 } while (b <= e);
67 //printk("%d: %d,%d,%d\n", bnode->this, b, e, rec);
68 if (rec != e && e >= 0) { 67 if (rec != e && e >= 0) {
69 len = hfs_brec_lenoff(bnode, e, &off); 68 len = hfs_brec_lenoff(bnode, e, &off);
70 keylen = hfs_brec_keylen(bnode, e); 69 keylen = hfs_brec_keylen(bnode, e);
@@ -127,7 +126,7 @@ int hfs_brec_find(struct hfs_find_data *fd)
127 return res; 126 return res;
128 127
129invalid: 128invalid:
130 printk("HFS: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n", 129 printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
131 height, bnode->height, bnode->type, nidx, parent); 130 height, bnode->height, bnode->type, nidx, parent);
132 res = -EIO; 131 res = -EIO;
133release: 132release:
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 3d5cdc6847c..a7a7d77f3fd 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -198,7 +198,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
198 198
199 // move down? 199 // move down?
200 if (!node->prev && !node->next) { 200 if (!node->prev && !node->next) {
201 printk("hfs_btree_del_level\n"); 201 printk(KERN_DEBUG "hfs_btree_del_level\n");
202 } 202 }
203 if (!node->parent) { 203 if (!node->parent) {
204 tree->root = 0; 204 tree->root = 0;
@@ -219,7 +219,7 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
219 struct hfs_bnode *node; 219 struct hfs_bnode *node;
220 220
221 if (cnid >= tree->node_count) { 221 if (cnid >= tree->node_count) {
222 printk("HFS: request for non-existent node %d in B*Tree\n", cnid); 222 printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
223 return NULL; 223 return NULL;
224 } 224 }
225 225
@@ -242,7 +242,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
242 loff_t off; 242 loff_t off;
243 243
244 if (cnid >= tree->node_count) { 244 if (cnid >= tree->node_count) {
245 printk("HFS: request for non-existent node %d in B*Tree\n", cnid); 245 printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
246 return NULL; 246 return NULL;
247 } 247 }
248 248
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 7d8fff2c25f..5c87cf4801f 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -362,7 +362,7 @@ again:
362 end_off = hfs_bnode_read_u16(parent, end_rec_off); 362 end_off = hfs_bnode_read_u16(parent, end_rec_off);
363 if (end_rec_off - end_off < diff) { 363 if (end_rec_off - end_off < diff) {
364 364
365 printk("splitting index node...\n"); 365 printk(KERN_DEBUG "hfs: splitting index node...\n");
366 fd->bnode = parent; 366 fd->bnode = parent;
367 new_node = hfs_bnode_split(fd); 367 new_node = hfs_bnode_split(fd);
368 if (IS_ERR(new_node)) 368 if (IS_ERR(new_node))
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 394725efa1c..7bb11edd148 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -111,7 +111,7 @@ void hfs_btree_close(struct hfs_btree *tree)
111 while ((node = tree->node_hash[i])) { 111 while ((node = tree->node_hash[i])) {
112 tree->node_hash[i] = node->next_hash; 112 tree->node_hash[i] = node->next_hash;
113 if (atomic_read(&node->refcnt)) 113 if (atomic_read(&node->refcnt))
114 printk("HFS: node %d:%d still has %d user(s)!\n", 114 printk(KERN_ERR "hfs: node %d:%d still has %d user(s)!\n",
115 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 115 node->tree->cnid, node->this, atomic_read(&node->refcnt));
116 hfs_bnode_free(node); 116 hfs_bnode_free(node);
117 tree->node_hash_cnt--; 117 tree->node_hash_cnt--;
@@ -252,7 +252,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
252 kunmap(*pagep); 252 kunmap(*pagep);
253 nidx = node->next; 253 nidx = node->next;
254 if (!nidx) { 254 if (!nidx) {
255 printk("create new bmap node...\n"); 255 printk(KERN_DEBUG "hfs: create new bmap node...\n");
256 next_node = hfs_bmap_new_bmap(node, idx); 256 next_node = hfs_bmap_new_bmap(node, idx);
257 } else 257 } else
258 next_node = hfs_bnode_find(tree, nidx); 258 next_node = hfs_bnode_find(tree, nidx);
@@ -292,7 +292,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
292 hfs_bnode_put(node); 292 hfs_bnode_put(node);
293 if (!i) { 293 if (!i) {
294 /* panic */; 294 /* panic */;
295 printk("HFS: unable to free bnode %u. bmap not found!\n", node->this); 295 printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
296 return; 296 return;
297 } 297 }
298 node = hfs_bnode_find(tree, i); 298 node = hfs_bnode_find(tree, i);
@@ -300,7 +300,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
300 return; 300 return;
301 if (node->type != HFS_NODE_MAP) { 301 if (node->type != HFS_NODE_MAP) {
302 /* panic */; 302 /* panic */;
303 printk("HFS: invalid bmap found! (%u,%d)\n", node->this, node->type); 303 printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
304 hfs_bnode_put(node); 304 hfs_bnode_put(node);
305 return; 305 return;
306 } 306 }
@@ -313,7 +313,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
313 m = 1 << (~nidx & 7); 313 m = 1 << (~nidx & 7);
314 byte = data[off]; 314 byte = data[off];
315 if (!(byte & m)) { 315 if (!(byte & m)) {
316 printk("HFS: trying to free free bnode %u(%d)\n", node->this, node->type); 316 printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
317 kunmap(page); 317 kunmap(page);
318 hfs_bnode_put(node); 318 hfs_bnode_put(node);
319 return; 319 return;
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 2fcd679f023..ba851576ebb 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -184,7 +184,7 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
184 184
185 type = rec.type; 185 type = rec.type;
186 if (type != HFS_CDR_THD && type != HFS_CDR_FTH) { 186 if (type != HFS_CDR_THD && type != HFS_CDR_FTH) {
187 printk("HFS-fs: Found bad thread record in catalog\n"); 187 printk(KERN_ERR "hfs: found bad thread record in catalog\n");
188 return -EIO; 188 return -EIO;
189 } 189 }
190 190
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index e1f24befba5..534e5a7480e 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -81,12 +81,12 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
81 case 1: 81 case 1:
82 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); 82 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
83 if (entry.type != HFS_CDR_THD) { 83 if (entry.type != HFS_CDR_THD) {
84 printk("HFS: bad catalog folder thread\n"); 84 printk(KERN_ERR "hfs: bad catalog folder thread\n");
85 err = -EIO; 85 err = -EIO;
86 goto out; 86 goto out;
87 } 87 }
88 //if (fd.entrylength < HFS_MIN_THREAD_SZ) { 88 //if (fd.entrylength < HFS_MIN_THREAD_SZ) {
89 // printk("HFS: truncated catalog thread\n"); 89 // printk(KERN_ERR "hfs: truncated catalog thread\n");
90 // err = -EIO; 90 // err = -EIO;
91 // goto out; 91 // goto out;
92 //} 92 //}
@@ -105,7 +105,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
105 105
106 for (;;) { 106 for (;;) {
107 if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) { 107 if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) {
108 printk("HFS: walked past end of dir\n"); 108 printk(KERN_ERR "hfs: walked past end of dir\n");
109 err = -EIO; 109 err = -EIO;
110 goto out; 110 goto out;
111 } 111 }
@@ -114,7 +114,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
114 len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName); 114 len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
115 if (type == HFS_CDR_DIR) { 115 if (type == HFS_CDR_DIR) {
116 if (fd.entrylength < sizeof(struct hfs_cat_dir)) { 116 if (fd.entrylength < sizeof(struct hfs_cat_dir)) {
117 printk("HFS: small dir entry\n"); 117 printk(KERN_ERR "hfs: small dir entry\n");
118 err = -EIO; 118 err = -EIO;
119 goto out; 119 goto out;
120 } 120 }
@@ -123,7 +123,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
123 break; 123 break;
124 } else if (type == HFS_CDR_FIL) { 124 } else if (type == HFS_CDR_FIL) {
125 if (fd.entrylength < sizeof(struct hfs_cat_file)) { 125 if (fd.entrylength < sizeof(struct hfs_cat_file)) {
126 printk("HFS: small file entry\n"); 126 printk(KERN_ERR "hfs: small file entry\n");
127 err = -EIO; 127 err = -EIO;
128 goto out; 128 goto out;
129 } 129 }
@@ -131,7 +131,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
131 be32_to_cpu(entry.file.FlNum), DT_REG)) 131 be32_to_cpu(entry.file.FlNum), DT_REG))
132 break; 132 break;
133 } else { 133 } else {
134 printk("HFS: bad catalog entry type %d\n", type); 134 printk(KERN_ERR "hfs: bad catalog entry type %d\n", type);
135 err = -EIO; 135 err = -EIO;
136 goto out; 136 goto out;
137 } 137 }
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index cc5dcd52e23..18ce47ab1b7 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -35,9 +35,6 @@
35#define dprint(flg, fmt, args...) \ 35#define dprint(flg, fmt, args...) \
36 if (flg & DBG_MASK) printk(fmt , ## args) 36 if (flg & DBG_MASK) printk(fmt , ## args)
37 37
38#define hfs_warn(format, args...) printk(KERN_WARNING format , ## args)
39#define hfs_error(format, args...) printk(KERN_ERR format , ## args)
40
41/* 38/*
42 * struct hfs_inode_info 39 * struct hfs_inode_info
43 * 40 *
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 050a4927649..39fd85b9b91 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -95,7 +95,6 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
95 } while (--i && nidx < tree->node_count); 95 } while (--i && nidx < tree->node_count);
96 spin_unlock(&tree->hash_lock); 96 spin_unlock(&tree->hash_lock);
97 } 97 }
98 //printk("releasepage: %lu,%x = %d\n", page->index, mask, res);
99 return res ? try_to_free_buffers(page) : 0; 98 return res ? try_to_free_buffers(page) : 0;
100} 99}
101 100
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 0a473f79c89..b4651e128d7 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -47,7 +47,7 @@ static int hfs_get_last_session(struct super_block *sb,
47 *start = (sector_t)te.cdte_addr.lba << 2; 47 *start = (sector_t)te.cdte_addr.lba << 2;
48 return 0; 48 return 0;
49 } 49 }
50 printk(KERN_ERR "HFS: Invalid session number or type of track\n"); 50 printk(KERN_ERR "hfs: invalid session number or type of track\n");
51 return -EINVAL; 51 return -EINVAL;
52 } 52 }
53 ms_info.addr_format = CDROM_LBA; 53 ms_info.addr_format = CDROM_LBA;
@@ -100,7 +100,7 @@ int hfs_mdb_get(struct super_block *sb)
100 100
101 HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz); 101 HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz);
102 if (!size || (size & (HFS_SECTOR_SIZE - 1))) { 102 if (!size || (size & (HFS_SECTOR_SIZE - 1))) {
103 hfs_warn("hfs_fs: bad allocation block size %d\n", size); 103 printk(KERN_ERR "hfs: bad allocation block size %d\n", size);
104 goto out_bh; 104 goto out_bh;
105 } 105 }
106 106
@@ -117,7 +117,7 @@ int hfs_mdb_get(struct super_block *sb)
117 size >>= 1; 117 size >>= 1;
118 brelse(bh); 118 brelse(bh);
119 if (!sb_set_blocksize(sb, size)) { 119 if (!sb_set_blocksize(sb, size)) {
120 printk("hfs_fs: unable to set blocksize to %u\n", size); 120 printk(KERN_ERR "hfs: unable to set blocksize to %u\n", size);
121 goto out; 121 goto out;
122 } 122 }
123 123
@@ -161,8 +161,8 @@ int hfs_mdb_get(struct super_block *sb)
161 } 161 }
162 162
163 if (!HFS_SB(sb)->alt_mdb) { 163 if (!HFS_SB(sb)->alt_mdb) {
164 hfs_warn("hfs_fs: unable to locate alternate MDB\n"); 164 printk(KERN_WARNING "hfs: unable to locate alternate MDB\n");
165 hfs_warn("hfs_fs: continuing without an alternate MDB\n"); 165 printk(KERN_WARNING "hfs: continuing without an alternate MDB\n");
166 } 166 }
167 167
168 HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 1 : 0); 168 HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 1 : 0);
@@ -177,7 +177,7 @@ int hfs_mdb_get(struct super_block *sb)
177 while (size) { 177 while (size) {
178 bh = sb_bread(sb, off >> sb->s_blocksize_bits); 178 bh = sb_bread(sb, off >> sb->s_blocksize_bits);
179 if (!bh) { 179 if (!bh) {
180 hfs_warn("hfs_fs: unable to read volume bitmap\n"); 180 printk(KERN_ERR "hfs: unable to read volume bitmap\n");
181 goto out; 181 goto out;
182 } 182 }
183 off2 = off & (sb->s_blocksize - 1); 183 off2 = off & (sb->s_blocksize - 1);
@@ -191,23 +191,23 @@ int hfs_mdb_get(struct super_block *sb)
191 191
192 HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp); 192 HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
193 if (!HFS_SB(sb)->ext_tree) { 193 if (!HFS_SB(sb)->ext_tree) {
194 hfs_warn("hfs_fs: unable to open extent tree\n"); 194 printk(KERN_ERR "hfs: unable to open extent tree\n");
195 goto out; 195 goto out;
196 } 196 }
197 HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp); 197 HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
198 if (!HFS_SB(sb)->cat_tree) { 198 if (!HFS_SB(sb)->cat_tree) {
199 hfs_warn("hfs_fs: unable to open catalog tree\n"); 199 printk(KERN_ERR "hfs: unable to open catalog tree\n");
200 goto out; 200 goto out;
201 } 201 }
202 202
203 attrib = mdb->drAtrb; 203 attrib = mdb->drAtrb;
204 if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) { 204 if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
205 hfs_warn("HFS-fs warning: Filesystem was not cleanly unmounted, " 205 printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, "
206 "running fsck.hfs is recommended. mounting read-only.\n"); 206 "running fsck.hfs is recommended. mounting read-only.\n");
207 sb->s_flags |= MS_RDONLY; 207 sb->s_flags |= MS_RDONLY;
208 } 208 }
209 if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) { 209 if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
210 hfs_warn("HFS-fs: Filesystem is marked locked, mounting read-only.\n"); 210 printk(KERN_WARNING "hfs: filesystem is marked locked, mounting read-only.\n");
211 sb->s_flags |= MS_RDONLY; 211 sb->s_flags |= MS_RDONLY;
212 } 212 }
213 if (!(sb->s_flags & MS_RDONLY)) { 213 if (!(sb->s_flags & MS_RDONLY)) {
@@ -303,7 +303,7 @@ void hfs_mdb_commit(struct super_block *sb)
303 while (size) { 303 while (size) {
304 bh = sb_bread(sb, block); 304 bh = sb_bread(sb, block);
305 if (!bh) { 305 if (!bh) {
306 hfs_warn("hfs_fs: unable to read volume bitmap\n"); 306 printk(KERN_ERR "hfs: unable to read volume bitmap\n");
307 break; 307 break;
308 } 308 }
309 len = min((int)sb->s_blocksize - off, size); 309 len = min((int)sb->s_blocksize - off, size);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index c5074aeafca..1181d116117 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -101,12 +101,12 @@ static int hfs_remount(struct super_block *sb, int *flags, char *data)
101 return 0; 101 return 0;
102 if (!(*flags & MS_RDONLY)) { 102 if (!(*flags & MS_RDONLY)) {
103 if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) { 103 if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
104 printk("HFS-fs warning: Filesystem was not cleanly unmounted, " 104 printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, "
105 "running fsck.hfs is recommended. leaving read-only.\n"); 105 "running fsck.hfs is recommended. leaving read-only.\n");
106 sb->s_flags |= MS_RDONLY; 106 sb->s_flags |= MS_RDONLY;
107 *flags |= MS_RDONLY; 107 *flags |= MS_RDONLY;
108 } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) { 108 } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
109 printk("HFS-fs: Filesystem is marked locked, leaving read-only.\n"); 109 printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n");
110 sb->s_flags |= MS_RDONLY; 110 sb->s_flags |= MS_RDONLY;
111 *flags |= MS_RDONLY; 111 *flags |= MS_RDONLY;
112 } 112 }
@@ -229,21 +229,21 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
229 switch (token) { 229 switch (token) {
230 case opt_uid: 230 case opt_uid:
231 if (match_int(&args[0], &tmp)) { 231 if (match_int(&args[0], &tmp)) {
232 printk("HFS: uid requires an argument\n"); 232 printk(KERN_ERR "hfs: uid requires an argument\n");
233 return 0; 233 return 0;
234 } 234 }
235 hsb->s_uid = (uid_t)tmp; 235 hsb->s_uid = (uid_t)tmp;
236 break; 236 break;
237 case opt_gid: 237 case opt_gid:
238 if (match_int(&args[0], &tmp)) { 238 if (match_int(&args[0], &tmp)) {
239 printk("HFS: gid requires an argument\n"); 239 printk(KERN_ERR "hfs: gid requires an argument\n");
240 return 0; 240 return 0;
241 } 241 }
242 hsb->s_gid = (gid_t)tmp; 242 hsb->s_gid = (gid_t)tmp;
243 break; 243 break;
244 case opt_umask: 244 case opt_umask:
245 if (match_octal(&args[0], &tmp)) { 245 if (match_octal(&args[0], &tmp)) {
246 printk("HFS: umask requires a value\n"); 246 printk(KERN_ERR "hfs: umask requires a value\n");
247 return 0; 247 return 0;
248 } 248 }
249 hsb->s_file_umask = (umode_t)tmp; 249 hsb->s_file_umask = (umode_t)tmp;
@@ -251,39 +251,39 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
251 break; 251 break;
252 case opt_file_umask: 252 case opt_file_umask:
253 if (match_octal(&args[0], &tmp)) { 253 if (match_octal(&args[0], &tmp)) {
254 printk("HFS: file_umask requires a value\n"); 254 printk(KERN_ERR "hfs: file_umask requires a value\n");
255 return 0; 255 return 0;
256 } 256 }
257 hsb->s_file_umask = (umode_t)tmp; 257 hsb->s_file_umask = (umode_t)tmp;
258 break; 258 break;
259 case opt_dir_umask: 259 case opt_dir_umask:
260 if (match_octal(&args[0], &tmp)) { 260 if (match_octal(&args[0], &tmp)) {
261 printk("HFS: dir_umask requires a value\n"); 261 printk(KERN_ERR "hfs: dir_umask requires a value\n");
262 return 0; 262 return 0;
263 } 263 }
264 hsb->s_dir_umask = (umode_t)tmp; 264 hsb->s_dir_umask = (umode_t)tmp;
265 break; 265 break;
266 case opt_part: 266 case opt_part:
267 if (match_int(&args[0], &hsb->part)) { 267 if (match_int(&args[0], &hsb->part)) {
268 printk("HFS: part requires an argument\n"); 268 printk(KERN_ERR "hfs: part requires an argument\n");
269 return 0; 269 return 0;
270 } 270 }
271 break; 271 break;
272 case opt_session: 272 case opt_session:
273 if (match_int(&args[0], &hsb->session)) { 273 if (match_int(&args[0], &hsb->session)) {
274 printk("HFS: session requires an argument\n"); 274 printk(KERN_ERR "hfs: session requires an argument\n");
275 return 0; 275 return 0;
276 } 276 }
277 break; 277 break;
278 case opt_type: 278 case opt_type:
279 if (match_fourchar(&args[0], &hsb->s_type)) { 279 if (match_fourchar(&args[0], &hsb->s_type)) {
280 printk("HFS+-fs: type requires a 4 character value\n"); 280 printk(KERN_ERR "hfs: type requires a 4 character value\n");
281 return 0; 281 return 0;
282 } 282 }
283 break; 283 break;
284 case opt_creator: 284 case opt_creator:
285 if (match_fourchar(&args[0], &hsb->s_creator)) { 285 if (match_fourchar(&args[0], &hsb->s_creator)) {
286 printk("HFS+-fs: creator requires a 4 character value\n"); 286 printk(KERN_ERR "hfs: creator requires a 4 character value\n");
287 return 0; 287 return 0;
288 } 288 }
289 break; 289 break;
@@ -292,13 +292,13 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
292 break; 292 break;
293 case opt_codepage: 293 case opt_codepage:
294 if (hsb->nls_disk) { 294 if (hsb->nls_disk) {
295 printk("HFS+-fs: unable to change codepage\n"); 295 printk(KERN_ERR "hfs: unable to change codepage\n");
296 return 0; 296 return 0;
297 } 297 }
298 p = match_strdup(&args[0]); 298 p = match_strdup(&args[0]);
299 hsb->nls_disk = load_nls(p); 299 hsb->nls_disk = load_nls(p);
300 if (!hsb->nls_disk) { 300 if (!hsb->nls_disk) {
301 printk("HFS+-fs: unable to load codepage \"%s\"\n", p); 301 printk(KERN_ERR "hfs: unable to load codepage \"%s\"\n", p);
302 kfree(p); 302 kfree(p);
303 return 0; 303 return 0;
304 } 304 }
@@ -306,13 +306,13 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
306 break; 306 break;
307 case opt_iocharset: 307 case opt_iocharset:
308 if (hsb->nls_io) { 308 if (hsb->nls_io) {
309 printk("HFS: unable to change iocharset\n"); 309 printk(KERN_ERR "hfs: unable to change iocharset\n");
310 return 0; 310 return 0;
311 } 311 }
312 p = match_strdup(&args[0]); 312 p = match_strdup(&args[0]);
313 hsb->nls_io = load_nls(p); 313 hsb->nls_io = load_nls(p);
314 if (!hsb->nls_io) { 314 if (!hsb->nls_io) {
315 printk("HFS: unable to load iocharset \"%s\"\n", p); 315 printk(KERN_ERR "hfs: unable to load iocharset \"%s\"\n", p);
316 kfree(p); 316 kfree(p);
317 return 0; 317 return 0;
318 } 318 }
@@ -326,7 +326,7 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
326 if (hsb->nls_disk && !hsb->nls_io) { 326 if (hsb->nls_disk && !hsb->nls_io) {
327 hsb->nls_io = load_nls_default(); 327 hsb->nls_io = load_nls_default();
328 if (!hsb->nls_io) { 328 if (!hsb->nls_io) {
329 printk("HFS: unable to load default iocharset\n"); 329 printk(KERN_ERR "hfs: unable to load default iocharset\n");
330 return 0; 330 return 0;
331 } 331 }
332 } 332 }
@@ -364,7 +364,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
364 364
365 res = -EINVAL; 365 res = -EINVAL;
366 if (!parse_options((char *)data, sbi)) { 366 if (!parse_options((char *)data, sbi)) {
367 hfs_warn("hfs_fs: unable to parse mount options.\n"); 367 printk(KERN_ERR "hfs: unable to parse mount options.\n");
368 goto bail; 368 goto bail;
369 } 369 }
370 370
@@ -375,7 +375,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
375 res = hfs_mdb_get(sb); 375 res = hfs_mdb_get(sb);
376 if (res) { 376 if (res) {
377 if (!silent) 377 if (!silent)
378 hfs_warn("VFS: Can't find a HFS filesystem on dev %s.\n", 378 printk(KERN_WARNING "hfs: can't find a HFS filesystem on dev %s.\n",
379 hfs_mdb_name(sb)); 379 hfs_mdb_name(sb));
380 res = -EINVAL; 380 res = -EINVAL;
381 goto bail; 381 goto bail;
@@ -407,7 +407,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
407bail_iput: 407bail_iput:
408 iput(root_inode); 408 iput(root_inode);
409bail_no_root: 409bail_no_root:
410 hfs_warn("hfs_fs: get root inode failed.\n"); 410 printk(KERN_ERR "hfs: get root inode failed.\n");
411bail: 411bail:
412 hfs_mdb_put(sb); 412 hfs_mdb_put(sb);
413 return res; 413 return res;
@@ -454,7 +454,7 @@ static void __exit exit_hfs_fs(void)
454{ 454{
455 unregister_filesystem(&hfs_fs_type); 455 unregister_filesystem(&hfs_fs_type);
456 if (kmem_cache_destroy(hfs_inode_cachep)) 456 if (kmem_cache_destroy(hfs_inode_cachep))
457 printk(KERN_INFO "hfs_inode_cache: not all structures were freed\n"); 457 printk(KERN_ERR "hfs_inode_cache: not all structures were freed\n");
458} 458}
459 459
460module_init(init_hfs_fs) 460module_init(init_hfs_fs)
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index 257cdde0514..5007a41f1be 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -64,7 +64,6 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
64 else 64 else
65 e = rec - 1; 65 e = rec - 1;
66 } while (b <= e); 66 } while (b <= e);
67 //printk("%d: %d,%d,%d\n", bnode->this, b, e, rec);
68 if (rec != e && e >= 0) { 67 if (rec != e && e >= 0) {
69 len = hfs_brec_lenoff(bnode, e, &off); 68 len = hfs_brec_lenoff(bnode, e, &off);
70 keylen = hfs_brec_keylen(bnode, e); 69 keylen = hfs_brec_keylen(bnode, e);
@@ -127,7 +126,7 @@ int hfs_brec_find(struct hfs_find_data *fd)
127 return res; 126 return res;
128 127
129invalid: 128invalid:
130 printk("HFS+-fs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n", 129 printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
131 height, bnode->height, bnode->type, nidx, parent); 130 height, bnode->height, bnode->type, nidx, parent);
132 res = -EIO; 131 res = -EIO;
133release: 132release:
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 930cd9212de..8f07e8fbd03 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -358,7 +358,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
358 358
359 // move down? 359 // move down?
360 if (!node->prev && !node->next) { 360 if (!node->prev && !node->next) {
361 printk("hfs_btree_del_level\n"); 361 printk(KERN_DEBUG "hfs_btree_del_level\n");
362 } 362 }
363 if (!node->parent) { 363 if (!node->parent) {
364 tree->root = 0; 364 tree->root = 0;
@@ -379,7 +379,7 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
379 struct hfs_bnode *node; 379 struct hfs_bnode *node;
380 380
381 if (cnid >= tree->node_count) { 381 if (cnid >= tree->node_count) {
382 printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid); 382 printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
383 return NULL; 383 return NULL;
384 } 384 }
385 385
@@ -402,7 +402,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
402 loff_t off; 402 loff_t off;
403 403
404 if (cnid >= tree->node_count) { 404 if (cnid >= tree->node_count) {
405 printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid); 405 printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
406 return NULL; 406 return NULL;
407 } 407 }
408 408
@@ -576,8 +576,9 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
576 node = hfs_bnode_findhash(tree, num); 576 node = hfs_bnode_findhash(tree, num);
577 spin_unlock(&tree->hash_lock); 577 spin_unlock(&tree->hash_lock);
578 if (node) { 578 if (node) {
579 printk("new node %u already hashed?\n", num); 579 printk(KERN_CRIT "new node %u already hashed?\n", num);
580 BUG(); 580 WARN_ON(1);
581 return node;
581 } 582 }
582 node = __hfs_bnode_create(tree, num); 583 node = __hfs_bnode_create(tree, num);
583 if (!node) 584 if (!node)
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 0ccef2ab790..c88e5d72a40 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -360,7 +360,7 @@ again:
360 end_off = hfs_bnode_read_u16(parent, end_rec_off); 360 end_off = hfs_bnode_read_u16(parent, end_rec_off);
361 if (end_rec_off - end_off < diff) { 361 if (end_rec_off - end_off < diff) {
362 362
363 printk("splitting index node...\n"); 363 printk(KERN_DEBUG "hfs: splitting index node...\n");
364 fd->bnode = parent; 364 fd->bnode = parent;
365 new_node = hfs_bnode_split(fd); 365 new_node = hfs_bnode_split(fd);
366 if (IS_ERR(new_node)) 366 if (IS_ERR(new_node))
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 44326aa2bd3..a67edfa34e9 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -31,17 +31,8 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
31 31
32 init_MUTEX(&tree->tree_lock); 32 init_MUTEX(&tree->tree_lock);
33 spin_lock_init(&tree->hash_lock); 33 spin_lock_init(&tree->hash_lock);
34 /* Set the correct compare function */
35 tree->sb = sb; 34 tree->sb = sb;
36 tree->cnid = id; 35 tree->cnid = id;
37 if (id == HFSPLUS_EXT_CNID) {
38 tree->keycmp = hfsplus_ext_cmp_key;
39 } else if (id == HFSPLUS_CAT_CNID) {
40 tree->keycmp = hfsplus_cat_cmp_key;
41 } else {
42 printk("HFS+-fs: unknown B*Tree requested\n");
43 goto free_tree;
44 }
45 tree->inode = iget(sb, id); 36 tree->inode = iget(sb, id);
46 if (!tree->inode) 37 if (!tree->inode)
47 goto free_tree; 38 goto free_tree;
@@ -64,6 +55,20 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
64 tree->max_key_len = be16_to_cpu(head->max_key_len); 55 tree->max_key_len = be16_to_cpu(head->max_key_len);
65 tree->depth = be16_to_cpu(head->depth); 56 tree->depth = be16_to_cpu(head->depth);
66 57
58 /* Set the correct compare function */
59 if (id == HFSPLUS_EXT_CNID) {
60 tree->keycmp = hfsplus_ext_cmp_key;
61 } else if (id == HFSPLUS_CAT_CNID) {
62 if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
63 (head->key_type == HFSPLUS_KEY_BINARY))
64 tree->keycmp = hfsplus_cat_bin_cmp_key;
65 else
66 tree->keycmp = hfsplus_cat_case_cmp_key;
67 } else {
68 printk(KERN_ERR "hfs: unknown B*Tree requested\n");
69 goto fail_page;
70 }
71
67 size = tree->node_size; 72 size = tree->node_size;
68 if (!size || size & (size - 1)) 73 if (!size || size & (size - 1))
69 goto fail_page; 74 goto fail_page;
@@ -99,7 +104,7 @@ void hfs_btree_close(struct hfs_btree *tree)
99 while ((node = tree->node_hash[i])) { 104 while ((node = tree->node_hash[i])) {
100 tree->node_hash[i] = node->next_hash; 105 tree->node_hash[i] = node->next_hash;
101 if (atomic_read(&node->refcnt)) 106 if (atomic_read(&node->refcnt))
102 printk("HFS+: node %d:%d still has %d user(s)!\n", 107 printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
103 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 108 node->tree->cnid, node->this, atomic_read(&node->refcnt));
104 hfs_bnode_free(node); 109 hfs_bnode_free(node);
105 tree->node_hash_cnt--; 110 tree->node_hash_cnt--;
@@ -223,10 +228,6 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
223 tree->free_nodes--; 228 tree->free_nodes--;
224 mark_inode_dirty(tree->inode); 229 mark_inode_dirty(tree->inode);
225 hfs_bnode_put(node); 230 hfs_bnode_put(node);
226 if (!idx) {
227 printk("unexpected idx %u (%u)\n", idx, node->this);
228 BUG();
229 }
230 return hfs_bnode_create(tree, idx); 231 return hfs_bnode_create(tree, idx);
231 } 232 }
232 } 233 }
@@ -242,7 +243,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
242 kunmap(*pagep); 243 kunmap(*pagep);
243 nidx = node->next; 244 nidx = node->next;
244 if (!nidx) { 245 if (!nidx) {
245 printk("create new bmap node...\n"); 246 printk(KERN_DEBUG "hfs: create new bmap node...\n");
246 next_node = hfs_bmap_new_bmap(node, idx); 247 next_node = hfs_bmap_new_bmap(node, idx);
247 } else 248 } else
248 next_node = hfs_bnode_find(tree, nidx); 249 next_node = hfs_bnode_find(tree, nidx);
@@ -284,7 +285,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
284 hfs_bnode_put(node); 285 hfs_bnode_put(node);
285 if (!i) { 286 if (!i) {
286 /* panic */; 287 /* panic */;
287 printk("HFS: unable to free bnode %u. bmap not found!\n", node->this); 288 printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
288 return; 289 return;
289 } 290 }
290 node = hfs_bnode_find(tree, i); 291 node = hfs_bnode_find(tree, i);
@@ -292,7 +293,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
292 return; 293 return;
293 if (node->type != HFS_NODE_MAP) { 294 if (node->type != HFS_NODE_MAP) {
294 /* panic */; 295 /* panic */;
295 printk("HFS: invalid bmap found! (%u,%d)\n", node->this, node->type); 296 printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
296 hfs_bnode_put(node); 297 hfs_bnode_put(node);
297 return; 298 return;
298 } 299 }
@@ -305,7 +306,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
305 m = 1 << (~nidx & 7); 306 m = 1 << (~nidx & 7);
306 byte = data[off]; 307 byte = data[off];
307 if (!(byte & m)) { 308 if (!(byte & m)) {
308 printk("HFS: trying to free free bnode %u(%d)\n", node->this, node->type); 309 printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
309 kunmap(page); 310 kunmap(page);
310 hfs_bnode_put(node); 311 hfs_bnode_put(node);
311 return; 312 return;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 94712790c8b..f2d7c49ce75 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -13,7 +13,8 @@
13#include "hfsplus_fs.h" 13#include "hfsplus_fs.h"
14#include "hfsplus_raw.h" 14#include "hfsplus_raw.h"
15 15
16int hfsplus_cat_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2) 16int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
17 const hfsplus_btree_key *k2)
17{ 18{
18 __be32 k1p, k2p; 19 __be32 k1p, k2p;
19 20
@@ -22,7 +23,20 @@ int hfsplus_cat_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2)
22 if (k1p != k2p) 23 if (k1p != k2p)
23 return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1; 24 return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
24 25
25 return hfsplus_unistrcmp(&k1->cat.name, &k2->cat.name); 26 return hfsplus_strcasecmp(&k1->cat.name, &k2->cat.name);
27}
28
29int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
30 const hfsplus_btree_key *k2)
31{
32 __be32 k1p, k2p;
33
34 k1p = k1->cat.parent;
35 k2p = k2->cat.parent;
36 if (k1p != k2p)
37 return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
38
39 return hfsplus_strcmp(&k1->cat.name, &k2->cat.name);
26} 40}
27 41
28void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key, 42void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
@@ -80,8 +94,11 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
80 memset(folder, 0, sizeof(*folder)); 94 memset(folder, 0, sizeof(*folder));
81 folder->type = cpu_to_be16(HFSPLUS_FOLDER); 95 folder->type = cpu_to_be16(HFSPLUS_FOLDER);
82 folder->id = cpu_to_be32(inode->i_ino); 96 folder->id = cpu_to_be32(inode->i_ino);
83 folder->create_date = folder->content_mod_date = 97 HFSPLUS_I(inode).create_date =
84 folder->attribute_mod_date = folder->access_date = hfsp_now2mt(); 98 folder->create_date =
99 folder->content_mod_date =
100 folder->attribute_mod_date =
101 folder->access_date = hfsp_now2mt();
85 hfsplus_set_perms(inode, &folder->permissions); 102 hfsplus_set_perms(inode, &folder->permissions);
86 if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir) 103 if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir)
87 /* invisible and namelocked */ 104 /* invisible and namelocked */
@@ -95,18 +112,27 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
95 file->type = cpu_to_be16(HFSPLUS_FILE); 112 file->type = cpu_to_be16(HFSPLUS_FILE);
96 file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS); 113 file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
97 file->id = cpu_to_be32(cnid); 114 file->id = cpu_to_be32(cnid);
98 file->create_date = file->content_mod_date = 115 HFSPLUS_I(inode).create_date =
99 file->attribute_mod_date = file->access_date = hfsp_now2mt(); 116 file->create_date =
117 file->content_mod_date =
118 file->attribute_mod_date =
119 file->access_date = hfsp_now2mt();
100 if (cnid == inode->i_ino) { 120 if (cnid == inode->i_ino) {
101 hfsplus_set_perms(inode, &file->permissions); 121 hfsplus_set_perms(inode, &file->permissions);
102 file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type); 122 if (S_ISLNK(inode->i_mode)) {
103 file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator); 123 file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE);
124 file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR);
125 } else {
126 file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type);
127 file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator);
128 }
104 if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE) 129 if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
105 file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED); 130 file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
106 } else { 131 } else {
107 file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE); 132 file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE);
108 file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR); 133 file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR);
109 file->user_info.fdFlags = cpu_to_be16(0x100); 134 file->user_info.fdFlags = cpu_to_be16(0x100);
135 file->create_date = HFSPLUS_I(HFSPLUS_SB(inode->i_sb).hidden_dir).create_date;
110 file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode).dev); 136 file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode).dev);
111 } 137 }
112 return sizeof(*file); 138 return sizeof(*file);
@@ -139,7 +165,7 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
139 165
140 type = be16_to_cpu(tmp.type); 166 type = be16_to_cpu(tmp.type);
141 if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) { 167 if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
142 printk("HFS+-fs: Found bad thread record in catalog\n"); 168 printk(KERN_ERR "hfs: found bad thread record in catalog\n");
143 return -EIO; 169 return -EIO;
144 } 170 }
145 171
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 50c8f44b6c6..01a6fe3a395 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -66,25 +66,32 @@ again:
66 } 66 }
67 cnid = be32_to_cpu(entry.file.id); 67 cnid = be32_to_cpu(entry.file.id);
68 if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) && 68 if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) &&
69 entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR)) { 69 entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
70 (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb).hidden_dir).create_date ||
71 entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode).create_date) &&
72 HFSPLUS_SB(sb).hidden_dir) {
70 struct qstr str; 73 struct qstr str;
71 char name[32]; 74 char name[32];
72 75
73 if (dentry->d_fsdata) { 76 if (dentry->d_fsdata) {
74 err = -ENOENT; 77 /*
75 inode = NULL; 78 * We found a link pointing to another link,
76 goto out; 79 * so ignore it and treat it as regular file.
80 */
81 cnid = (unsigned long)dentry->d_fsdata;
82 linkid = 0;
83 } else {
84 dentry->d_fsdata = (void *)(unsigned long)cnid;
85 linkid = be32_to_cpu(entry.file.permissions.dev);
86 str.len = sprintf(name, "iNode%d", linkid);
87 str.name = name;
88 hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str);
89 goto again;
77 } 90 }
78 dentry->d_fsdata = (void *)(unsigned long)cnid;
79 linkid = be32_to_cpu(entry.file.permissions.dev);
80 str.len = sprintf(name, "iNode%d", linkid);
81 str.name = name;
82 hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str);
83 goto again;
84 } else if (!dentry->d_fsdata) 91 } else if (!dentry->d_fsdata)
85 dentry->d_fsdata = (void *)(unsigned long)cnid; 92 dentry->d_fsdata = (void *)(unsigned long)cnid;
86 } else { 93 } else {
87 printk("HFS+-fs: Illegal catalog entry type in lookup\n"); 94 printk(KERN_ERR "hfs: invalid catalog entry type in lookup\n");
88 err = -EIO; 95 err = -EIO;
89 goto fail; 96 goto fail;
90 } 97 }
@@ -132,12 +139,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
132 case 1: 139 case 1:
133 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); 140 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
134 if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { 141 if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
135 printk("HFS+-fs: bad catalog folder thread\n"); 142 printk(KERN_ERR "hfs: bad catalog folder thread\n");
136 err = -EIO; 143 err = -EIO;
137 goto out; 144 goto out;
138 } 145 }
139 if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { 146 if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) {
140 printk("HFS+-fs: truncated catalog thread\n"); 147 printk(KERN_ERR "hfs: truncated catalog thread\n");
141 err = -EIO; 148 err = -EIO;
142 goto out; 149 goto out;
143 } 150 }
@@ -156,7 +163,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
156 163
157 for (;;) { 164 for (;;) {
158 if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { 165 if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
159 printk("HFS+-fs: walked past end of dir\n"); 166 printk(KERN_ERR "hfs: walked past end of dir\n");
160 err = -EIO; 167 err = -EIO;
161 goto out; 168 goto out;
162 } 169 }
@@ -168,7 +175,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
168 goto out; 175 goto out;
169 if (type == HFSPLUS_FOLDER) { 176 if (type == HFSPLUS_FOLDER) {
170 if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { 177 if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
171 printk("HFS+-fs: small dir entry\n"); 178 printk(KERN_ERR "hfs: small dir entry\n");
172 err = -EIO; 179 err = -EIO;
173 goto out; 180 goto out;
174 } 181 }
@@ -180,7 +187,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
180 break; 187 break;
181 } else if (type == HFSPLUS_FILE) { 188 } else if (type == HFSPLUS_FILE) {
182 if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { 189 if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
183 printk("HFS+-fs: small file entry\n"); 190 printk(KERN_ERR "hfs: small file entry\n");
184 err = -EIO; 191 err = -EIO;
185 goto out; 192 goto out;
186 } 193 }
@@ -188,7 +195,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
188 be32_to_cpu(entry.file.id), DT_REG)) 195 be32_to_cpu(entry.file.id), DT_REG))
189 break; 196 break;
190 } else { 197 } else {
191 printk("HFS+-fs: bad catalog entry type\n"); 198 printk(KERN_ERR "hfs: bad catalog entry type\n");
192 err = -EIO; 199 err = -EIO;
193 goto out; 200 goto out;
194 } 201 }
@@ -330,7 +337,8 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
330 if (res) 337 if (res)
331 return res; 338 return res;
332 339
333 inode->i_nlink--; 340 if (inode->i_nlink > 0)
341 inode->i_nlink--;
334 hfsplus_delete_inode(inode); 342 hfsplus_delete_inode(inode);
335 if (inode->i_ino != cnid && !inode->i_nlink) { 343 if (inode->i_ino != cnid && !inode->i_nlink) {
336 if (!atomic_read(&HFSPLUS_I(inode).opencnt)) { 344 if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
@@ -339,7 +347,8 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
339 hfsplus_delete_inode(inode); 347 hfsplus_delete_inode(inode);
340 } else 348 } else
341 inode->i_flags |= S_DEAD; 349 inode->i_flags |= S_DEAD;
342 } 350 } else
351 inode->i_nlink = 0;
343 inode->i_ctime = CURRENT_TIME_SEC; 352 inode->i_ctime = CURRENT_TIME_SEC;
344 mark_inode_dirty(inode); 353 mark_inode_dirty(inode);
345 354
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index e3ff56a0301..1a7480089e8 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -16,7 +16,8 @@
16#include "hfsplus_raw.h" 16#include "hfsplus_raw.h"
17 17
18/* Compare two extents keys, returns 0 on same, pos/neg for difference */ 18/* Compare two extents keys, returns 0 on same, pos/neg for difference */
19int hfsplus_ext_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2) 19int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
20 const hfsplus_btree_key *k2)
20{ 21{
21 __be32 k1id, k2id; 22 __be32 k1id, k2id;
22 __be32 k1s, k2s; 23 __be32 k1s, k2s;
@@ -349,10 +350,9 @@ int hfsplus_file_extend(struct inode *inode)
349 350
350 if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) { 351 if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
351 // extend alloc file 352 // extend alloc file
352 printk("extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8, 353 printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
353 HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks); 354 HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
354 return -ENOSPC; 355 return -ENOSPC;
355 //BUG();
356 } 356 }
357 357
358 down(&HFSPLUS_I(inode).extents_lock); 358 down(&HFSPLUS_I(inode).extents_lock);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 0fa1ab6250b..7ae393637a0 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -36,7 +36,7 @@
36#define HFSPLUS_TYPE_DATA 0x00 36#define HFSPLUS_TYPE_DATA 0x00
37#define HFSPLUS_TYPE_RSRC 0xFF 37#define HFSPLUS_TYPE_RSRC 0xFF
38 38
39typedef int (*btree_keycmp)(hfsplus_btree_key *, hfsplus_btree_key *); 39typedef int (*btree_keycmp)(const hfsplus_btree_key *, const hfsplus_btree_key *);
40 40
41#define NODE_HASH_SIZE 256 41#define NODE_HASH_SIZE 256
42 42
@@ -149,6 +149,7 @@ struct hfsplus_sb_info {
149#define HFSPLUS_SB_WRITEBACKUP 0x0001 149#define HFSPLUS_SB_WRITEBACKUP 0x0001
150#define HFSPLUS_SB_NODECOMPOSE 0x0002 150#define HFSPLUS_SB_NODECOMPOSE 0x0002
151#define HFSPLUS_SB_FORCE 0x0004 151#define HFSPLUS_SB_FORCE 0x0004
152#define HFSPLUS_SB_HFSX 0x0008
152 153
153 154
154struct hfsplus_inode_info { 155struct hfsplus_inode_info {
@@ -165,6 +166,7 @@ struct hfsplus_inode_info {
165 struct inode *rsrc_inode; 166 struct inode *rsrc_inode;
166 unsigned long flags; 167 unsigned long flags;
167 168
169 __be32 create_date;
168 /* Device number in hfsplus_permissions in catalog */ 170 /* Device number in hfsplus_permissions in catalog */
169 u32 dev; 171 u32 dev;
170 /* BSD system and user file flags */ 172 /* BSD system and user file flags */
@@ -303,7 +305,8 @@ int hfs_brec_read(struct hfs_find_data *, void *, int);
303int hfs_brec_goto(struct hfs_find_data *, int); 305int hfs_brec_goto(struct hfs_find_data *, int);
304 306
305/* catalog.c */ 307/* catalog.c */
306int hfsplus_cat_cmp_key(hfsplus_btree_key *, hfsplus_btree_key *); 308int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
309int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
307void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *, u32, struct qstr *); 310void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *, u32, struct qstr *);
308int hfsplus_find_cat(struct super_block *, u32, struct hfs_find_data *); 311int hfsplus_find_cat(struct super_block *, u32, struct hfs_find_data *);
309int hfsplus_create_cat(u32, struct inode *, struct qstr *, struct inode *); 312int hfsplus_create_cat(u32, struct inode *, struct qstr *, struct inode *);
@@ -312,7 +315,7 @@ int hfsplus_rename_cat(u32, struct inode *, struct qstr *,
312 struct inode *, struct qstr *); 315 struct inode *, struct qstr *);
313 316
314/* extents.c */ 317/* extents.c */
315int hfsplus_ext_cmp_key(hfsplus_btree_key *, hfsplus_btree_key *); 318int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
316void hfsplus_ext_write_extent(struct inode *); 319void hfsplus_ext_write_extent(struct inode *);
317int hfsplus_get_block(struct inode *, sector_t, struct buffer_head *, int); 320int hfsplus_get_block(struct inode *, sector_t, struct buffer_head *, int);
318int hfsplus_free_fork(struct super_block *, u32, struct hfsplus_fork_raw *, int); 321int hfsplus_free_fork(struct super_block *, u32, struct hfsplus_fork_raw *, int);
@@ -350,7 +353,8 @@ extern u16 hfsplus_decompose_table[];
350extern u16 hfsplus_compose_table[]; 353extern u16 hfsplus_compose_table[];
351 354
352/* unicode.c */ 355/* unicode.c */
353int hfsplus_unistrcmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *); 356int hfsplus_strcasecmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
357int hfsplus_strcmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
354int hfsplus_uni2asc(struct super_block *, const struct hfsplus_unistr *, char *, int *); 358int hfsplus_uni2asc(struct super_block *, const struct hfsplus_unistr *, char *, int *);
355int hfsplus_asc2uni(struct super_block *, struct hfsplus_unistr *, const char *, int); 359int hfsplus_asc2uni(struct super_block *, struct hfsplus_unistr *, const char *, int);
356 360
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index b4fbed63321..49205531a50 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -22,8 +22,10 @@
22#define HFSPLUS_SECTOR_SHIFT 9 22#define HFSPLUS_SECTOR_SHIFT 9
23#define HFSPLUS_VOLHEAD_SECTOR 2 23#define HFSPLUS_VOLHEAD_SECTOR 2
24#define HFSPLUS_VOLHEAD_SIG 0x482b 24#define HFSPLUS_VOLHEAD_SIG 0x482b
25#define HFSPLUS_VOLHEAD_SIGX 0x4858
25#define HFSPLUS_SUPER_MAGIC 0x482b 26#define HFSPLUS_SUPER_MAGIC 0x482b
26#define HFSPLUS_CURRENT_VERSION 4 27#define HFSPLUS_MIN_VERSION 4
28#define HFSPLUS_CURRENT_VERSION 5
27 29
28#define HFSP_WRAP_MAGIC 0x4244 30#define HFSP_WRAP_MAGIC 0x4244
29#define HFSP_WRAP_ATTRIB_SLOCK 0x8000 31#define HFSP_WRAP_ATTRIB_SLOCK 0x8000
@@ -41,6 +43,9 @@
41#define HFSP_HARDLINK_TYPE 0x686c6e6b /* 'hlnk' */ 43#define HFSP_HARDLINK_TYPE 0x686c6e6b /* 'hlnk' */
42#define HFSP_HFSPLUS_CREATOR 0x6866732b /* 'hfs+' */ 44#define HFSP_HFSPLUS_CREATOR 0x6866732b /* 'hfs+' */
43 45
46#define HFSP_SYMLINK_TYPE 0x736c6e6b /* 'slnk' */
47#define HFSP_SYMLINK_CREATOR 0x72686170 /* 'rhap' */
48
44#define HFSP_MOUNT_VERSION 0x482b4c78 /* 'H+Lx' */ 49#define HFSP_MOUNT_VERSION 0x482b4c78 /* 'H+Lx' */
45 50
46/* Structures used on disk */ 51/* Structures used on disk */
@@ -161,7 +166,7 @@ struct hfs_btree_header_rec {
161 u16 reserved1; 166 u16 reserved1;
162 __be32 clump_size; 167 __be32 clump_size;
163 u8 btree_type; 168 u8 btree_type;
164 u8 reserved2; 169 u8 key_type;
165 __be32 attributes; 170 __be32 attributes;
166 u32 reserved3[16]; 171 u32 reserved3[16];
167} __packed; 172} __packed;
@@ -186,6 +191,10 @@ struct hfs_btree_header_rec {
186#define HFSPLUS_EXCH_CNID 15 /* ExchangeFiles temp id */ 191#define HFSPLUS_EXCH_CNID 15 /* ExchangeFiles temp id */
187#define HFSPLUS_FIRSTUSER_CNID 16 /* first available user id */ 192#define HFSPLUS_FIRSTUSER_CNID 16 /* first available user id */
188 193
194/* btree key type */
195#define HFSPLUS_KEY_CASEFOLDING 0xCF /* case-insensitive */
196#define HFSPLUS_KEY_BINARY 0xBC /* case-sensitive */
197
189/* HFS+ catalog entry key */ 198/* HFS+ catalog entry key */
190struct hfsplus_cat_key { 199struct hfsplus_cat_key {
191 __be16 key_len; 200 __be16 key_len;
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 7acff6c5464..12ed2b7d046 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -18,13 +18,11 @@
18 18
19static int hfsplus_readpage(struct file *file, struct page *page) 19static int hfsplus_readpage(struct file *file, struct page *page)
20{ 20{
21 //printk("readpage: %lu\n", page->index);
22 return block_read_full_page(page, hfsplus_get_block); 21 return block_read_full_page(page, hfsplus_get_block);
23} 22}
24 23
25static int hfsplus_writepage(struct page *page, struct writeback_control *wbc) 24static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
26{ 25{
27 //printk("writepage: %lu\n", page->index);
28 return block_write_full_page(page, hfsplus_get_block, wbc); 26 return block_write_full_page(page, hfsplus_get_block, wbc);
29} 27}
30 28
@@ -92,7 +90,6 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
92 } while (--i && nidx < tree->node_count); 90 } while (--i && nidx < tree->node_count);
93 spin_unlock(&tree->hash_lock); 91 spin_unlock(&tree->hash_lock);
94 } 92 }
95 //printk("releasepage: %lu,%x = %d\n", page->index, mask, res);
96 return res ? try_to_free_buffers(page) : 0; 93 return res ? try_to_free_buffers(page) : 0;
97} 94}
98 95
@@ -434,7 +431,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
434 inode->i_size = 2 + be32_to_cpu(folder->valence); 431 inode->i_size = 2 + be32_to_cpu(folder->valence);
435 inode->i_atime = hfsp_mt2ut(folder->access_date); 432 inode->i_atime = hfsp_mt2ut(folder->access_date);
436 inode->i_mtime = hfsp_mt2ut(folder->content_mod_date); 433 inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
437 inode->i_ctime = inode->i_mtime; 434 inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
435 HFSPLUS_I(inode).create_date = folder->create_date;
438 HFSPLUS_I(inode).fs_blocks = 0; 436 HFSPLUS_I(inode).fs_blocks = 0;
439 inode->i_op = &hfsplus_dir_inode_operations; 437 inode->i_op = &hfsplus_dir_inode_operations;
440 inode->i_fop = &hfsplus_dir_operations; 438 inode->i_fop = &hfsplus_dir_operations;
@@ -465,9 +463,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
465 } 463 }
466 inode->i_atime = hfsp_mt2ut(file->access_date); 464 inode->i_atime = hfsp_mt2ut(file->access_date);
467 inode->i_mtime = hfsp_mt2ut(file->content_mod_date); 465 inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
468 inode->i_ctime = inode->i_mtime; 466 inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
467 HFSPLUS_I(inode).create_date = file->create_date;
469 } else { 468 } else {
470 printk("HFS+-fs: bad catalog entry used to create inode\n"); 469 printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
471 res = -EIO; 470 res = -EIO;
472 } 471 }
473 return res; 472 return res;
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 935dafba007..dc64fac0083 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -83,58 +83,58 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
83 switch (token) { 83 switch (token) {
84 case opt_creator: 84 case opt_creator:
85 if (match_fourchar(&args[0], &sbi->creator)) { 85 if (match_fourchar(&args[0], &sbi->creator)) {
86 printk("HFS+-fs: creator requires a 4 character value\n"); 86 printk(KERN_ERR "hfs: creator requires a 4 character value\n");
87 return 0; 87 return 0;
88 } 88 }
89 break; 89 break;
90 case opt_type: 90 case opt_type:
91 if (match_fourchar(&args[0], &sbi->type)) { 91 if (match_fourchar(&args[0], &sbi->type)) {
92 printk("HFS+-fs: type requires a 4 character value\n"); 92 printk(KERN_ERR "hfs: type requires a 4 character value\n");
93 return 0; 93 return 0;
94 } 94 }
95 break; 95 break;
96 case opt_umask: 96 case opt_umask:
97 if (match_octal(&args[0], &tmp)) { 97 if (match_octal(&args[0], &tmp)) {
98 printk("HFS+-fs: umask requires a value\n"); 98 printk(KERN_ERR "hfs: umask requires a value\n");
99 return 0; 99 return 0;
100 } 100 }
101 sbi->umask = (umode_t)tmp; 101 sbi->umask = (umode_t)tmp;
102 break; 102 break;
103 case opt_uid: 103 case opt_uid:
104 if (match_int(&args[0], &tmp)) { 104 if (match_int(&args[0], &tmp)) {
105 printk("HFS+-fs: uid requires an argument\n"); 105 printk(KERN_ERR "hfs: uid requires an argument\n");
106 return 0; 106 return 0;
107 } 107 }
108 sbi->uid = (uid_t)tmp; 108 sbi->uid = (uid_t)tmp;
109 break; 109 break;
110 case opt_gid: 110 case opt_gid:
111 if (match_int(&args[0], &tmp)) { 111 if (match_int(&args[0], &tmp)) {
112 printk("HFS+-fs: gid requires an argument\n"); 112 printk(KERN_ERR "hfs: gid requires an argument\n");
113 return 0; 113 return 0;
114 } 114 }
115 sbi->gid = (gid_t)tmp; 115 sbi->gid = (gid_t)tmp;
116 break; 116 break;
117 case opt_part: 117 case opt_part:
118 if (match_int(&args[0], &sbi->part)) { 118 if (match_int(&args[0], &sbi->part)) {
119 printk("HFS+-fs: part requires an argument\n"); 119 printk(KERN_ERR "hfs: part requires an argument\n");
120 return 0; 120 return 0;
121 } 121 }
122 break; 122 break;
123 case opt_session: 123 case opt_session:
124 if (match_int(&args[0], &sbi->session)) { 124 if (match_int(&args[0], &sbi->session)) {
125 printk("HFS+-fs: session requires an argument\n"); 125 printk(KERN_ERR "hfs: session requires an argument\n");
126 return 0; 126 return 0;
127 } 127 }
128 break; 128 break;
129 case opt_nls: 129 case opt_nls:
130 if (sbi->nls) { 130 if (sbi->nls) {
131 printk("HFS+-fs: unable to change nls mapping\n"); 131 printk(KERN_ERR "hfs: unable to change nls mapping\n");
132 return 0; 132 return 0;
133 } 133 }
134 p = match_strdup(&args[0]); 134 p = match_strdup(&args[0]);
135 sbi->nls = load_nls(p); 135 sbi->nls = load_nls(p);
136 if (!sbi->nls) { 136 if (!sbi->nls) {
137 printk("HFS+-fs: unable to load nls mapping \"%s\"\n", p); 137 printk(KERN_ERR "hfs: unable to load nls mapping \"%s\"\n", p);
138 kfree(p); 138 kfree(p);
139 return 0; 139 return 0;
140 } 140 }
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index d791780def5..7843f792a4b 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -169,7 +169,7 @@ static void hfsplus_write_super(struct super_block *sb)
169 block = HFSPLUS_SB(sb).blockoffset; 169 block = HFSPLUS_SB(sb).blockoffset;
170 block += (HFSPLUS_SB(sb).sect_count - 2) >> (sb->s_blocksize_bits - 9); 170 block += (HFSPLUS_SB(sb).sect_count - 2) >> (sb->s_blocksize_bits - 9);
171 offset = ((HFSPLUS_SB(sb).sect_count - 2) << 9) & (sb->s_blocksize - 1); 171 offset = ((HFSPLUS_SB(sb).sect_count - 2) << 9) & (sb->s_blocksize - 1);
172 printk("backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset, 172 printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset,
173 HFSPLUS_SB(sb).sect_count, block, offset); 173 HFSPLUS_SB(sb).sect_count, block, offset);
174 bh = sb_bread(sb, block); 174 bh = sb_bread(sb, block);
175 if (bh) { 175 if (bh) {
@@ -179,7 +179,7 @@ static void hfsplus_write_super(struct super_block *sb)
179 mark_buffer_dirty(bh); 179 mark_buffer_dirty(bh);
180 brelse(bh); 180 brelse(bh);
181 } else 181 } else
182 printk("backup not found!\n"); 182 printk(KERN_WARNING "hfs: backup not found!\n");
183 } 183 }
184 } 184 }
185 HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP; 185 HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP;
@@ -240,18 +240,18 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
240 return -EINVAL; 240 return -EINVAL;
241 241
242 if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) { 242 if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
243 printk("HFS+-fs warning: Filesystem was not cleanly unmounted, " 243 printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, "
244 "running fsck.hfsplus is recommended. leaving read-only.\n"); 244 "running fsck.hfsplus is recommended. leaving read-only.\n");
245 sb->s_flags |= MS_RDONLY; 245 sb->s_flags |= MS_RDONLY;
246 *flags |= MS_RDONLY; 246 *flags |= MS_RDONLY;
247 } else if (sbi.flags & HFSPLUS_SB_FORCE) { 247 } else if (sbi.flags & HFSPLUS_SB_FORCE) {
248 /* nothing */ 248 /* nothing */
249 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { 249 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
250 printk("HFS+-fs: Filesystem is marked locked, leaving read-only.\n"); 250 printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n");
251 sb->s_flags |= MS_RDONLY; 251 sb->s_flags |= MS_RDONLY;
252 *flags |= MS_RDONLY; 252 *flags |= MS_RDONLY;
253 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) { 253 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
254 printk("HFS+-fs: Filesystem is marked journaled, leaving read-only.\n"); 254 printk(KERN_WARNING "hfs: filesystem is marked journaled, leaving read-only.\n");
255 sb->s_flags |= MS_RDONLY; 255 sb->s_flags |= MS_RDONLY;
256 *flags |= MS_RDONLY; 256 *flags |= MS_RDONLY;
257 } 257 }
@@ -292,8 +292,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
292 INIT_HLIST_HEAD(&sbi->rsrc_inodes); 292 INIT_HLIST_HEAD(&sbi->rsrc_inodes);
293 hfsplus_fill_defaults(sbi); 293 hfsplus_fill_defaults(sbi);
294 if (!hfsplus_parse_options(data, sbi)) { 294 if (!hfsplus_parse_options(data, sbi)) {
295 if (!silent) 295 printk(KERN_ERR "hfs: unable to parse mount options\n");
296 printk("HFS+-fs: unable to parse mount options\n");
297 err = -EINVAL; 296 err = -EINVAL;
298 goto cleanup; 297 goto cleanup;
299 } 298 }
@@ -302,7 +301,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
302 nls = sbi->nls; 301 nls = sbi->nls;
303 sbi->nls = load_nls("utf8"); 302 sbi->nls = load_nls("utf8");
304 if (!sbi->nls) { 303 if (!sbi->nls) {
305 printk("HFS+: unable to load nls for utf8\n"); 304 printk(KERN_ERR "hfs: unable to load nls for utf8\n");
306 err = -EINVAL; 305 err = -EINVAL;
307 goto cleanup; 306 goto cleanup;
308 } 307 }
@@ -310,17 +309,17 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
310 /* Grab the volume header */ 309 /* Grab the volume header */
311 if (hfsplus_read_wrapper(sb)) { 310 if (hfsplus_read_wrapper(sb)) {
312 if (!silent) 311 if (!silent)
313 printk("HFS+-fs: unable to find HFS+ superblock\n"); 312 printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n");
314 err = -EINVAL; 313 err = -EINVAL;
315 goto cleanup; 314 goto cleanup;
316 } 315 }
317 vhdr = HFSPLUS_SB(sb).s_vhdr; 316 vhdr = HFSPLUS_SB(sb).s_vhdr;
318 317
319 /* Copy parts of the volume header into the superblock */ 318 /* Copy parts of the volume header into the superblock */
320 sb->s_magic = be16_to_cpu(vhdr->signature); 319 sb->s_magic = HFSPLUS_VOLHEAD_SIG;
321 if (be16_to_cpu(vhdr->version) != HFSPLUS_CURRENT_VERSION) { 320 if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
322 if (!silent) 321 be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
323 printk("HFS+-fs: wrong filesystem version\n"); 322 printk(KERN_ERR "hfs: wrong filesystem version\n");
324 goto cleanup; 323 goto cleanup;
325 } 324 }
326 HFSPLUS_SB(sb).total_blocks = be32_to_cpu(vhdr->total_blocks); 325 HFSPLUS_SB(sb).total_blocks = be32_to_cpu(vhdr->total_blocks);
@@ -341,20 +340,17 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
341 sb->s_maxbytes = MAX_LFS_FILESIZE; 340 sb->s_maxbytes = MAX_LFS_FILESIZE;
342 341
343 if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) { 342 if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
344 if (!silent) 343 printk(KERN_WARNING "hfs: Filesystem was not cleanly unmounted, "
345 printk("HFS+-fs warning: Filesystem was not cleanly unmounted, " 344 "running fsck.hfsplus is recommended. mounting read-only.\n");
346 "running fsck.hfsplus is recommended. mounting read-only.\n");
347 sb->s_flags |= MS_RDONLY; 345 sb->s_flags |= MS_RDONLY;
348 } else if (sbi->flags & HFSPLUS_SB_FORCE) { 346 } else if (sbi->flags & HFSPLUS_SB_FORCE) {
349 /* nothing */ 347 /* nothing */
350 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { 348 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
351 if (!silent) 349 printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n");
352 printk("HFS+-fs: Filesystem is marked locked, mounting read-only.\n");
353 sb->s_flags |= MS_RDONLY; 350 sb->s_flags |= MS_RDONLY;
354 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) { 351 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
355 if (!silent) 352 printk(KERN_WARNING "hfs: write access to a jounaled filesystem is not supported, "
356 printk("HFS+-fs: write access to a jounaled filesystem is not supported, " 353 "use the force option at your own risk, mounting read-only.\n");
357 "use the force option at your own risk, mounting read-only.\n");
358 sb->s_flags |= MS_RDONLY; 354 sb->s_flags |= MS_RDONLY;
359 } 355 }
360 sbi->flags &= ~HFSPLUS_SB_FORCE; 356 sbi->flags &= ~HFSPLUS_SB_FORCE;
@@ -362,21 +358,18 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
362 /* Load metadata objects (B*Trees) */ 358 /* Load metadata objects (B*Trees) */
363 HFSPLUS_SB(sb).ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); 359 HFSPLUS_SB(sb).ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
364 if (!HFSPLUS_SB(sb).ext_tree) { 360 if (!HFSPLUS_SB(sb).ext_tree) {
365 if (!silent) 361 printk(KERN_ERR "hfs: failed to load extents file\n");
366 printk("HFS+-fs: failed to load extents file\n");
367 goto cleanup; 362 goto cleanup;
368 } 363 }
369 HFSPLUS_SB(sb).cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); 364 HFSPLUS_SB(sb).cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
370 if (!HFSPLUS_SB(sb).cat_tree) { 365 if (!HFSPLUS_SB(sb).cat_tree) {
371 if (!silent) 366 printk(KERN_ERR "hfs: failed to load catalog file\n");
372 printk("HFS+-fs: failed to load catalog file\n");
373 goto cleanup; 367 goto cleanup;
374 } 368 }
375 369
376 HFSPLUS_SB(sb).alloc_file = iget(sb, HFSPLUS_ALLOC_CNID); 370 HFSPLUS_SB(sb).alloc_file = iget(sb, HFSPLUS_ALLOC_CNID);
377 if (!HFSPLUS_SB(sb).alloc_file) { 371 if (!HFSPLUS_SB(sb).alloc_file) {
378 if (!silent) 372 printk(KERN_ERR "hfs: failed to load allocation file\n");
379 printk("HFS+-fs: failed to load allocation file\n");
380 goto cleanup; 373 goto cleanup;
381 } 374 }
382 375
@@ -384,8 +377,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
384 root = iget(sb, HFSPLUS_ROOT_CNID); 377 root = iget(sb, HFSPLUS_ROOT_CNID);
385 sb->s_root = d_alloc_root(root); 378 sb->s_root = d_alloc_root(root);
386 if (!sb->s_root) { 379 if (!sb->s_root) {
387 if (!silent) 380 printk(KERN_ERR "hfs: failed to load root directory\n");
388 printk("HFS+-fs: failed to load root directory\n");
389 iput(root); 381 iput(root);
390 goto cleanup; 382 goto cleanup;
391 } 383 }
@@ -419,7 +411,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
419 sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh); 411 sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
420 412
421 if (!HFSPLUS_SB(sb).hidden_dir) { 413 if (!HFSPLUS_SB(sb).hidden_dir) {
422 printk("HFS+: create hidden dir...\n"); 414 printk(KERN_DEBUG "hfs: create hidden dir...\n");
423 HFSPLUS_SB(sb).hidden_dir = hfsplus_new_inode(sb, S_IFDIR); 415 HFSPLUS_SB(sb).hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
424 hfsplus_create_cat(HFSPLUS_SB(sb).hidden_dir->i_ino, sb->s_root->d_inode, 416 hfsplus_create_cat(HFSPLUS_SB(sb).hidden_dir->i_ino, sb->s_root->d_inode,
425 &str, HFSPLUS_SB(sb).hidden_dir); 417 &str, HFSPLUS_SB(sb).hidden_dir);
@@ -499,7 +491,7 @@ static void __exit exit_hfsplus_fs(void)
499{ 491{
500 unregister_filesystem(&hfsplus_fs_type); 492 unregister_filesystem(&hfsplus_fs_type);
501 if (kmem_cache_destroy(hfsplus_inode_cachep)) 493 if (kmem_cache_destroy(hfsplus_inode_cachep))
502 printk(KERN_INFO "hfsplus_inode_cache: not all structures were freed\n"); 494 printk(KERN_ERR "hfsplus_inode_cache: not all structures were freed\n");
503} 495}
504 496
505module_init(init_hfsplus_fs) 497module_init(init_hfsplus_fs)
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index 060c69048c3..689c8bd721f 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -28,7 +28,8 @@ static inline u16 case_fold(u16 c)
28} 28}
29 29
30/* Compare unicode strings, return values like normal strcmp */ 30/* Compare unicode strings, return values like normal strcmp */
31int hfsplus_unistrcmp(const struct hfsplus_unistr *s1, const struct hfsplus_unistr *s2) 31int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
32 const struct hfsplus_unistr *s2)
32{ 33{
33 u16 len1, len2, c1, c2; 34 u16 len1, len2, c1, c2;
34 const hfsplus_unichr *p1, *p2; 35 const hfsplus_unichr *p1, *p2;
@@ -59,6 +60,33 @@ int hfsplus_unistrcmp(const struct hfsplus_unistr *s1, const struct hfsplus_unis
59 } 60 }
60} 61}
61 62
63/* Compare names as a sequence of 16-bit unsigned integers */
64int hfsplus_strcmp(const struct hfsplus_unistr *s1,
65 const struct hfsplus_unistr *s2)
66{
67 u16 len1, len2, c1, c2;
68 const hfsplus_unichr *p1, *p2;
69 int len;
70
71 len1 = be16_to_cpu(s1->length);
72 len2 = be16_to_cpu(s2->length);
73 p1 = s1->unicode;
74 p2 = s2->unicode;
75
76 for (len = min(len1, len2); len > 0; len--) {
77 c1 = be16_to_cpu(*p1);
78 c2 = be16_to_cpu(*p2);
79 if (c1 != c2)
80 return c1 < c2 ? -1 : 1;
81 p1++;
82 p2++;
83 }
84
85 return len1 < len2 ? -1 :
86 len1 > len2 ? 1 : 0;
87}
88
89
62#define Hangul_SBase 0xac00 90#define Hangul_SBase 0xac00
63#define Hangul_LBase 0x1100 91#define Hangul_LBase 0x1100
64#define Hangul_VBase 0x1161 92#define Hangul_VBase 0x1161
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 95455e83923..72cab78f050 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -28,8 +28,11 @@ static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
28{ 28{
29 u32 extent; 29 u32 extent;
30 u16 attrib; 30 u16 attrib;
31 __be16 sig;
31 32
32 if (be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_EMBEDSIG)) != HFSPLUS_VOLHEAD_SIG) 33 sig = *(__be16 *)(bufptr + HFSP_WRAPOFF_EMBEDSIG);
34 if (sig != cpu_to_be16(HFSPLUS_VOLHEAD_SIG) &&
35 sig != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX))
33 return 0; 36 return 0;
34 37
35 attrib = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ATTRIB)); 38 attrib = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ATTRIB));
@@ -70,7 +73,7 @@ static int hfsplus_get_last_session(struct super_block *sb,
70 *start = (sector_t)te.cdte_addr.lba << 2; 73 *start = (sector_t)te.cdte_addr.lba << 2;
71 return 0; 74 return 0;
72 } 75 }
73 printk(KERN_ERR "HFS: Invalid session number or type of track\n"); 76 printk(KERN_ERR "hfs: invalid session number or type of track\n");
74 return -EINVAL; 77 return -EINVAL;
75 } 78 }
76 ms_info.addr_format = CDROM_LBA; 79 ms_info.addr_format = CDROM_LBA;
@@ -114,6 +117,10 @@ int hfsplus_read_wrapper(struct super_block *sb)
114 } 117 }
115 if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG)) 118 if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
116 break; 119 break;
120 if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) {
121 HFSPLUS_SB(sb).flags |= HFSPLUS_SB_HFSX;
122 break;
123 }
117 brelse(bh); 124 brelse(bh);
118 125
119 /* check for a partition block 126 /* check for a partition block
@@ -143,7 +150,7 @@ int hfsplus_read_wrapper(struct super_block *sb)
143 blocksize >>= 1; 150 blocksize >>= 1;
144 151
145 if (sb_set_blocksize(sb, blocksize) != blocksize) { 152 if (sb_set_blocksize(sb, blocksize) != blocksize) {
146 printk("HFS+: unable to blocksize to %u!\n", blocksize); 153 printk(KERN_ERR "hfs: unable to set blocksize to %u!\n", blocksize);
147 return -EINVAL; 154 return -EINVAL;
148 } 155 }
149 156
@@ -158,7 +165,9 @@ int hfsplus_read_wrapper(struct super_block *sb)
158 return -EIO; 165 return -EIO;
159 166
160 /* should still be the same... */ 167 /* should still be the same... */
161 if (be16_to_cpu(vhdr->signature) != HFSPLUS_VOLHEAD_SIG) 168 if (vhdr->signature != (HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX ?
169 cpu_to_be16(HFSPLUS_VOLHEAD_SIGX) :
170 cpu_to_be16(HFSPLUS_VOLHEAD_SIG)))
162 goto error; 171 goto error;
163 HFSPLUS_SB(sb).s_vhbh = bh; 172 HFSPLUS_SB(sb).s_vhbh = bh;
164 HFSPLUS_SB(sb).s_vhdr = vhdr; 173 HFSPLUS_SB(sb).s_vhdr = vhdr;
diff --git a/fs/inotify.c b/fs/inotify.c
index 2fecb7af4a7..878ccca6121 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -33,6 +33,7 @@
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/writeback.h> 34#include <linux/writeback.h>
35#include <linux/inotify.h> 35#include <linux/inotify.h>
36#include <linux/syscalls.h>
36 37
37#include <asm/ioctls.h> 38#include <asm/ioctls.h>
38 39
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index cb3cef525c3..e6265a0b56b 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -338,7 +338,7 @@ restart:
338 * done (maybe it's a new transaction, but it fell at the same 338 * done (maybe it's a new transaction, but it fell at the same
339 * address). 339 * address).
340 */ 340 */
341 if (journal->j_checkpoint_transactions == transaction || 341 if (journal->j_checkpoint_transactions == transaction &&
342 transaction->t_tid == this_tid) { 342 transaction->t_tid == this_tid) {
343 int batch_count = 0; 343 int batch_count = 0;
344 struct buffer_head *bhs[NR_BATCH]; 344 struct buffer_head *bhs[NR_BATCH];
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 002ad2bbc76..29e62d98bae 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -829,7 +829,8 @@ restart_loop:
829 journal->j_committing_transaction = NULL; 829 journal->j_committing_transaction = NULL;
830 spin_unlock(&journal->j_state_lock); 830 spin_unlock(&journal->j_state_lock);
831 831
832 if (commit_transaction->t_checkpoint_list == NULL) { 832 if (commit_transaction->t_checkpoint_list == NULL &&
833 commit_transaction->t_checkpoint_io_list == NULL) {
833 __journal_drop_transaction(journal, commit_transaction); 834 __journal_drop_transaction(journal, commit_transaction);
834 } else { 835 } else {
835 if (journal->j_checkpoint_transactions == NULL) { 836 if (journal->j_checkpoint_transactions == NULL) {
diff --git a/fs/namei.c b/fs/namei.c
index 33fb5bd34a8..4acdac043b6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -30,6 +30,8 @@
30#include <linux/audit.h> 30#include <linux/audit.h>
31#include <linux/capability.h> 31#include <linux/capability.h>
32#include <linux/file.h> 32#include <linux/file.h>
33#include <linux/fcntl.h>
34#include <linux/namei.h>
33#include <asm/namei.h> 35#include <asm/namei.h>
34#include <asm/uaccess.h> 36#include <asm/uaccess.h>
35 37
@@ -1063,7 +1065,8 @@ set_it:
1063} 1065}
1064 1066
1065/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */ 1067/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
1066int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd) 1068static int fastcall do_path_lookup(int dfd, const char *name,
1069 unsigned int flags, struct nameidata *nd)
1067{ 1070{
1068 int retval = 0; 1071 int retval = 0;
1069 1072
@@ -1083,9 +1086,38 @@ int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata
1083 } 1086 }
1084 nd->mnt = mntget(current->fs->rootmnt); 1087 nd->mnt = mntget(current->fs->rootmnt);
1085 nd->dentry = dget(current->fs->root); 1088 nd->dentry = dget(current->fs->root);
1086 } else { 1089 } else if (dfd == AT_FDCWD) {
1087 nd->mnt = mntget(current->fs->pwdmnt); 1090 nd->mnt = mntget(current->fs->pwdmnt);
1088 nd->dentry = dget(current->fs->pwd); 1091 nd->dentry = dget(current->fs->pwd);
1092 } else {
1093 struct file *file;
1094 int fput_needed;
1095 struct dentry *dentry;
1096
1097 file = fget_light(dfd, &fput_needed);
1098 if (!file) {
1099 retval = -EBADF;
1100 goto out_fail;
1101 }
1102
1103 dentry = file->f_dentry;
1104
1105 if (!S_ISDIR(dentry->d_inode->i_mode)) {
1106 retval = -ENOTDIR;
1107 fput_light(file, fput_needed);
1108 goto out_fail;
1109 }
1110
1111 retval = file_permission(file, MAY_EXEC);
1112 if (retval) {
1113 fput_light(file, fput_needed);
1114 goto out_fail;
1115 }
1116
1117 nd->mnt = mntget(file->f_vfsmnt);
1118 nd->dentry = dget(dentry);
1119
1120 fput_light(file, fput_needed);
1089 } 1121 }
1090 read_unlock(&current->fs->lock); 1122 read_unlock(&current->fs->lock);
1091 current->total_link_count = 0; 1123 current->total_link_count = 0;
@@ -1094,11 +1126,19 @@ out:
1094 if (unlikely(current->audit_context 1126 if (unlikely(current->audit_context
1095 && nd && nd->dentry && nd->dentry->d_inode)) 1127 && nd && nd->dentry && nd->dentry->d_inode))
1096 audit_inode(name, nd->dentry->d_inode, flags); 1128 audit_inode(name, nd->dentry->d_inode, flags);
1129out_fail:
1097 return retval; 1130 return retval;
1098} 1131}
1099 1132
1100static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags, 1133int fastcall path_lookup(const char *name, unsigned int flags,
1101 struct nameidata *nd, int open_flags, int create_mode) 1134 struct nameidata *nd)
1135{
1136 return do_path_lookup(AT_FDCWD, name, flags, nd);
1137}
1138
1139static int __path_lookup_intent_open(int dfd, const char *name,
1140 unsigned int lookup_flags, struct nameidata *nd,
1141 int open_flags, int create_mode)
1102{ 1142{
1103 struct file *filp = get_empty_filp(); 1143 struct file *filp = get_empty_filp();
1104 int err; 1144 int err;
@@ -1108,7 +1148,7 @@ static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags
1108 nd->intent.open.file = filp; 1148 nd->intent.open.file = filp;
1109 nd->intent.open.flags = open_flags; 1149 nd->intent.open.flags = open_flags;
1110 nd->intent.open.create_mode = create_mode; 1150 nd->intent.open.create_mode = create_mode;
1111 err = path_lookup(name, lookup_flags|LOOKUP_OPEN, nd); 1151 err = do_path_lookup(dfd, name, lookup_flags|LOOKUP_OPEN, nd);
1112 if (IS_ERR(nd->intent.open.file)) { 1152 if (IS_ERR(nd->intent.open.file)) {
1113 if (err == 0) { 1153 if (err == 0) {
1114 err = PTR_ERR(nd->intent.open.file); 1154 err = PTR_ERR(nd->intent.open.file);
@@ -1126,10 +1166,10 @@ static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags
1126 * @nd: pointer to nameidata 1166 * @nd: pointer to nameidata
1127 * @open_flags: open intent flags 1167 * @open_flags: open intent flags
1128 */ 1168 */
1129int path_lookup_open(const char *name, unsigned int lookup_flags, 1169int path_lookup_open(int dfd, const char *name, unsigned int lookup_flags,
1130 struct nameidata *nd, int open_flags) 1170 struct nameidata *nd, int open_flags)
1131{ 1171{
1132 return __path_lookup_intent_open(name, lookup_flags, nd, 1172 return __path_lookup_intent_open(dfd, name, lookup_flags, nd,
1133 open_flags, 0); 1173 open_flags, 0);
1134} 1174}
1135 1175
@@ -1141,12 +1181,12 @@ int path_lookup_open(const char *name, unsigned int lookup_flags,
1141 * @open_flags: open intent flags 1181 * @open_flags: open intent flags
1142 * @create_mode: create intent flags 1182 * @create_mode: create intent flags
1143 */ 1183 */
1144static int path_lookup_create(const char *name, unsigned int lookup_flags, 1184static int path_lookup_create(int dfd, const char *name,
1145 struct nameidata *nd, int open_flags, 1185 unsigned int lookup_flags, struct nameidata *nd,
1146 int create_mode) 1186 int open_flags, int create_mode)
1147{ 1187{
1148 return __path_lookup_intent_open(name, lookup_flags|LOOKUP_CREATE, nd, 1188 return __path_lookup_intent_open(dfd, name, lookup_flags|LOOKUP_CREATE,
1149 open_flags, create_mode); 1189 nd, open_flags, create_mode);
1150} 1190}
1151 1191
1152int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags, 1192int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags,
@@ -1156,7 +1196,7 @@ int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags,
1156 int err = PTR_ERR(tmp); 1196 int err = PTR_ERR(tmp);
1157 1197
1158 if (!IS_ERR(tmp)) { 1198 if (!IS_ERR(tmp)) {
1159 err = __path_lookup_intent_open(tmp, lookup_flags, nd, open_flags, 0); 1199 err = __path_lookup_intent_open(AT_FDCWD, tmp, lookup_flags, nd, open_flags, 0);
1160 putname(tmp); 1200 putname(tmp);
1161 } 1201 }
1162 return err; 1202 return err;
@@ -1248,18 +1288,24 @@ access:
1248 * that namei follows links, while lnamei does not. 1288 * that namei follows links, while lnamei does not.
1249 * SMP-safe 1289 * SMP-safe
1250 */ 1290 */
1251int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd) 1291int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags,
1292 struct nameidata *nd)
1252{ 1293{
1253 char *tmp = getname(name); 1294 char *tmp = getname(name);
1254 int err = PTR_ERR(tmp); 1295 int err = PTR_ERR(tmp);
1255 1296
1256 if (!IS_ERR(tmp)) { 1297 if (!IS_ERR(tmp)) {
1257 err = path_lookup(tmp, flags, nd); 1298 err = do_path_lookup(dfd, tmp, flags, nd);
1258 putname(tmp); 1299 putname(tmp);
1259 } 1300 }
1260 return err; 1301 return err;
1261} 1302}
1262 1303
1304int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
1305{
1306 return __user_walk_fd(AT_FDCWD, name, flags, nd);
1307}
1308
1263/* 1309/*
1264 * It's inline, so penalty for filesystems that don't use sticky bit is 1310 * It's inline, so penalty for filesystems that don't use sticky bit is
1265 * minimal. 1311 * minimal.
@@ -1518,7 +1564,8 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
1518 * for symlinks (where the permissions are checked later). 1564 * for symlinks (where the permissions are checked later).
1519 * SMP-safe 1565 * SMP-safe
1520 */ 1566 */
1521int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd) 1567int open_namei(int dfd, const char *pathname, int flag,
1568 int mode, struct nameidata *nd)
1522{ 1569{
1523 int acc_mode, error; 1570 int acc_mode, error;
1524 struct path path; 1571 struct path path;
@@ -1540,7 +1587,8 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
1540 * The simplest case - just a plain lookup. 1587 * The simplest case - just a plain lookup.
1541 */ 1588 */
1542 if (!(flag & O_CREAT)) { 1589 if (!(flag & O_CREAT)) {
1543 error = path_lookup_open(pathname, lookup_flags(flag), nd, flag); 1590 error = path_lookup_open(dfd, pathname, lookup_flags(flag),
1591 nd, flag);
1544 if (error) 1592 if (error)
1545 return error; 1593 return error;
1546 goto ok; 1594 goto ok;
@@ -1549,7 +1597,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
1549 /* 1597 /*
1550 * Create - we need to know the parent. 1598 * Create - we need to know the parent.
1551 */ 1599 */
1552 error = path_lookup_create(pathname, LOOKUP_PARENT, nd, flag, mode); 1600 error = path_lookup_create(dfd,pathname,LOOKUP_PARENT,nd,flag,mode);
1553 if (error) 1601 if (error)
1554 return error; 1602 return error;
1555 1603
@@ -1744,7 +1792,8 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1744 return error; 1792 return error;
1745} 1793}
1746 1794
1747asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev) 1795asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
1796 unsigned dev)
1748{ 1797{
1749 int error = 0; 1798 int error = 0;
1750 char * tmp; 1799 char * tmp;
@@ -1757,7 +1806,7 @@ asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev)
1757 if (IS_ERR(tmp)) 1806 if (IS_ERR(tmp))
1758 return PTR_ERR(tmp); 1807 return PTR_ERR(tmp);
1759 1808
1760 error = path_lookup(tmp, LOOKUP_PARENT, &nd); 1809 error = do_path_lookup(dfd, tmp, LOOKUP_PARENT, &nd);
1761 if (error) 1810 if (error)
1762 goto out; 1811 goto out;
1763 dentry = lookup_create(&nd, 0); 1812 dentry = lookup_create(&nd, 0);
@@ -1793,6 +1842,11 @@ out:
1793 return error; 1842 return error;
1794} 1843}
1795 1844
1845asmlinkage long sys_mknod(const char __user *filename, int mode, unsigned dev)
1846{
1847 return sys_mknodat(AT_FDCWD, filename, mode, dev);
1848}
1849
1796int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) 1850int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1797{ 1851{
1798 int error = may_create(dir, dentry, NULL); 1852 int error = may_create(dir, dentry, NULL);
@@ -1815,7 +1869,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1815 return error; 1869 return error;
1816} 1870}
1817 1871
1818asmlinkage long sys_mkdir(const char __user * pathname, int mode) 1872asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode)
1819{ 1873{
1820 int error = 0; 1874 int error = 0;
1821 char * tmp; 1875 char * tmp;
@@ -1826,7 +1880,7 @@ asmlinkage long sys_mkdir(const char __user * pathname, int mode)
1826 struct dentry *dentry; 1880 struct dentry *dentry;
1827 struct nameidata nd; 1881 struct nameidata nd;
1828 1882
1829 error = path_lookup(tmp, LOOKUP_PARENT, &nd); 1883 error = do_path_lookup(dfd, tmp, LOOKUP_PARENT, &nd);
1830 if (error) 1884 if (error)
1831 goto out; 1885 goto out;
1832 dentry = lookup_create(&nd, 1); 1886 dentry = lookup_create(&nd, 1);
@@ -1846,6 +1900,11 @@ out:
1846 return error; 1900 return error;
1847} 1901}
1848 1902
1903asmlinkage long sys_mkdir(const char __user *pathname, int mode)
1904{
1905 return sys_mkdirat(AT_FDCWD, pathname, mode);
1906}
1907
1849/* 1908/*
1850 * We try to drop the dentry early: we should have 1909 * We try to drop the dentry early: we should have
1851 * a usage count of 2 if we're the only user of this 1910 * a usage count of 2 if we're the only user of this
@@ -1907,7 +1966,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
1907 return error; 1966 return error;
1908} 1967}
1909 1968
1910asmlinkage long sys_rmdir(const char __user * pathname) 1969static long do_rmdir(int dfd, const char __user *pathname)
1911{ 1970{
1912 int error = 0; 1971 int error = 0;
1913 char * name; 1972 char * name;
@@ -1918,7 +1977,7 @@ asmlinkage long sys_rmdir(const char __user * pathname)
1918 if(IS_ERR(name)) 1977 if(IS_ERR(name))
1919 return PTR_ERR(name); 1978 return PTR_ERR(name);
1920 1979
1921 error = path_lookup(name, LOOKUP_PARENT, &nd); 1980 error = do_path_lookup(dfd, name, LOOKUP_PARENT, &nd);
1922 if (error) 1981 if (error)
1923 goto exit; 1982 goto exit;
1924 1983
@@ -1948,6 +2007,11 @@ exit:
1948 return error; 2007 return error;
1949} 2008}
1950 2009
2010asmlinkage long sys_rmdir(const char __user *pathname)
2011{
2012 return do_rmdir(AT_FDCWD, pathname);
2013}
2014
1951int vfs_unlink(struct inode *dir, struct dentry *dentry) 2015int vfs_unlink(struct inode *dir, struct dentry *dentry)
1952{ 2016{
1953 int error = may_delete(dir, dentry, 0); 2017 int error = may_delete(dir, dentry, 0);
@@ -1984,7 +2048,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
1984 * writeout happening, and we don't want to prevent access to the directory 2048 * writeout happening, and we don't want to prevent access to the directory
1985 * while waiting on the I/O. 2049 * while waiting on the I/O.
1986 */ 2050 */
1987asmlinkage long sys_unlink(const char __user * pathname) 2051static long do_unlinkat(int dfd, const char __user *pathname)
1988{ 2052{
1989 int error = 0; 2053 int error = 0;
1990 char * name; 2054 char * name;
@@ -1996,7 +2060,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
1996 if(IS_ERR(name)) 2060 if(IS_ERR(name))
1997 return PTR_ERR(name); 2061 return PTR_ERR(name);
1998 2062
1999 error = path_lookup(name, LOOKUP_PARENT, &nd); 2063 error = do_path_lookup(dfd, name, LOOKUP_PARENT, &nd);
2000 if (error) 2064 if (error)
2001 goto exit; 2065 goto exit;
2002 error = -EISDIR; 2066 error = -EISDIR;
@@ -2031,6 +2095,22 @@ slashes:
2031 goto exit2; 2095 goto exit2;
2032} 2096}
2033 2097
2098asmlinkage long sys_unlinkat(int dfd, const char __user *pathname, int flag)
2099{
2100 if ((flag & ~AT_REMOVEDIR) != 0)
2101 return -EINVAL;
2102
2103 if (flag & AT_REMOVEDIR)
2104 return do_rmdir(dfd, pathname);
2105
2106 return do_unlinkat(dfd, pathname);
2107}
2108
2109asmlinkage long sys_unlink(const char __user *pathname)
2110{
2111 return do_unlinkat(AT_FDCWD, pathname);
2112}
2113
2034int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, int mode) 2114int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, int mode)
2035{ 2115{
2036 int error = may_create(dir, dentry, NULL); 2116 int error = may_create(dir, dentry, NULL);
@@ -2052,7 +2132,8 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, i
2052 return error; 2132 return error;
2053} 2133}
2054 2134
2055asmlinkage long sys_symlink(const char __user * oldname, const char __user * newname) 2135asmlinkage long sys_symlinkat(const char __user *oldname,
2136 int newdfd, const char __user *newname)
2056{ 2137{
2057 int error = 0; 2138 int error = 0;
2058 char * from; 2139 char * from;
@@ -2067,7 +2148,7 @@ asmlinkage long sys_symlink(const char __user * oldname, const char __user * new
2067 struct dentry *dentry; 2148 struct dentry *dentry;
2068 struct nameidata nd; 2149 struct nameidata nd;
2069 2150
2070 error = path_lookup(to, LOOKUP_PARENT, &nd); 2151 error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
2071 if (error) 2152 if (error)
2072 goto out; 2153 goto out;
2073 dentry = lookup_create(&nd, 0); 2154 dentry = lookup_create(&nd, 0);
@@ -2085,6 +2166,11 @@ out:
2085 return error; 2166 return error;
2086} 2167}
2087 2168
2169asmlinkage long sys_symlink(const char __user *oldname, const char __user *newname)
2170{
2171 return sys_symlinkat(oldname, AT_FDCWD, newname);
2172}
2173
2088int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) 2174int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
2089{ 2175{
2090 struct inode *inode = old_dentry->d_inode; 2176 struct inode *inode = old_dentry->d_inode;
@@ -2132,7 +2218,8 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
2132 * with linux 2.0, and to avoid hard-linking to directories 2218 * with linux 2.0, and to avoid hard-linking to directories
2133 * and other special files. --ADM 2219 * and other special files. --ADM
2134 */ 2220 */
2135asmlinkage long sys_link(const char __user * oldname, const char __user * newname) 2221asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
2222 int newdfd, const char __user *newname)
2136{ 2223{
2137 struct dentry *new_dentry; 2224 struct dentry *new_dentry;
2138 struct nameidata nd, old_nd; 2225 struct nameidata nd, old_nd;
@@ -2143,10 +2230,10 @@ asmlinkage long sys_link(const char __user * oldname, const char __user * newnam
2143 if (IS_ERR(to)) 2230 if (IS_ERR(to))
2144 return PTR_ERR(to); 2231 return PTR_ERR(to);
2145 2232
2146 error = __user_walk(oldname, 0, &old_nd); 2233 error = __user_walk_fd(olddfd, oldname, 0, &old_nd);
2147 if (error) 2234 if (error)
2148 goto exit; 2235 goto exit;
2149 error = path_lookup(to, LOOKUP_PARENT, &nd); 2236 error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
2150 if (error) 2237 if (error)
2151 goto out; 2238 goto out;
2152 error = -EXDEV; 2239 error = -EXDEV;
@@ -2169,6 +2256,11 @@ exit:
2169 return error; 2256 return error;
2170} 2257}
2171 2258
2259asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
2260{
2261 return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname);
2262}
2263
2172/* 2264/*
2173 * The worst of all namespace operations - renaming directory. "Perverted" 2265 * The worst of all namespace operations - renaming directory. "Perverted"
2174 * doesn't even start to describe it. Somebody in UCB had a heck of a trip... 2266 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
@@ -2315,7 +2407,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2315 return error; 2407 return error;
2316} 2408}
2317 2409
2318static int do_rename(const char * oldname, const char * newname) 2410static int do_rename(int olddfd, const char *oldname,
2411 int newdfd, const char *newname)
2319{ 2412{
2320 int error = 0; 2413 int error = 0;
2321 struct dentry * old_dir, * new_dir; 2414 struct dentry * old_dir, * new_dir;
@@ -2323,11 +2416,11 @@ static int do_rename(const char * oldname, const char * newname)
2323 struct dentry * trap; 2416 struct dentry * trap;
2324 struct nameidata oldnd, newnd; 2417 struct nameidata oldnd, newnd;
2325 2418
2326 error = path_lookup(oldname, LOOKUP_PARENT, &oldnd); 2419 error = do_path_lookup(olddfd, oldname, LOOKUP_PARENT, &oldnd);
2327 if (error) 2420 if (error)
2328 goto exit; 2421 goto exit;
2329 2422
2330 error = path_lookup(newname, LOOKUP_PARENT, &newnd); 2423 error = do_path_lookup(newdfd, newname, LOOKUP_PARENT, &newnd);
2331 if (error) 2424 if (error)
2332 goto exit1; 2425 goto exit1;
2333 2426
@@ -2391,7 +2484,8 @@ exit:
2391 return error; 2484 return error;
2392} 2485}
2393 2486
2394asmlinkage long sys_rename(const char __user * oldname, const char __user * newname) 2487asmlinkage long sys_renameat(int olddfd, const char __user *oldname,
2488 int newdfd, const char __user *newname)
2395{ 2489{
2396 int error; 2490 int error;
2397 char * from; 2491 char * from;
@@ -2403,13 +2497,18 @@ asmlinkage long sys_rename(const char __user * oldname, const char __user * newn
2403 to = getname(newname); 2497 to = getname(newname);
2404 error = PTR_ERR(to); 2498 error = PTR_ERR(to);
2405 if (!IS_ERR(to)) { 2499 if (!IS_ERR(to)) {
2406 error = do_rename(from,to); 2500 error = do_rename(olddfd, from, newdfd, to);
2407 putname(to); 2501 putname(to);
2408 } 2502 }
2409 putname(from); 2503 putname(from);
2410 return error; 2504 return error;
2411} 2505}
2412 2506
2507asmlinkage long sys_rename(const char __user *oldname, const char __user *newname)
2508{
2509 return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
2510}
2511
2413int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link) 2512int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
2414{ 2513{
2415 int len; 2514 int len;
@@ -2553,6 +2652,7 @@ struct inode_operations page_symlink_inode_operations = {
2553}; 2652};
2554 2653
2555EXPORT_SYMBOL(__user_walk); 2654EXPORT_SYMBOL(__user_walk);
2655EXPORT_SYMBOL(__user_walk_fd);
2556EXPORT_SYMBOL(follow_down); 2656EXPORT_SYMBOL(follow_down);
2557EXPORT_SYMBOL(follow_up); 2657EXPORT_SYMBOL(follow_up);
2558EXPORT_SYMBOL(get_write_access); /* binfmt_aout */ 2658EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index 0b14938b5b6..0d4cf948606 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -5,6 +5,7 @@
5 * 5 *
6 */ 6 */
7#include <linux/config.h> 7#include <linux/config.h>
8#include <linux/types.h>
8#include <linux/file.h> 9#include <linux/file.h>
9#include <linux/fs.h> 10#include <linux/fs.h>
10#include <linux/sunrpc/svc.h> 11#include <linux/sunrpc/svc.h>
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 361b4007d4a..a00fe868629 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -192,6 +192,14 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
192 } 192 }
193 if (status) 193 if (status)
194 goto out; 194 goto out;
195
196 /* Openowner is now set, so sequence id will get bumped. Now we need
197 * these checks before we do any creates: */
198 if (nfs4_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
199 return nfserr_grace;
200 if (!nfs4_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
201 return nfserr_no_grace;
202
195 switch (open->op_claim_type) { 203 switch (open->op_claim_type) {
196 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 204 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
197 status = nfserr_inval; 205 status = nfserr_inval;
@@ -210,6 +218,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
210 goto out; 218 goto out;
211 break; 219 break;
212 case NFS4_OPEN_CLAIM_PREVIOUS: 220 case NFS4_OPEN_CLAIM_PREVIOUS:
221 open->op_stateowner->so_confirmed = 1;
213 /* 222 /*
214 * The CURRENT_FH is already set to the file being 223 * The CURRENT_FH is already set to the file being
215 * opened. (1) set open->op_cinfo, (2) set 224 * opened. (1) set open->op_cinfo, (2) set
@@ -221,6 +230,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
221 goto out; 230 goto out;
222 break; 231 break;
223 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 232 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
233 open->op_stateowner->so_confirmed = 1;
224 printk("NFSD: unsupported OPEN claim type %d\n", 234 printk("NFSD: unsupported OPEN claim type %d\n",
225 open->op_claim_type); 235 open->op_claim_type);
226 status = nfserr_notsupp; 236 status = nfserr_notsupp;
@@ -584,31 +594,23 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_se
584{ 594{
585 int status = nfs_ok; 595 int status = nfs_ok;
586 596
587 if (!current_fh->fh_dentry)
588 return nfserr_nofilehandle;
589
590 status = nfs_ok;
591 if (setattr->sa_iattr.ia_valid & ATTR_SIZE) { 597 if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
592 nfs4_lock_state(); 598 nfs4_lock_state();
593 if ((status = nfs4_preprocess_stateid_op(current_fh, 599 status = nfs4_preprocess_stateid_op(current_fh,
594 &setattr->sa_stateid, 600 &setattr->sa_stateid, CHECK_FH | WR_STATE, NULL);
595 CHECK_FH | WR_STATE, NULL))) {
596 dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
597 goto out_unlock;
598 }
599 nfs4_unlock_state(); 601 nfs4_unlock_state();
602 if (status) {
603 dprintk("NFSD: nfsd4_setattr: couldn't process stateid!");
604 return status;
605 }
600 } 606 }
601 status = nfs_ok; 607 status = nfs_ok;
602 if (setattr->sa_acl != NULL) 608 if (setattr->sa_acl != NULL)
603 status = nfsd4_set_nfs4_acl(rqstp, current_fh, setattr->sa_acl); 609 status = nfsd4_set_nfs4_acl(rqstp, current_fh, setattr->sa_acl);
604 if (status) 610 if (status)
605 goto out; 611 return status;
606 status = nfsd_setattr(rqstp, current_fh, &setattr->sa_iattr, 612 status = nfsd_setattr(rqstp, current_fh, &setattr->sa_iattr,
607 0, (time_t)0); 613 0, (time_t)0);
608out:
609 return status;
610out_unlock:
611 nfs4_unlock_state();
612 return status; 614 return status;
613} 615}
614 616
@@ -626,15 +628,17 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
626 return nfserr_inval; 628 return nfserr_inval;
627 629
628 nfs4_lock_state(); 630 nfs4_lock_state();
629 if ((status = nfs4_preprocess_stateid_op(current_fh, stateid, 631 status = nfs4_preprocess_stateid_op(current_fh, stateid,
630 CHECK_FH | WR_STATE, &filp))) { 632 CHECK_FH | WR_STATE, &filp);
631 dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
632 goto out;
633 }
634 if (filp) 633 if (filp)
635 get_file(filp); 634 get_file(filp);
636 nfs4_unlock_state(); 635 nfs4_unlock_state();
637 636
637 if (status) {
638 dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
639 return status;
640 }
641
638 write->wr_bytes_written = write->wr_buflen; 642 write->wr_bytes_written = write->wr_buflen;
639 write->wr_how_written = write->wr_stable_how; 643 write->wr_how_written = write->wr_stable_how;
640 p = (u32 *)write->wr_verifier.data; 644 p = (u32 *)write->wr_verifier.data;
@@ -650,9 +654,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
650 if (status == nfserr_symlink) 654 if (status == nfserr_symlink)
651 status = nfserr_inval; 655 status = nfserr_inval;
652 return status; 656 return status;
653out:
654 nfs4_unlock_state();
655 return status;
656} 657}
657 658
658/* This routine never returns NFS_OK! If there are no other errors, it 659/* This routine never returns NFS_OK! If there are no other errors, it
@@ -768,6 +769,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
768 while (!status && resp->opcnt < args->opcnt) { 769 while (!status && resp->opcnt < args->opcnt) {
769 op = &args->ops[resp->opcnt++]; 770 op = &args->ops[resp->opcnt++];
770 771
772 dprintk("nfsv4 compound op #%d: %d\n", resp->opcnt, op->opnum);
773
771 /* 774 /*
772 * The XDR decode routines may have pre-set op->status; 775 * The XDR decode routines may have pre-set op->status;
773 * for example, if there is a miscellaneous XDR error 776 * for example, if there is a miscellaneous XDR error
@@ -792,17 +795,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
792 /* All operations except RENEW, SETCLIENTID, RESTOREFH 795 /* All operations except RENEW, SETCLIENTID, RESTOREFH
793 * SETCLIENTID_CONFIRM, PUTFH and PUTROOTFH 796 * SETCLIENTID_CONFIRM, PUTFH and PUTROOTFH
794 * require a valid current filehandle 797 * require a valid current filehandle
795 *
796 * SETATTR NOFILEHANDLE error handled in nfsd4_setattr
797 * due to required returned bitmap argument
798 */ 798 */
799 if ((!current_fh->fh_dentry) && 799 if ((!current_fh->fh_dentry) &&
800 !((op->opnum == OP_PUTFH) || (op->opnum == OP_PUTROOTFH) || 800 !((op->opnum == OP_PUTFH) || (op->opnum == OP_PUTROOTFH) ||
801 (op->opnum == OP_SETCLIENTID) || 801 (op->opnum == OP_SETCLIENTID) ||
802 (op->opnum == OP_SETCLIENTID_CONFIRM) || 802 (op->opnum == OP_SETCLIENTID_CONFIRM) ||
803 (op->opnum == OP_RENEW) || (op->opnum == OP_RESTOREFH) || 803 (op->opnum == OP_RENEW) || (op->opnum == OP_RESTOREFH) ||
804 (op->opnum == OP_RELEASE_LOCKOWNER) || 804 (op->opnum == OP_RELEASE_LOCKOWNER))) {
805 (op->opnum == OP_SETATTR))) {
806 op->status = nfserr_nofilehandle; 805 op->status = nfserr_nofilehandle;
807 goto encode_op; 806 goto encode_op;
808 } 807 }
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index be963a133aa..06da7506363 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -222,8 +222,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
222 222
223 nfs4_save_user(&uid, &gid); 223 nfs4_save_user(&uid, &gid);
224 224
225 filp = dentry_open(dget(dir), mntget(rec_dir.mnt), 225 filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
226 O_RDWR);
227 status = PTR_ERR(filp); 226 status = PTR_ERR(filp);
228 if (IS_ERR(filp)) 227 if (IS_ERR(filp))
229 goto out; 228 goto out;
@@ -400,9 +399,10 @@ nfsd4_init_recdir(char *rec_dirname)
400 399
401 nfs4_save_user(&uid, &gid); 400 nfs4_save_user(&uid, &gid);
402 401
403 status = path_lookup(rec_dirname, LOOKUP_FOLLOW, &rec_dir); 402 status = path_lookup(rec_dirname, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
404 if (status == -ENOENT) 403 &rec_dir);
405 printk("NFSD: recovery directory %s doesn't exist\n", 404 if (status)
405 printk("NFSD: unable to find recovery directory %s\n",
406 rec_dirname); 406 rec_dirname);
407 407
408 if (!status) 408 if (!status)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 6bbefd06f10..1143cfb6454 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1088,7 +1088,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
1088 sop->so_seqid = open->op_seqid; 1088 sop->so_seqid = open->op_seqid;
1089 sop->so_confirmed = 0; 1089 sop->so_confirmed = 0;
1090 rp = &sop->so_replay; 1090 rp = &sop->so_replay;
1091 rp->rp_status = NFSERR_SERVERFAULT; 1091 rp->rp_status = nfserr_serverfault;
1092 rp->rp_buflen = 0; 1092 rp->rp_buflen = 0;
1093 rp->rp_buf = rp->rp_ibuf; 1093 rp->rp_buf = rp->rp_ibuf;
1094 return sop; 1094 return sop;
@@ -1178,7 +1178,6 @@ release_stateid(struct nfs4_stateid *stp, int flags)
1178 locks_remove_posix(filp, (fl_owner_t) stp->st_stateowner); 1178 locks_remove_posix(filp, (fl_owner_t) stp->st_stateowner);
1179 put_nfs4_file(stp->st_file); 1179 put_nfs4_file(stp->st_file);
1180 kmem_cache_free(stateid_slab, stp); 1180 kmem_cache_free(stateid_slab, stp);
1181 stp = NULL;
1182} 1181}
1183 1182
1184static void 1183static void
@@ -1191,22 +1190,6 @@ move_to_close_lru(struct nfs4_stateowner *sop)
1191 sop->so_time = get_seconds(); 1190 sop->so_time = get_seconds();
1192} 1191}
1193 1192
1194static void
1195release_state_owner(struct nfs4_stateid *stp, int flag)
1196{
1197 struct nfs4_stateowner *sop = stp->st_stateowner;
1198
1199 dprintk("NFSD: release_state_owner\n");
1200 release_stateid(stp, flag);
1201
1202 /* place unused nfs4_stateowners on so_close_lru list to be
1203 * released by the laundromat service after the lease period
1204 * to enable us to handle CLOSE replay
1205 */
1206 if (sop->so_confirmed && list_empty(&sop->so_stateids))
1207 move_to_close_lru(sop);
1208}
1209
1210static int 1193static int
1211cmp_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, clientid_t *clid) { 1194cmp_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, clientid_t *clid) {
1212 return ((sop->so_owner.len == owner->len) && 1195 return ((sop->so_owner.len == owner->len) &&
@@ -1446,92 +1429,61 @@ static struct lock_manager_operations nfsd_lease_mng_ops = {
1446}; 1429};
1447 1430
1448 1431
1449/*
1450 * nfsd4_process_open1()
1451 * lookup stateowner.
1452 * found:
1453 * check confirmed
1454 * confirmed:
1455 * check seqid
1456 * not confirmed:
1457 * delete owner
1458 * create new owner
1459 * notfound:
1460 * verify clientid
1461 * create new owner
1462 *
1463 * called with nfs4_lock_state() held.
1464 */
1465int 1432int
1466nfsd4_process_open1(struct nfsd4_open *open) 1433nfsd4_process_open1(struct nfsd4_open *open)
1467{ 1434{
1468 int status;
1469 clientid_t *clientid = &open->op_clientid; 1435 clientid_t *clientid = &open->op_clientid;
1470 struct nfs4_client *clp = NULL; 1436 struct nfs4_client *clp = NULL;
1471 unsigned int strhashval; 1437 unsigned int strhashval;
1472 struct nfs4_stateowner *sop = NULL; 1438 struct nfs4_stateowner *sop = NULL;
1473 1439
1474 status = nfserr_inval;
1475 if (!check_name(open->op_owner)) 1440 if (!check_name(open->op_owner))
1476 goto out; 1441 return nfserr_inval;
1477 1442
1478 if (STALE_CLIENTID(&open->op_clientid)) 1443 if (STALE_CLIENTID(&open->op_clientid))
1479 return nfserr_stale_clientid; 1444 return nfserr_stale_clientid;
1480 1445
1481 strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner); 1446 strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner);
1482 sop = find_openstateowner_str(strhashval, open); 1447 sop = find_openstateowner_str(strhashval, open);
1483 if (sop) { 1448 open->op_stateowner = sop;
1484 open->op_stateowner = sop; 1449 if (!sop) {
1485 /* check for replay */ 1450 /* Make sure the client's lease hasn't expired. */
1486 if (open->op_seqid == sop->so_seqid - 1){
1487 if (sop->so_replay.rp_buflen)
1488 return NFSERR_REPLAY_ME;
1489 else {
1490 /* The original OPEN failed so spectacularly
1491 * that we don't even have replay data saved!
1492 * Therefore, we have no choice but to continue
1493 * processing this OPEN; presumably, we'll
1494 * fail again for the same reason.
1495 */
1496 dprintk("nfsd4_process_open1:"
1497 " replay with no replay cache\n");
1498 goto renew;
1499 }
1500 } else if (sop->so_confirmed) {
1501 if (open->op_seqid == sop->so_seqid)
1502 goto renew;
1503 status = nfserr_bad_seqid;
1504 goto out;
1505 } else {
1506 /* If we get here, we received an OPEN for an
1507 * unconfirmed nfs4_stateowner. Since the seqid's are
1508 * different, purge the existing nfs4_stateowner, and
1509 * instantiate a new one.
1510 */
1511 clp = sop->so_client;
1512 release_stateowner(sop);
1513 }
1514 } else {
1515 /* nfs4_stateowner not found.
1516 * Verify clientid and instantiate new nfs4_stateowner.
1517 * If verify fails this is presumably the result of the
1518 * client's lease expiring.
1519 */
1520 status = nfserr_expired;
1521 clp = find_confirmed_client(clientid); 1451 clp = find_confirmed_client(clientid);
1522 if (clp == NULL) 1452 if (clp == NULL)
1523 goto out; 1453 return nfserr_expired;
1454 goto renew;
1524 } 1455 }
1525 status = nfserr_resource; 1456 if (!sop->so_confirmed) {
1526 sop = alloc_init_open_stateowner(strhashval, clp, open); 1457 /* Replace unconfirmed owners without checking for replay. */
1527 if (sop == NULL) 1458 clp = sop->so_client;
1528 goto out; 1459 release_stateowner(sop);
1529 open->op_stateowner = sop; 1460 open->op_stateowner = NULL;
1461 goto renew;
1462 }
1463 if (open->op_seqid == sop->so_seqid - 1) {
1464 if (sop->so_replay.rp_buflen)
1465 return NFSERR_REPLAY_ME;
1466 /* The original OPEN failed so spectacularly
1467 * that we don't even have replay data saved!
1468 * Therefore, we have no choice but to continue
1469 * processing this OPEN; presumably, we'll
1470 * fail again for the same reason.
1471 */
1472 dprintk("nfsd4_process_open1: replay with no replay cache\n");
1473 goto renew;
1474 }
1475 if (open->op_seqid != sop->so_seqid)
1476 return nfserr_bad_seqid;
1530renew: 1477renew:
1531 status = nfs_ok; 1478 if (open->op_stateowner == NULL) {
1479 sop = alloc_init_open_stateowner(strhashval, clp, open);
1480 if (sop == NULL)
1481 return nfserr_resource;
1482 open->op_stateowner = sop;
1483 }
1484 list_del_init(&sop->so_close_lru);
1532 renew_client(sop->so_client); 1485 renew_client(sop->so_client);
1533out: 1486 return nfs_ok;
1534 return status;
1535} 1487}
1536 1488
1537static inline int 1489static inline int
@@ -1648,7 +1600,7 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
1648 if (!open->op_truncate) 1600 if (!open->op_truncate)
1649 return 0; 1601 return 0;
1650 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 1602 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
1651 return -EINVAL; 1603 return nfserr_inval;
1652 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 1604 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
1653} 1605}
1654 1606
@@ -1657,26 +1609,26 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_sta
1657{ 1609{
1658 struct file *filp = stp->st_vfs_file; 1610 struct file *filp = stp->st_vfs_file;
1659 struct inode *inode = filp->f_dentry->d_inode; 1611 struct inode *inode = filp->f_dentry->d_inode;
1660 unsigned int share_access; 1612 unsigned int share_access, new_writer;
1661 int status; 1613 int status;
1662 1614
1663 set_access(&share_access, stp->st_access_bmap); 1615 set_access(&share_access, stp->st_access_bmap);
1664 share_access = ~share_access; 1616 new_writer = (~share_access) & open->op_share_access
1665 share_access &= open->op_share_access; 1617 & NFS4_SHARE_ACCESS_WRITE;
1666
1667 if (!(share_access & NFS4_SHARE_ACCESS_WRITE))
1668 return nfsd4_truncate(rqstp, cur_fh, open);
1669 1618
1670 status = get_write_access(inode); 1619 if (new_writer) {
1671 if (status) 1620 status = get_write_access(inode);
1672 return nfserrno(status); 1621 if (status)
1622 return nfserrno(status);
1623 }
1673 status = nfsd4_truncate(rqstp, cur_fh, open); 1624 status = nfsd4_truncate(rqstp, cur_fh, open);
1674 if (status) { 1625 if (status) {
1675 put_write_access(inode); 1626 if (new_writer)
1627 put_write_access(inode);
1676 return status; 1628 return status;
1677 } 1629 }
1678 /* remember the open */ 1630 /* remember the open */
1679 filp->f_mode = (filp->f_mode | FMODE_WRITE) & ~FMODE_READ; 1631 filp->f_mode |= open->op_share_access;
1680 set_bit(open->op_share_access, &stp->st_access_bmap); 1632 set_bit(open->op_share_access, &stp->st_access_bmap);
1681 set_bit(open->op_share_deny, &stp->st_deny_bmap); 1633 set_bit(open->op_share_deny, &stp->st_deny_bmap);
1682 1634
@@ -1780,12 +1732,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
1780 struct nfs4_delegation *dp = NULL; 1732 struct nfs4_delegation *dp = NULL;
1781 int status; 1733 int status;
1782 1734
1783 if (nfs4_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
1784 return nfserr_grace;
1785
1786 if (!nfs4_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
1787 return nfserr_no_grace;
1788
1789 status = nfserr_inval; 1735 status = nfserr_inval;
1790 if (!TEST_ACCESS(open->op_share_access) || !TEST_DENY(open->op_share_deny)) 1736 if (!TEST_ACCESS(open->op_share_access) || !TEST_DENY(open->op_share_deny))
1791 goto out; 1737 goto out;
@@ -2423,15 +2369,19 @@ nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_clos
2423 CHECK_FH | OPEN_STATE | CLOSE_STATE, 2369 CHECK_FH | OPEN_STATE | CLOSE_STATE,
2424 &close->cl_stateowner, &stp, NULL))) 2370 &close->cl_stateowner, &stp, NULL)))
2425 goto out; 2371 goto out;
2426 /*
2427 * Return success, but first update the stateid.
2428 */
2429 status = nfs_ok; 2372 status = nfs_ok;
2430 update_stateid(&stp->st_stateid); 2373 update_stateid(&stp->st_stateid);
2431 memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t)); 2374 memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t));
2432 2375
2433 /* release_state_owner() calls nfsd_close() if needed */ 2376 /* release_stateid() calls nfsd_close() if needed */
2434 release_state_owner(stp, OPEN_STATE); 2377 release_stateid(stp, OPEN_STATE);
2378
2379 /* place unused nfs4_stateowners on so_close_lru list to be
2380 * released by the laundromat service after the lease period
2381 * to enable us to handle CLOSE replay
2382 */
2383 if (list_empty(&close->cl_stateowner->so_stateids))
2384 move_to_close_lru(close->cl_stateowner);
2435out: 2385out:
2436 if (close->cl_stateowner) { 2386 if (close->cl_stateowner) {
2437 nfs4_get_stateowner(close->cl_stateowner); 2387 nfs4_get_stateowner(close->cl_stateowner);
@@ -2633,7 +2583,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
2633 sop->so_seqid = lock->lk_new_lock_seqid + 1; 2583 sop->so_seqid = lock->lk_new_lock_seqid + 1;
2634 sop->so_confirmed = 1; 2584 sop->so_confirmed = 1;
2635 rp = &sop->so_replay; 2585 rp = &sop->so_replay;
2636 rp->rp_status = NFSERR_SERVERFAULT; 2586 rp->rp_status = nfserr_serverfault;
2637 rp->rp_buflen = 0; 2587 rp->rp_buflen = 0;
2638 rp->rp_buf = rp->rp_ibuf; 2588 rp->rp_buf = rp->rp_ibuf;
2639 return sop; 2589 return sop;
@@ -2700,6 +2650,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2700 if (check_lock_length(lock->lk_offset, lock->lk_length)) 2650 if (check_lock_length(lock->lk_offset, lock->lk_length))
2701 return nfserr_inval; 2651 return nfserr_inval;
2702 2652
2653 if ((status = fh_verify(rqstp, current_fh, S_IFREG, MAY_LOCK))) {
2654 dprintk("NFSD: nfsd4_lock: permission denied!\n");
2655 return status;
2656 }
2657
2703 nfs4_lock_state(); 2658 nfs4_lock_state();
2704 2659
2705 if (lock->lk_is_new) { 2660 if (lock->lk_is_new) {
@@ -2720,11 +2675,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2720 lock->lk_new_open_seqid, 2675 lock->lk_new_open_seqid,
2721 &lock->lk_new_open_stateid, 2676 &lock->lk_new_open_stateid,
2722 CHECK_FH | OPEN_STATE, 2677 CHECK_FH | OPEN_STATE,
2723 &lock->lk_stateowner, &open_stp, 2678 &lock->lk_replay_owner, &open_stp,
2724 lock); 2679 lock);
2725 if (status) 2680 if (status)
2726 goto out; 2681 goto out;
2727 open_sop = lock->lk_stateowner; 2682 open_sop = lock->lk_replay_owner;
2728 /* create lockowner and lock stateid */ 2683 /* create lockowner and lock stateid */
2729 fp = open_stp->st_file; 2684 fp = open_stp->st_file;
2730 strhashval = lock_ownerstr_hashval(fp->fi_inode, 2685 strhashval = lock_ownerstr_hashval(fp->fi_inode,
@@ -2739,29 +2694,22 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2739 if (lock_sop == NULL) 2694 if (lock_sop == NULL)
2740 goto out; 2695 goto out;
2741 lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp); 2696 lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
2742 if (lock_stp == NULL) { 2697 if (lock_stp == NULL)
2743 release_stateowner(lock_sop);
2744 goto out; 2698 goto out;
2745 }
2746 } else { 2699 } else {
2747 /* lock (lock owner + lock stateid) already exists */ 2700 /* lock (lock owner + lock stateid) already exists */
2748 status = nfs4_preprocess_seqid_op(current_fh, 2701 status = nfs4_preprocess_seqid_op(current_fh,
2749 lock->lk_old_lock_seqid, 2702 lock->lk_old_lock_seqid,
2750 &lock->lk_old_lock_stateid, 2703 &lock->lk_old_lock_stateid,
2751 CHECK_FH | LOCK_STATE, 2704 CHECK_FH | LOCK_STATE,
2752 &lock->lk_stateowner, &lock_stp, lock); 2705 &lock->lk_replay_owner, &lock_stp, lock);
2753 if (status) 2706 if (status)
2754 goto out; 2707 goto out;
2755 lock_sop = lock->lk_stateowner; 2708 lock_sop = lock->lk_replay_owner;
2756 } 2709 }
2757 /* lock->lk_stateowner and lock_stp have been created or found */ 2710 /* lock->lk_replay_owner and lock_stp have been created or found */
2758 filp = lock_stp->st_vfs_file; 2711 filp = lock_stp->st_vfs_file;
2759 2712
2760 if ((status = fh_verify(rqstp, current_fh, S_IFREG, MAY_LOCK))) {
2761 dprintk("NFSD: nfsd4_lock: permission denied!\n");
2762 goto out;
2763 }
2764
2765 status = nfserr_grace; 2713 status = nfserr_grace;
2766 if (nfs4_in_grace() && !lock->lk_reclaim) 2714 if (nfs4_in_grace() && !lock->lk_reclaim)
2767 goto out; 2715 goto out;
@@ -2802,8 +2750,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2802 */ 2750 */
2803 2751
2804 status = posix_lock_file(filp, &file_lock); 2752 status = posix_lock_file(filp, &file_lock);
2805 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
2806 file_lock.fl_ops->fl_release_private(&file_lock);
2807 dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status); 2753 dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status);
2808 switch (-status) { 2754 switch (-status) {
2809 case 0: /* success! */ 2755 case 0: /* success! */
@@ -2815,9 +2761,12 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2815 goto conflicting_lock; 2761 goto conflicting_lock;
2816 case (EDEADLK): 2762 case (EDEADLK):
2817 status = nfserr_deadlock; 2763 status = nfserr_deadlock;
2764 dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
2765 goto out;
2818 default: 2766 default:
2767 status = nfserrno(status);
2819 dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); 2768 dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
2820 goto out_destroy_new_stateid; 2769 goto out;
2821 } 2770 }
2822 2771
2823conflicting_lock: 2772conflicting_lock:
@@ -2831,20 +2780,12 @@ conflicting_lock:
2831 goto out; 2780 goto out;
2832 } 2781 }
2833 nfs4_set_lock_denied(conflock, &lock->lk_denied); 2782 nfs4_set_lock_denied(conflock, &lock->lk_denied);
2834
2835out_destroy_new_stateid:
2836 if (lock->lk_is_new) {
2837 dprintk("NFSD: nfsd4_lock: destroy new stateid!\n");
2838 /*
2839 * An error encountered after instantiation of the new
2840 * stateid has forced us to destroy it.
2841 */
2842 release_state_owner(lock_stp, LOCK_STATE);
2843 }
2844out: 2783out:
2845 if (lock->lk_stateowner) { 2784 if (status && lock->lk_is_new && lock_sop)
2846 nfs4_get_stateowner(lock->lk_stateowner); 2785 release_stateowner(lock_sop);
2847 *replay_owner = lock->lk_stateowner; 2786 if (lock->lk_replay_owner) {
2787 nfs4_get_stateowner(lock->lk_replay_owner);
2788 *replay_owner = lock->lk_replay_owner;
2848 } 2789 }
2849 nfs4_unlock_state(); 2790 nfs4_unlock_state();
2850 return status; 2791 return status;
@@ -2977,8 +2918,6 @@ nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2977 * Try to unlock the file in the VFS. 2918 * Try to unlock the file in the VFS.
2978 */ 2919 */
2979 status = posix_lock_file(filp, &file_lock); 2920 status = posix_lock_file(filp, &file_lock);
2980 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
2981 file_lock.fl_ops->fl_release_private(&file_lock);
2982 if (status) { 2921 if (status) {
2983 dprintk("NFSD: nfs4_locku: posix_lock_file failed!\n"); 2922 dprintk("NFSD: nfs4_locku: posix_lock_file failed!\n");
2984 goto out_nfserr; 2923 goto out_nfserr;
@@ -3016,9 +2955,10 @@ check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
3016 2955
3017 lock_kernel(); 2956 lock_kernel();
3018 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { 2957 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
3019 if ((*flpp)->fl_owner == (fl_owner_t)lowner) 2958 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
3020 status = 1; 2959 status = 1;
3021 goto out; 2960 goto out;
2961 }
3022 } 2962 }
3023out: 2963out:
3024 unlock_kernel(); 2964 unlock_kernel();
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index dcd67318694..69d3501173a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -528,7 +528,7 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
528{ 528{
529 DECODE_HEAD; 529 DECODE_HEAD;
530 530
531 lock->lk_stateowner = NULL; 531 lock->lk_replay_owner = NULL;
532 /* 532 /*
533 * type, reclaim(boolean), offset, length, new_lock_owner(boolean) 533 * type, reclaim(boolean), offset, length, new_lock_owner(boolean)
534 */ 534 */
@@ -1764,10 +1764,11 @@ nfsd4_encode_dirent(struct readdir_cd *ccd, const char *name, int namlen,
1764 */ 1764 */
1765 if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)) 1765 if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
1766 goto fail; 1766 goto fail;
1767 nfserr = nfserr_toosmall;
1768 p = nfsd4_encode_rdattr_error(p, buflen, nfserr); 1767 p = nfsd4_encode_rdattr_error(p, buflen, nfserr);
1769 if (p == NULL) 1768 if (p == NULL) {
1769 nfserr = nfserr_toosmall;
1770 goto fail; 1770 goto fail;
1771 }
1771 } 1772 }
1772 cd->buflen -= (p - cd->buffer); 1773 cd->buflen -= (p - cd->buffer);
1773 cd->buffer = p; 1774 cd->buffer = p;
@@ -1895,7 +1896,6 @@ nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denie
1895static void 1896static void
1896nfsd4_encode_lock(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_lock *lock) 1897nfsd4_encode_lock(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_lock *lock)
1897{ 1898{
1898
1899 ENCODE_SEQID_OP_HEAD; 1899 ENCODE_SEQID_OP_HEAD;
1900 1900
1901 if (!nfserr) { 1901 if (!nfserr) {
@@ -1906,7 +1906,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_lock
1906 } else if (nfserr == nfserr_denied) 1906 } else if (nfserr == nfserr_denied)
1907 nfsd4_encode_lock_denied(resp, &lock->lk_denied); 1907 nfsd4_encode_lock_denied(resp, &lock->lk_denied);
1908 1908
1909 ENCODE_SEQID_OP_TAIL(lock->lk_stateowner); 1909 ENCODE_SEQID_OP_TAIL(lock->lk_replay_owner);
1910} 1910}
1911 1911
1912static void 1912static void
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 0aa1b9603d7..3e6b75cd90f 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -36,6 +36,22 @@ nfsd_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
36 return nfs_ok; 36 return nfs_ok;
37} 37}
38 38
39static int
40nfsd_return_attrs(int err, struct nfsd_attrstat *resp)
41{
42 if (err) return err;
43 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
44 resp->fh.fh_dentry,
45 &resp->stat));
46}
47static int
48nfsd_return_dirop(int err, struct nfsd_diropres *resp)
49{
50 if (err) return err;
51 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
52 resp->fh.fh_dentry,
53 &resp->stat));
54}
39/* 55/*
40 * Get a file's attributes 56 * Get a file's attributes
41 * N.B. After this call resp->fh needs an fh_put 57 * N.B. After this call resp->fh needs an fh_put
@@ -44,10 +60,12 @@ static int
44nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp, 60nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
45 struct nfsd_attrstat *resp) 61 struct nfsd_attrstat *resp)
46{ 62{
63 int nfserr;
47 dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh)); 64 dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
48 65
49 fh_copy(&resp->fh, &argp->fh); 66 fh_copy(&resp->fh, &argp->fh);
50 return fh_verify(rqstp, &resp->fh, 0, MAY_NOP); 67 nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
68 return nfsd_return_attrs(nfserr, resp);
51} 69}
52 70
53/* 71/*
@@ -58,12 +76,14 @@ static int
58nfsd_proc_setattr(struct svc_rqst *rqstp, struct nfsd_sattrargs *argp, 76nfsd_proc_setattr(struct svc_rqst *rqstp, struct nfsd_sattrargs *argp,
59 struct nfsd_attrstat *resp) 77 struct nfsd_attrstat *resp)
60{ 78{
79 int nfserr;
61 dprintk("nfsd: SETATTR %s, valid=%x, size=%ld\n", 80 dprintk("nfsd: SETATTR %s, valid=%x, size=%ld\n",
62 SVCFH_fmt(&argp->fh), 81 SVCFH_fmt(&argp->fh),
63 argp->attrs.ia_valid, (long) argp->attrs.ia_size); 82 argp->attrs.ia_valid, (long) argp->attrs.ia_size);
64 83
65 fh_copy(&resp->fh, &argp->fh); 84 fh_copy(&resp->fh, &argp->fh);
66 return nfsd_setattr(rqstp, &resp->fh, &argp->attrs,0, (time_t)0); 85 nfserr = nfsd_setattr(rqstp, &resp->fh, &argp->attrs,0, (time_t)0);
86 return nfsd_return_attrs(nfserr, resp);
67} 87}
68 88
69/* 89/*
@@ -86,7 +106,7 @@ nfsd_proc_lookup(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
86 &resp->fh); 106 &resp->fh);
87 107
88 fh_put(&argp->fh); 108 fh_put(&argp->fh);
89 return nfserr; 109 return nfsd_return_dirop(nfserr, resp);
90} 110}
91 111
92/* 112/*
@@ -142,7 +162,10 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
142 argp->vec, argp->vlen, 162 argp->vec, argp->vlen,
143 &resp->count); 163 &resp->count);
144 164
145 return nfserr; 165 if (nfserr) return nfserr;
166 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
167 resp->fh.fh_dentry,
168 &resp->stat));
146} 169}
147 170
148/* 171/*
@@ -165,7 +188,7 @@ nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp,
165 argp->vec, argp->vlen, 188 argp->vec, argp->vlen,
166 argp->len, 189 argp->len,
167 &stable); 190 &stable);
168 return nfserr; 191 return nfsd_return_attrs(nfserr, resp);
169} 192}
170 193
171/* 194/*
@@ -322,7 +345,7 @@ out_unlock:
322 345
323done: 346done:
324 fh_put(dirfhp); 347 fh_put(dirfhp);
325 return nfserr; 348 return nfsd_return_dirop(nfserr, resp);
326} 349}
327 350
328static int 351static int
@@ -425,7 +448,7 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
425 nfserr = nfsd_create(rqstp, &argp->fh, argp->name, argp->len, 448 nfserr = nfsd_create(rqstp, &argp->fh, argp->name, argp->len,
426 &argp->attrs, S_IFDIR, 0, &resp->fh); 449 &argp->attrs, S_IFDIR, 0, &resp->fh);
427 fh_put(&argp->fh); 450 fh_put(&argp->fh);
428 return nfserr; 451 return nfsd_return_dirop(nfserr, resp);
429} 452}
430 453
431/* 454/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index eef0576a778..5320e5afadd 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -710,14 +710,15 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
710{ 710{
711 struct inode *inode = dp->d_inode; 711 struct inode *inode = dp->d_inode;
712 int (*fsync) (struct file *, struct dentry *, int); 712 int (*fsync) (struct file *, struct dentry *, int);
713 int err = nfs_ok; 713 int err;
714 714
715 filemap_fdatawrite(inode->i_mapping); 715 err = filemap_fdatawrite(inode->i_mapping);
716 if (fop && (fsync = fop->fsync)) 716 if (err == 0 && fop && (fsync = fop->fsync))
717 err=fsync(filp, dp, 0); 717 err = fsync(filp, dp, 0);
718 filemap_fdatawait(inode->i_mapping); 718 if (err == 0)
719 err = filemap_fdatawait(inode->i_mapping);
719 720
720 return nfserrno(err); 721 return err;
721} 722}
722 723
723 724
@@ -734,10 +735,10 @@ nfsd_sync(struct file *filp)
734 return err; 735 return err;
735} 736}
736 737
737void 738int
738nfsd_sync_dir(struct dentry *dp) 739nfsd_sync_dir(struct dentry *dp)
739{ 740{
740 nfsd_dosync(NULL, dp, dp->d_inode->i_fop); 741 return nfsd_dosync(NULL, dp, dp->d_inode->i_fop);
741} 742}
742 743
743/* 744/*
@@ -814,7 +815,7 @@ nfsd_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset
814 return size; 815 return size;
815} 816}
816 817
817static inline int 818static int
818nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, 819nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
819 loff_t offset, struct kvec *vec, int vlen, unsigned long *count) 820 loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
820{ 821{
@@ -878,7 +879,7 @@ static void kill_suid(struct dentry *dentry)
878 mutex_unlock(&dentry->d_inode->i_mutex); 879 mutex_unlock(&dentry->d_inode->i_mutex);
879} 880}
880 881
881static inline int 882static int
882nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, 883nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
883 loff_t offset, struct kvec *vec, int vlen, 884 loff_t offset, struct kvec *vec, int vlen,
884 unsigned long cnt, int *stablep) 885 unsigned long cnt, int *stablep)
@@ -890,9 +891,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
890 int err = 0; 891 int err = 0;
891 int stable = *stablep; 892 int stable = *stablep;
892 893
894#ifdef MSNFS
893 err = nfserr_perm; 895 err = nfserr_perm;
894 896
895#ifdef MSNFS
896 if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) && 897 if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
897 (!lock_may_write(file->f_dentry->d_inode, offset, cnt))) 898 (!lock_may_write(file->f_dentry->d_inode, offset, cnt)))
898 goto out; 899 goto out;
@@ -1064,7 +1065,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
1064 return err; 1065 return err;
1065 if (EX_ISSYNC(fhp->fh_export)) { 1066 if (EX_ISSYNC(fhp->fh_export)) {
1066 if (file->f_op && file->f_op->fsync) { 1067 if (file->f_op && file->f_op->fsync) {
1067 err = nfsd_sync(file); 1068 err = nfserrno(nfsd_sync(file));
1068 } else { 1069 } else {
1069 err = nfserr_notsupp; 1070 err = nfserr_notsupp;
1070 } 1071 }
@@ -1132,7 +1133,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
1132 "nfsd_create: parent %s/%s not locked!\n", 1133 "nfsd_create: parent %s/%s not locked!\n",
1133 dentry->d_parent->d_name.name, 1134 dentry->d_parent->d_name.name,
1134 dentry->d_name.name); 1135 dentry->d_name.name);
1135 err = -EIO; 1136 err = nfserr_io;
1136 goto out; 1137 goto out;
1137 } 1138 }
1138 } 1139 }
@@ -1175,7 +1176,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
1175 goto out_nfserr; 1176 goto out_nfserr;
1176 1177
1177 if (EX_ISSYNC(fhp->fh_export)) { 1178 if (EX_ISSYNC(fhp->fh_export)) {
1178 nfsd_sync_dir(dentry); 1179 err = nfserrno(nfsd_sync_dir(dentry));
1179 write_inode_now(dchild->d_inode, 1); 1180 write_inode_now(dchild->d_inode, 1);
1180 } 1181 }
1181 1182
@@ -1185,9 +1186,11 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
1185 * send along the gid when it tries to implement setgid 1186 * send along the gid when it tries to implement setgid
1186 * directories via NFS. 1187 * directories via NFS.
1187 */ 1188 */
1188 err = 0; 1189 if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID|ATTR_MODE)) != 0) {
1189 if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID|ATTR_MODE)) != 0) 1190 int err2 = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
1190 err = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0); 1191 if (err2)
1192 err = err2;
1193 }
1191 /* 1194 /*
1192 * Update the file handle to get the new inode info. 1195 * Update the file handle to get the new inode info.
1193 */ 1196 */
@@ -1306,17 +1309,10 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
1306 goto out_nfserr; 1309 goto out_nfserr;
1307 1310
1308 if (EX_ISSYNC(fhp->fh_export)) { 1311 if (EX_ISSYNC(fhp->fh_export)) {
1309 nfsd_sync_dir(dentry); 1312 err = nfserrno(nfsd_sync_dir(dentry));
1310 /* setattr will sync the child (or not) */ 1313 /* setattr will sync the child (or not) */
1311 } 1314 }
1312 1315
1313 /*
1314 * Update the filehandle to get the new inode info.
1315 */
1316 err = fh_update(resfhp);
1317 if (err)
1318 goto out;
1319
1320 if (createmode == NFS3_CREATE_EXCLUSIVE) { 1316 if (createmode == NFS3_CREATE_EXCLUSIVE) {
1321 /* Cram the verifier into atime/mtime/mode */ 1317 /* Cram the verifier into atime/mtime/mode */
1322 iap->ia_valid = ATTR_MTIME|ATTR_ATIME 1318 iap->ia_valid = ATTR_MTIME|ATTR_ATIME
@@ -1337,8 +1333,17 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
1337 * implement setgid directories via NFS. Clear out all that cruft. 1333 * implement setgid directories via NFS. Clear out all that cruft.
1338 */ 1334 */
1339 set_attr: 1335 set_attr:
1340 if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID)) != 0) 1336 if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID)) != 0) {
1341 err = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0); 1337 int err2 = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
1338 if (err2)
1339 err = err2;
1340 }
1341
1342 /*
1343 * Update the filehandle to get the new inode info.
1344 */
1345 if (!err)
1346 err = fh_update(resfhp);
1342 1347
1343 out: 1348 out:
1344 fh_unlock(fhp); 1349 fh_unlock(fhp);
@@ -1447,10 +1452,10 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
1447 } else 1452 } else
1448 err = vfs_symlink(dentry->d_inode, dnew, path, mode); 1453 err = vfs_symlink(dentry->d_inode, dnew, path, mode);
1449 1454
1450 if (!err) { 1455 if (!err)
1451 if (EX_ISSYNC(fhp->fh_export)) 1456 if (EX_ISSYNC(fhp->fh_export))
1452 nfsd_sync_dir(dentry); 1457 err = nfsd_sync_dir(dentry);
1453 } else 1458 if (err)
1454 err = nfserrno(err); 1459 err = nfserrno(err);
1455 fh_unlock(fhp); 1460 fh_unlock(fhp);
1456 1461
@@ -1506,7 +1511,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
1506 err = vfs_link(dold, dirp, dnew); 1511 err = vfs_link(dold, dirp, dnew);
1507 if (!err) { 1512 if (!err) {
1508 if (EX_ISSYNC(ffhp->fh_export)) { 1513 if (EX_ISSYNC(ffhp->fh_export)) {
1509 nfsd_sync_dir(ddir); 1514 err = nfserrno(nfsd_sync_dir(ddir));
1510 write_inode_now(dest, 1); 1515 write_inode_now(dest, 1);
1511 } 1516 }
1512 } else { 1517 } else {
@@ -1590,13 +1595,14 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
1590 if ((ffhp->fh_export->ex_flags & NFSEXP_MSNFS) && 1595 if ((ffhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
1591 ((atomic_read(&odentry->d_count) > 1) 1596 ((atomic_read(&odentry->d_count) > 1)
1592 || (atomic_read(&ndentry->d_count) > 1))) { 1597 || (atomic_read(&ndentry->d_count) > 1))) {
1593 err = nfserr_perm; 1598 err = -EPERM;
1594 } else 1599 } else
1595#endif 1600#endif
1596 err = vfs_rename(fdir, odentry, tdir, ndentry); 1601 err = vfs_rename(fdir, odentry, tdir, ndentry);
1597 if (!err && EX_ISSYNC(tfhp->fh_export)) { 1602 if (!err && EX_ISSYNC(tfhp->fh_export)) {
1598 nfsd_sync_dir(tdentry); 1603 err = nfsd_sync_dir(tdentry);
1599 nfsd_sync_dir(fdentry); 1604 if (!err)
1605 err = nfsd_sync_dir(fdentry);
1600 } 1606 }
1601 1607
1602 out_dput_new: 1608 out_dput_new:
@@ -1661,7 +1667,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
1661#ifdef MSNFS 1667#ifdef MSNFS
1662 if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) && 1668 if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
1663 (atomic_read(&rdentry->d_count) > 1)) { 1669 (atomic_read(&rdentry->d_count) > 1)) {
1664 err = nfserr_perm; 1670 err = -EPERM;
1665 } else 1671 } else
1666#endif 1672#endif
1667 err = vfs_unlink(dirp, rdentry); 1673 err = vfs_unlink(dirp, rdentry);
@@ -1671,17 +1677,14 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
1671 1677
1672 dput(rdentry); 1678 dput(rdentry);
1673 1679
1674 if (err) 1680 if (err == 0 &&
1675 goto out_nfserr; 1681 EX_ISSYNC(fhp->fh_export))
1676 if (EX_ISSYNC(fhp->fh_export)) 1682 err = nfsd_sync_dir(dentry);
1677 nfsd_sync_dir(dentry);
1678
1679out:
1680 return err;
1681 1683
1682out_nfserr: 1684out_nfserr:
1683 err = nfserrno(err); 1685 err = nfserrno(err);
1684 goto out; 1686out:
1687 return err;
1685} 1688}
1686 1689
1687/* 1690/*
diff --git a/fs/open.c b/fs/open.c
index 8e20c1f3256..70e0230d8e7 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -20,6 +20,7 @@
20#include <linux/security.h> 20#include <linux/security.h>
21#include <linux/mount.h> 21#include <linux/mount.h>
22#include <linux/vfs.h> 22#include <linux/vfs.h>
23#include <linux/fcntl.h>
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24#include <linux/fs.h> 25#include <linux/fs.h>
25#include <linux/personality.h> 26#include <linux/personality.h>
@@ -383,7 +384,7 @@ asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times)
383 384
384 error = get_user(newattrs.ia_atime.tv_sec, &times->actime); 385 error = get_user(newattrs.ia_atime.tv_sec, &times->actime);
385 newattrs.ia_atime.tv_nsec = 0; 386 newattrs.ia_atime.tv_nsec = 0;
386 if (!error) 387 if (!error)
387 error = get_user(newattrs.ia_mtime.tv_sec, &times->modtime); 388 error = get_user(newattrs.ia_mtime.tv_sec, &times->modtime);
388 newattrs.ia_mtime.tv_nsec = 0; 389 newattrs.ia_mtime.tv_nsec = 0;
389 if (error) 390 if (error)
@@ -414,14 +415,14 @@ out:
414 * must be owner or have write permission. 415 * must be owner or have write permission.
415 * Else, update from *times, must be owner or super user. 416 * Else, update from *times, must be owner or super user.
416 */ 417 */
417long do_utimes(char __user * filename, struct timeval * times) 418long do_utimes(int dfd, char __user *filename, struct timeval *times)
418{ 419{
419 int error; 420 int error;
420 struct nameidata nd; 421 struct nameidata nd;
421 struct inode * inode; 422 struct inode * inode;
422 struct iattr newattrs; 423 struct iattr newattrs;
423 424
424 error = user_path_walk(filename, &nd); 425 error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd);
425 426
426 if (error) 427 if (error)
427 goto out; 428 goto out;
@@ -461,13 +462,18 @@ out:
461 return error; 462 return error;
462} 463}
463 464
464asmlinkage long sys_utimes(char __user * filename, struct timeval __user * utimes) 465asmlinkage long sys_futimesat(int dfd, char __user *filename, struct timeval __user *utimes)
465{ 466{
466 struct timeval times[2]; 467 struct timeval times[2];
467 468
468 if (utimes && copy_from_user(&times, utimes, sizeof(times))) 469 if (utimes && copy_from_user(&times, utimes, sizeof(times)))
469 return -EFAULT; 470 return -EFAULT;
470 return do_utimes(filename, utimes ? times : NULL); 471 return do_utimes(dfd, filename, utimes ? times : NULL);
472}
473
474asmlinkage long sys_utimes(char __user *filename, struct timeval __user *utimes)
475{
476 return sys_futimesat(AT_FDCWD, filename, utimes);
471} 477}
472 478
473 479
@@ -476,7 +482,7 @@ asmlinkage long sys_utimes(char __user * filename, struct timeval __user * utime
476 * We do this by temporarily clearing all FS-related capabilities and 482 * We do this by temporarily clearing all FS-related capabilities and
477 * switching the fsuid/fsgid around to the real ones. 483 * switching the fsuid/fsgid around to the real ones.
478 */ 484 */
479asmlinkage long sys_access(const char __user * filename, int mode) 485asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode)
480{ 486{
481 struct nameidata nd; 487 struct nameidata nd;
482 int old_fsuid, old_fsgid; 488 int old_fsuid, old_fsgid;
@@ -506,7 +512,7 @@ asmlinkage long sys_access(const char __user * filename, int mode)
506 else 512 else
507 current->cap_effective = current->cap_permitted; 513 current->cap_effective = current->cap_permitted;
508 514
509 res = __user_walk(filename, LOOKUP_FOLLOW|LOOKUP_ACCESS, &nd); 515 res = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW|LOOKUP_ACCESS, &nd);
510 if (!res) { 516 if (!res) {
511 res = vfs_permission(&nd, mode); 517 res = vfs_permission(&nd, mode);
512 /* SuS v2 requires we report a read only fs too */ 518 /* SuS v2 requires we report a read only fs too */
@@ -523,6 +529,11 @@ asmlinkage long sys_access(const char __user * filename, int mode)
523 return res; 529 return res;
524} 530}
525 531
532asmlinkage long sys_access(const char __user *filename, int mode)
533{
534 return sys_faccessat(AT_FDCWD, filename, mode);
535}
536
526asmlinkage long sys_chdir(const char __user * filename) 537asmlinkage long sys_chdir(const char __user * filename)
527{ 538{
528 struct nameidata nd; 539 struct nameidata nd;
@@ -635,14 +646,15 @@ out:
635 return err; 646 return err;
636} 647}
637 648
638asmlinkage long sys_chmod(const char __user * filename, mode_t mode) 649asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
650 mode_t mode)
639{ 651{
640 struct nameidata nd; 652 struct nameidata nd;
641 struct inode * inode; 653 struct inode * inode;
642 int error; 654 int error;
643 struct iattr newattrs; 655 struct iattr newattrs;
644 656
645 error = user_path_walk(filename, &nd); 657 error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd);
646 if (error) 658 if (error)
647 goto out; 659 goto out;
648 inode = nd.dentry->d_inode; 660 inode = nd.dentry->d_inode;
@@ -669,6 +681,11 @@ out:
669 return error; 681 return error;
670} 682}
671 683
684asmlinkage long sys_chmod(const char __user *filename, mode_t mode)
685{
686 return sys_fchmodat(AT_FDCWD, filename, mode);
687}
688
672static int chown_common(struct dentry * dentry, uid_t user, gid_t group) 689static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
673{ 690{
674 struct inode * inode; 691 struct inode * inode;
@@ -717,6 +734,26 @@ asmlinkage long sys_chown(const char __user * filename, uid_t user, gid_t group)
717 return error; 734 return error;
718} 735}
719 736
737asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
738 gid_t group, int flag)
739{
740 struct nameidata nd;
741 int error = -EINVAL;
742 int follow;
743
744 if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
745 goto out;
746
747 follow = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
748 error = __user_walk_fd(dfd, filename, follow, &nd);
749 if (!error) {
750 error = chown_common(nd.dentry, user, group);
751 path_release(&nd);
752 }
753out:
754 return error;
755}
756
720asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group) 757asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group)
721{ 758{
722 struct nameidata nd; 759 struct nameidata nd;
@@ -820,7 +857,8 @@ cleanup_file:
820 * for the internal routines (ie open_namei()/follow_link() etc). 00 is 857 * for the internal routines (ie open_namei()/follow_link() etc). 00 is
821 * used by symlinks. 858 * used by symlinks.
822 */ 859 */
823struct file *filp_open(const char * filename, int flags, int mode) 860static struct file *do_filp_open(int dfd, const char *filename, int flags,
861 int mode)
824{ 862{
825 int namei_flags, error; 863 int namei_flags, error;
826 struct nameidata nd; 864 struct nameidata nd;
@@ -829,12 +867,17 @@ struct file *filp_open(const char * filename, int flags, int mode)
829 if ((namei_flags+1) & O_ACCMODE) 867 if ((namei_flags+1) & O_ACCMODE)
830 namei_flags++; 868 namei_flags++;
831 869
832 error = open_namei(filename, namei_flags, mode, &nd); 870 error = open_namei(dfd, filename, namei_flags, mode, &nd);
833 if (!error) 871 if (!error)
834 return nameidata_to_filp(&nd, flags); 872 return nameidata_to_filp(&nd, flags);
835 873
836 return ERR_PTR(error); 874 return ERR_PTR(error);
837} 875}
876
877struct file *filp_open(const char *filename, int flags, int mode)
878{
879 return do_filp_open(AT_FDCWD, filename, flags, mode);
880}
838EXPORT_SYMBOL(filp_open); 881EXPORT_SYMBOL(filp_open);
839 882
840/** 883/**
@@ -991,7 +1034,7 @@ void fastcall put_unused_fd(unsigned int fd)
991EXPORT_SYMBOL(put_unused_fd); 1034EXPORT_SYMBOL(put_unused_fd);
992 1035
993/* 1036/*
994 * Install a file pointer in the fd array. 1037 * Install a file pointer in the fd array.
995 * 1038 *
996 * The VFS is full of places where we drop the files lock between 1039 * The VFS is full of places where we drop the files lock between
997 * setting the open_fds bitmap and installing the file in the file 1040 * setting the open_fds bitmap and installing the file in the file
@@ -1016,7 +1059,7 @@ void fastcall fd_install(unsigned int fd, struct file * file)
1016 1059
1017EXPORT_SYMBOL(fd_install); 1060EXPORT_SYMBOL(fd_install);
1018 1061
1019long do_sys_open(const char __user *filename, int flags, int mode) 1062long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
1020{ 1063{
1021 char *tmp = getname(filename); 1064 char *tmp = getname(filename);
1022 int fd = PTR_ERR(tmp); 1065 int fd = PTR_ERR(tmp);
@@ -1024,7 +1067,7 @@ long do_sys_open(const char __user *filename, int flags, int mode)
1024 if (!IS_ERR(tmp)) { 1067 if (!IS_ERR(tmp)) {
1025 fd = get_unused_fd(); 1068 fd = get_unused_fd();
1026 if (fd >= 0) { 1069 if (fd >= 0) {
1027 struct file *f = filp_open(tmp, flags, mode); 1070 struct file *f = do_filp_open(dfd, tmp, flags, mode);
1028 if (IS_ERR(f)) { 1071 if (IS_ERR(f)) {
1029 put_unused_fd(fd); 1072 put_unused_fd(fd);
1030 fd = PTR_ERR(f); 1073 fd = PTR_ERR(f);
@@ -1043,10 +1086,20 @@ asmlinkage long sys_open(const char __user *filename, int flags, int mode)
1043 if (force_o_largefile()) 1086 if (force_o_largefile())
1044 flags |= O_LARGEFILE; 1087 flags |= O_LARGEFILE;
1045 1088
1046 return do_sys_open(filename, flags, mode); 1089 return do_sys_open(AT_FDCWD, filename, flags, mode);
1047} 1090}
1048EXPORT_SYMBOL_GPL(sys_open); 1091EXPORT_SYMBOL_GPL(sys_open);
1049 1092
1093asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
1094 int mode)
1095{
1096 if (force_o_largefile())
1097 flags |= O_LARGEFILE;
1098
1099 return do_sys_open(dfd, filename, flags, mode);
1100}
1101EXPORT_SYMBOL_GPL(sys_openat);
1102
1050#ifndef __alpha__ 1103#ifndef __alpha__
1051 1104
1052/* 1105/*
diff --git a/fs/select.c b/fs/select.c
index f10a10317d5..c0f02d36c60 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -179,12 +179,11 @@ get_max:
179#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR) 179#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
180#define POLLEX_SET (POLLPRI) 180#define POLLEX_SET (POLLPRI)
181 181
182int do_select(int n, fd_set_bits *fds, long *timeout) 182int do_select(int n, fd_set_bits *fds, s64 *timeout)
183{ 183{
184 struct poll_wqueues table; 184 struct poll_wqueues table;
185 poll_table *wait; 185 poll_table *wait;
186 int retval, i; 186 int retval, i;
187 long __timeout = *timeout;
188 187
189 rcu_read_lock(); 188 rcu_read_lock();
190 retval = max_select_fd(n, fds); 189 retval = max_select_fd(n, fds);
@@ -196,11 +195,12 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
196 195
197 poll_initwait(&table); 196 poll_initwait(&table);
198 wait = &table.pt; 197 wait = &table.pt;
199 if (!__timeout) 198 if (!*timeout)
200 wait = NULL; 199 wait = NULL;
201 retval = 0; 200 retval = 0;
202 for (;;) { 201 for (;;) {
203 unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; 202 unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
203 long __timeout;
204 204
205 set_current_state(TASK_INTERRUPTIBLE); 205 set_current_state(TASK_INTERRUPTIBLE);
206 206
@@ -255,22 +255,32 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
255 *rexp = res_ex; 255 *rexp = res_ex;
256 } 256 }
257 wait = NULL; 257 wait = NULL;
258 if (retval || !__timeout || signal_pending(current)) 258 if (retval || !*timeout || signal_pending(current))
259 break; 259 break;
260 if(table.error) { 260 if(table.error) {
261 retval = table.error; 261 retval = table.error;
262 break; 262 break;
263 } 263 }
264
265 if (*timeout < 0) {
266 /* Wait indefinitely */
267 __timeout = MAX_SCHEDULE_TIMEOUT;
268 } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT - 1)) {
269 /* Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in a loop */
270 __timeout = MAX_SCHEDULE_TIMEOUT - 1;
271 *timeout -= __timeout;
272 } else {
273 __timeout = *timeout;
274 *timeout = 0;
275 }
264 __timeout = schedule_timeout(__timeout); 276 __timeout = schedule_timeout(__timeout);
277 if (*timeout >= 0)
278 *timeout += __timeout;
265 } 279 }
266 __set_current_state(TASK_RUNNING); 280 __set_current_state(TASK_RUNNING);
267 281
268 poll_freewait(&table); 282 poll_freewait(&table);
269 283
270 /*
271 * Up-to-date the caller timeout.
272 */
273 *timeout = __timeout;
274 return retval; 284 return retval;
275} 285}
276 286
@@ -295,36 +305,14 @@ static void select_bits_free(void *bits, int size)
295#define MAX_SELECT_SECONDS \ 305#define MAX_SELECT_SECONDS \
296 ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) 306 ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
297 307
298asmlinkage long 308static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
299sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp) 309 fd_set __user *exp, s64 *timeout)
300{ 310{
301 fd_set_bits fds; 311 fd_set_bits fds;
302 char *bits; 312 char *bits;
303 long timeout;
304 int ret, size, max_fdset; 313 int ret, size, max_fdset;
305 struct fdtable *fdt; 314 struct fdtable *fdt;
306 315
307 timeout = MAX_SCHEDULE_TIMEOUT;
308 if (tvp) {
309 time_t sec, usec;
310
311 if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
312 || __get_user(sec, &tvp->tv_sec)
313 || __get_user(usec, &tvp->tv_usec)) {
314 ret = -EFAULT;
315 goto out_nofds;
316 }
317
318 ret = -EINVAL;
319 if (sec < 0 || usec < 0)
320 goto out_nofds;
321
322 if ((unsigned long) sec < MAX_SELECT_SECONDS) {
323 timeout = ROUND_UP(usec, 1000000/HZ);
324 timeout += sec * (unsigned long) HZ;
325 }
326 }
327
328 ret = -EINVAL; 316 ret = -EINVAL;
329 if (n < 0) 317 if (n < 0)
330 goto out_nofds; 318 goto out_nofds;
@@ -362,18 +350,7 @@ sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, s
362 zero_fd_set(n, fds.res_out); 350 zero_fd_set(n, fds.res_out);
363 zero_fd_set(n, fds.res_ex); 351 zero_fd_set(n, fds.res_ex);
364 352
365 ret = do_select(n, &fds, &timeout); 353 ret = do_select(n, &fds, timeout);
366
367 if (tvp && !(current->personality & STICKY_TIMEOUTS)) {
368 time_t sec = 0, usec = 0;
369 if (timeout) {
370 sec = timeout / HZ;
371 usec = timeout % HZ;
372 usec *= (1000000/HZ);
373 }
374 put_user(sec, &tvp->tv_sec);
375 put_user(usec, &tvp->tv_usec);
376 }
377 354
378 if (ret < 0) 355 if (ret < 0)
379 goto out; 356 goto out;
@@ -395,6 +372,154 @@ out_nofds:
395 return ret; 372 return ret;
396} 373}
397 374
/*
 * Classic select(2) entry point: convert the userspace timeval into a
 * 64-bit jiffies count, run the core select loop, then write the
 * remaining time back to userspace (unless the personality requests
 * sticky timeouts).
 */
asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			fd_set __user *exp, struct timeval __user *tvp)
{
	s64 timeout = -1;	/* -1 == wait indefinitely */
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		if (tv.tv_sec < 0 || tv.tv_usec < 0)
			return -EINVAL;

		/* Cast to u64 to make GCC stop complaining */
		if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
			timeout = -1;	/* infinite */
		else {
			/* Round the microseconds up to a whole tick so we
			 * never wait less than the caller asked for. */
			timeout = ROUND_UP(tv.tv_usec, USEC_PER_SEC/HZ);
			timeout += tv.tv_sec * HZ;
		}
	}

	ret = core_sys_select(n, inp, outp, exp, &timeout);

	if (tvp) {
		if (current->personality & STICKY_TIMEOUTS)
			goto sticky;
		/* timeout now holds the unslept jiffies; split it back
		 * into seconds and microseconds for userspace. */
		tv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
		tv.tv_sec = timeout;
		if (copy_to_user(tvp, &tv, sizeof(tv))) {
sticky:
			/*
			 * If an application puts its timeval in read-only
			 * memory, we don't want the Linux-specific update to
			 * the timeval to cause a fault after the select has
			 * completed successfully. However, because we're not
			 * updating the timeval, we can't restart the system
			 * call.
			 */
			if (ret == -ERESTARTNOHAND)
				ret = -EINTR;
		}
	}

	return ret;
}
422
423#ifdef TIF_RESTORE_SIGMASK
/*
 * pselect(2) core: like select(2) but takes a timespec and can atomically
 * install a temporary signal mask for the duration of the wait.  The
 * natural prototype has seven arguments, which most architectures cannot
 * pass; callers normally reach it through the six-argument sys_pselect6()
 * wrapper below.
 */
asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
		fd_set __user *exp, struct timespec __user *tsp,
		const sigset_t __user *sigmask, size_t sigsetsize)
{
	s64 timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		if (ts.tv_sec < 0 || ts.tv_nsec < 0)
			return -EINVAL;

		/* Cast to u64 to make GCC stop complaining */
		if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
			timeout = -1;	/* infinite */
		else {
			/* Round nanoseconds up to whole ticks so we never
			 * wait less than requested. */
			timeout = ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
			timeout += ts.tv_sec * HZ;
		}
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		/* SIGKILL and SIGSTOP can never be blocked. */
		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, &timeout);

	if (tsp) {
		if (current->personality & STICKY_TIMEOUTS)
			goto sticky;
		/* Convert the unslept jiffies back to a timespec. */
		ts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000;
		ts.tv_sec = timeout;
		if (copy_to_user(tsp, &ts, sizeof(ts))) {
sticky:
			/*
			 * If an application puts its timespec in read-only
			 * memory, we don't want the Linux-specific update to
			 * the timespec to cause a fault after the select has
			 * completed successfully. However, because we're not
			 * updating the timespec, we can't restart the system
			 * call.
			 */
			if (ret == -ERESTARTNOHAND)
				ret = -EINTR;
		}
	}

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_thread_flag(TIF_RESTORE_SIGMASK);
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}
498
499/*
500 * Most architectures can't handle 7-argument syscalls. So we provide a
501 * 6-argument version where the sixth argument is a pointer to a structure
502 * which has a pointer to the sigset_t itself followed by a size_t containing
503 * the sigset size.
504 */
505asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
506 fd_set __user *exp, struct timespec __user *tsp, void __user *sig)
507{
508 size_t sigsetsize = 0;
509 sigset_t __user *up = NULL;
510
511 if (sig) {
512 if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
513 || __get_user(up, (sigset_t * __user *)sig)
514 || __get_user(sigsetsize,
515 (size_t * __user)(sig+sizeof(void *))))
516 return -EFAULT;
517 }
518
519 return sys_pselect7(n, inp, outp, exp, tsp, up, sigsetsize);
520}
521#endif /* TIF_RESTORE_SIGMASK */
522
398struct poll_list { 523struct poll_list {
399 struct poll_list *next; 524 struct poll_list *next;
400 int len; 525 int len;
@@ -436,16 +561,19 @@ static void do_pollfd(unsigned int num, struct pollfd * fdpage,
436} 561}
437 562
438static int do_poll(unsigned int nfds, struct poll_list *list, 563static int do_poll(unsigned int nfds, struct poll_list *list,
439 struct poll_wqueues *wait, long timeout) 564 struct poll_wqueues *wait, s64 *timeout)
440{ 565{
441 int count = 0; 566 int count = 0;
442 poll_table* pt = &wait->pt; 567 poll_table* pt = &wait->pt;
443 568
444 if (!timeout) 569 /* Optimise the no-wait case */
570 if (!(*timeout))
445 pt = NULL; 571 pt = NULL;
446 572
447 for (;;) { 573 for (;;) {
448 struct poll_list *walk; 574 struct poll_list *walk;
575 long __timeout;
576
449 set_current_state(TASK_INTERRUPTIBLE); 577 set_current_state(TASK_INTERRUPTIBLE);
450 walk = list; 578 walk = list;
451 while(walk != NULL) { 579 while(walk != NULL) {
@@ -453,18 +581,36 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
453 walk = walk->next; 581 walk = walk->next;
454 } 582 }
455 pt = NULL; 583 pt = NULL;
456 if (count || !timeout || signal_pending(current)) 584 if (count || !*timeout || signal_pending(current))
457 break; 585 break;
458 count = wait->error; 586 count = wait->error;
459 if (count) 587 if (count)
460 break; 588 break;
461 timeout = schedule_timeout(timeout); 589
590 if (*timeout < 0) {
591 /* Wait indefinitely */
592 __timeout = MAX_SCHEDULE_TIMEOUT;
593 } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT-1)) {
594 /*
595 * Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in
596 * a loop
597 */
598 __timeout = MAX_SCHEDULE_TIMEOUT - 1;
599 *timeout -= __timeout;
600 } else {
601 __timeout = *timeout;
602 *timeout = 0;
603 }
604
605 __timeout = schedule_timeout(__timeout);
606 if (*timeout >= 0)
607 *timeout += __timeout;
462 } 608 }
463 __set_current_state(TASK_RUNNING); 609 __set_current_state(TASK_RUNNING);
464 return count; 610 return count;
465} 611}
466 612
467asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long timeout) 613int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
468{ 614{
469 struct poll_wqueues table; 615 struct poll_wqueues table;
470 int fdcount, err; 616 int fdcount, err;
@@ -482,14 +628,6 @@ asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long ti
482 if (nfds > max_fdset && nfds > OPEN_MAX) 628 if (nfds > max_fdset && nfds > OPEN_MAX)
483 return -EINVAL; 629 return -EINVAL;
484 630
485 if (timeout) {
486 /* Careful about overflow in the intermediate values */
487 if ((unsigned long) timeout < MAX_SCHEDULE_TIMEOUT / HZ)
488 timeout = (unsigned long)(timeout*HZ+999)/1000+1;
489 else /* Negative or overflow */
490 timeout = MAX_SCHEDULE_TIMEOUT;
491 }
492
493 poll_initwait(&table); 631 poll_initwait(&table);
494 632
495 head = NULL; 633 head = NULL;
@@ -519,6 +657,7 @@ asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long ti
519 } 657 }
520 i -= pp->len; 658 i -= pp->len;
521 } 659 }
660
522 fdcount = do_poll(nfds, head, &table, timeout); 661 fdcount = do_poll(nfds, head, &table, timeout);
523 662
524 /* OK, now copy the revents fields back to user space. */ 663 /* OK, now copy the revents fields back to user space. */
@@ -547,3 +686,98 @@ out_fds:
547 poll_freewait(&table); 686 poll_freewait(&table);
548 return err; 687 return err;
549} 688}
689
690asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
691 long timeout_msecs)
692{
693 s64 timeout_jiffies = 0;
694
695 if (timeout_msecs) {
696#if HZ > 1000
697 /* We can only overflow if HZ > 1000 */
698 if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ)
699 timeout_jiffies = -1;
700 else
701#endif
702 timeout_jiffies = msecs_to_jiffies(timeout_msecs);
703 }
704
705 return do_sys_poll(ufds, nfds, &timeout_jiffies);
706}
707
708#ifdef TIF_RESTORE_SIGMASK
709asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
710 struct timespec __user *tsp, const sigset_t __user *sigmask,
711 size_t sigsetsize)
712{
713 sigset_t ksigmask, sigsaved;
714 struct timespec ts;
715 s64 timeout = -1;
716 int ret;
717
718 if (tsp) {
719 if (copy_from_user(&ts, tsp, sizeof(ts)))
720 return -EFAULT;
721
722 /* Cast to u64 to make GCC stop complaining */
723 if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
724 timeout = -1; /* infinite */
725 else {
726 timeout = ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
727 timeout += ts.tv_sec * HZ;
728 }
729 }
730
731 if (sigmask) {
732 /* XXX: Don't preclude handling different sized sigset_t's. */
733 if (sigsetsize != sizeof(sigset_t))
734 return -EINVAL;
735 if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
736 return -EFAULT;
737
738 sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
739 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
740 }
741
742 ret = do_sys_poll(ufds, nfds, &timeout);
743
744 /* We can restart this syscall, usually */
745 if (ret == -EINTR) {
746 /*
747 * Don't restore the signal mask yet. Let do_signal() deliver
748 * the signal on the way back to userspace, before the signal
749 * mask is restored.
750 */
751 if (sigmask) {
752 memcpy(&current->saved_sigmask, &sigsaved,
753 sizeof(sigsaved));
754 set_thread_flag(TIF_RESTORE_SIGMASK);
755 }
756 ret = -ERESTARTNOHAND;
757 } else if (sigmask)
758 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
759
760 if (tsp && timeout >= 0) {
761 if (current->personality & STICKY_TIMEOUTS)
762 goto sticky;
763 /* Yes, we know it's actually an s64, but it's also positive. */
764 ts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000;
765 ts.tv_sec = timeout;
766 if (copy_to_user(tsp, &ts, sizeof(ts))) {
767 sticky:
768 /*
769 * If an application puts its timeval in read-only
770 * memory, we don't want the Linux-specific update to
771 * the timeval to cause a fault after the select has
772 * completed successfully. However, because we're not
773 * updating the timeval, we can't restart the system
774 * call.
775 */
776 if (ret == -ERESTARTNOHAND && timeout >= 0)
777 ret = -EINTR;
778 }
779 }
780
781 return ret;
782}
783#endif /* TIF_RESTORE_SIGMASK */
diff --git a/fs/stat.c b/fs/stat.c
index b8a0e5110ab..24211b030f3 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -63,12 +63,12 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
63 63
64EXPORT_SYMBOL(vfs_getattr); 64EXPORT_SYMBOL(vfs_getattr);
65 65
66int vfs_stat(char __user *name, struct kstat *stat) 66int vfs_stat_fd(int dfd, char __user *name, struct kstat *stat)
67{ 67{
68 struct nameidata nd; 68 struct nameidata nd;
69 int error; 69 int error;
70 70
71 error = user_path_walk(name, &nd); 71 error = __user_walk_fd(dfd, name, LOOKUP_FOLLOW, &nd);
72 if (!error) { 72 if (!error) {
73 error = vfs_getattr(nd.mnt, nd.dentry, stat); 73 error = vfs_getattr(nd.mnt, nd.dentry, stat);
74 path_release(&nd); 74 path_release(&nd);
@@ -76,14 +76,19 @@ int vfs_stat(char __user *name, struct kstat *stat)
76 return error; 76 return error;
77} 77}
78 78
/* stat() a path relative to the current working directory, following
 * the final symlink; attributes are returned in *stat. */
int vfs_stat(char __user *name, struct kstat *stat)
{
	return vfs_stat_fd(AT_FDCWD, name, stat);
}
83
79EXPORT_SYMBOL(vfs_stat); 84EXPORT_SYMBOL(vfs_stat);
80 85
81int vfs_lstat(char __user *name, struct kstat *stat) 86int vfs_lstat_fd(int dfd, char __user *name, struct kstat *stat)
82{ 87{
83 struct nameidata nd; 88 struct nameidata nd;
84 int error; 89 int error;
85 90
86 error = user_path_walk_link(name, &nd); 91 error = __user_walk_fd(dfd, name, 0, &nd);
87 if (!error) { 92 if (!error) {
88 error = vfs_getattr(nd.mnt, nd.dentry, stat); 93 error = vfs_getattr(nd.mnt, nd.dentry, stat);
89 path_release(&nd); 94 path_release(&nd);
@@ -91,6 +96,11 @@ int vfs_lstat(char __user *name, struct kstat *stat)
91 return error; 96 return error;
92} 97}
93 98
/* lstat() a path relative to the current working directory, without
 * following the final symlink; attributes are returned in *stat. */
int vfs_lstat(char __user *name, struct kstat *stat)
{
	return vfs_lstat_fd(AT_FDCWD, name, stat);
}
103
94EXPORT_SYMBOL(vfs_lstat); 104EXPORT_SYMBOL(vfs_lstat);
95 105
96int vfs_fstat(unsigned int fd, struct kstat *stat) 106int vfs_fstat(unsigned int fd, struct kstat *stat)
@@ -151,7 +161,7 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta
151asmlinkage long sys_stat(char __user * filename, struct __old_kernel_stat __user * statbuf) 161asmlinkage long sys_stat(char __user * filename, struct __old_kernel_stat __user * statbuf)
152{ 162{
153 struct kstat stat; 163 struct kstat stat;
154 int error = vfs_stat(filename, &stat); 164 int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
155 165
156 if (!error) 166 if (!error)
157 error = cp_old_stat(&stat, statbuf); 167 error = cp_old_stat(&stat, statbuf);
@@ -161,7 +171,7 @@ asmlinkage long sys_stat(char __user * filename, struct __old_kernel_stat __user
161asmlinkage long sys_lstat(char __user * filename, struct __old_kernel_stat __user * statbuf) 171asmlinkage long sys_lstat(char __user * filename, struct __old_kernel_stat __user * statbuf)
162{ 172{
163 struct kstat stat; 173 struct kstat stat;
164 int error = vfs_lstat(filename, &stat); 174 int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
165 175
166 if (!error) 176 if (!error)
167 error = cp_old_stat(&stat, statbuf); 177 error = cp_old_stat(&stat, statbuf);
@@ -229,27 +239,50 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
229 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; 239 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
230} 240}
231 241
232asmlinkage long sys_newstat(char __user * filename, struct stat __user * statbuf) 242asmlinkage long sys_newstat(char __user *filename, struct stat __user *statbuf)
233{ 243{
234 struct kstat stat; 244 struct kstat stat;
235 int error = vfs_stat(filename, &stat); 245 int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
236 246
237 if (!error) 247 if (!error)
238 error = cp_new_stat(&stat, statbuf); 248 error = cp_new_stat(&stat, statbuf);
239 249
240 return error; 250 return error;
241} 251}
242asmlinkage long sys_newlstat(char __user * filename, struct stat __user * statbuf) 252
253asmlinkage long sys_newlstat(char __user *filename, struct stat __user *statbuf)
243{ 254{
244 struct kstat stat; 255 struct kstat stat;
245 int error = vfs_lstat(filename, &stat); 256 int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
246 257
247 if (!error) 258 if (!error)
248 error = cp_new_stat(&stat, statbuf); 259 error = cp_new_stat(&stat, statbuf);
249 260
250 return error; 261 return error;
251} 262}
252asmlinkage long sys_newfstat(unsigned int fd, struct stat __user * statbuf) 263
264asmlinkage long sys_newfstatat(int dfd, char __user *filename,
265 struct stat __user *statbuf, int flag)
266{
267 struct kstat stat;
268 int error = -EINVAL;
269
270 if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
271 goto out;
272
273 if (flag & AT_SYMLINK_NOFOLLOW)
274 error = vfs_lstat_fd(dfd, filename, &stat);
275 else
276 error = vfs_stat_fd(dfd, filename, &stat);
277
278 if (!error)
279 error = cp_new_stat(&stat, statbuf);
280
281out:
282 return error;
283}
284
285asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf)
253{ 286{
254 struct kstat stat; 287 struct kstat stat;
255 int error = vfs_fstat(fd, &stat); 288 int error = vfs_fstat(fd, &stat);
@@ -260,7 +293,8 @@ asmlinkage long sys_newfstat(unsigned int fd, struct stat __user * statbuf)
260 return error; 293 return error;
261} 294}
262 295
263asmlinkage long sys_readlink(const char __user * path, char __user * buf, int bufsiz) 296asmlinkage long sys_readlinkat(int dfd, const char __user *path,
297 char __user *buf, int bufsiz)
264{ 298{
265 struct nameidata nd; 299 struct nameidata nd;
266 int error; 300 int error;
@@ -268,7 +302,7 @@ asmlinkage long sys_readlink(const char __user * path, char __user * buf, int bu
268 if (bufsiz <= 0) 302 if (bufsiz <= 0)
269 return -EINVAL; 303 return -EINVAL;
270 304
271 error = user_path_walk_link(path, &nd); 305 error = __user_walk_fd(dfd, path, 0, &nd);
272 if (!error) { 306 if (!error) {
273 struct inode * inode = nd.dentry->d_inode; 307 struct inode * inode = nd.dentry->d_inode;
274 308
@@ -285,6 +319,12 @@ asmlinkage long sys_readlink(const char __user * path, char __user * buf, int bu
285 return error; 319 return error;
286} 320}
287 321
/* Classic readlink(2): readlinkat(2) relative to the current working
 * directory. */
asmlinkage long sys_readlink(const char __user *path, char __user *buf,
				int bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
327
288 328
289/* ---------- LFS-64 ----------- */ 329/* ---------- LFS-64 ----------- */
290#ifdef __ARCH_WANT_STAT64 330#ifdef __ARCH_WANT_STAT64
diff --git a/include/asm-arm/arch-omap/clock.h b/include/asm-arm/arch-omap/clock.h
index 740c297eb11..46a0402696d 100644
--- a/include/asm-arm/arch-omap/clock.h
+++ b/include/asm-arm/arch-omap/clock.h
@@ -38,8 +38,6 @@ struct clk {
38struct clk_functions { 38struct clk_functions {
39 int (*clk_enable)(struct clk *clk); 39 int (*clk_enable)(struct clk *clk);
40 void (*clk_disable)(struct clk *clk); 40 void (*clk_disable)(struct clk *clk);
41 int (*clk_use)(struct clk *clk);
42 void (*clk_unuse)(struct clk *clk);
43 long (*clk_round_rate)(struct clk *clk, unsigned long rate); 41 long (*clk_round_rate)(struct clk *clk, unsigned long rate);
44 int (*clk_set_rate)(struct clk *clk, unsigned long rate); 42 int (*clk_set_rate)(struct clk *clk, unsigned long rate);
45 int (*clk_set_parent)(struct clk *clk, struct clk *parent); 43 int (*clk_set_parent)(struct clk *clk, struct clk *parent);
diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h
index dae138b9cac..1409c5bd703 100644
--- a/include/asm-arm/arch-pxa/pxa-regs.h
+++ b/include/asm-arm/arch-pxa/pxa-regs.h
@@ -108,6 +108,7 @@
108#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ 108#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
109#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ 109#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
110 110
111#define DALGN __REG(0x400000a0) /* DMA Alignment Register */
111#define DINT __REG(0x400000f0) /* DMA Interrupt Register */ 112#define DINT __REG(0x400000f0) /* DMA Interrupt Register */
112 113
113#define DRCMR(n) __REG2(0x40000100, (n)<<2) 114#define DRCMR(n) __REG2(0x40000100, (n)<<2)
@@ -1614,8 +1615,21 @@
1614#define SSCR0_National (0x2 << 4) /* National Microwire */ 1615#define SSCR0_National (0x2 << 4) /* National Microwire */
1615#define SSCR0_ECS (1 << 6) /* External clock select */ 1616#define SSCR0_ECS (1 << 6) /* External clock select */
1616#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */ 1617#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
#if defined(CONFIG_PXA25x)
#define SSCR0_SCR	(0x0000ff00)	/* Serial Clock Rate (mask) */
#define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */
#elif defined(CONFIG_PXA27x)
#define SSCR0_SCR	(0x000fff00)	/* Serial Clock Rate (mask) */
#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
#define SSCR0_EDSS	(1 << 20)	/* Extended data size select */
#define SSCR0_NCS	(1 << 21)	/* Network clock select */
#define SSCR0_RIM	(1 << 22)	/* Receive FIFO overrun interrupt mask */
#define SSCR0_TUM	(1 << 23)	/* Transmit FIFO underrun interrupt mask */
#define SSCR0_FRDC	(0x07000000)	/* Frame rate divider control (mask) */
/* Was SSCR0_SlotsPerFrm(c) expanding (x): the parameter name did not
 * match the expansion, so any use failed to compile. */
#define SSCR0_SlotsPerFrm(x) ((x) - 1)	/* Time slots per frame [1..8] */
#define SSCR0_ADC	(1 << 30)	/* Audio clock select */
#define SSCR0_MOD	(1 << 31)	/* Mode (normal or network) */
#endif
1619 1633
1620#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */ 1634#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
1621#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */ 1635#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index a5576e02dd1..ea426abf01d 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -129,6 +129,7 @@ register struct thread_info *__current_thread_info asm("gr15");
129#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 129#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
130#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ 130#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
131#define TIF_IRET 5 /* return with iret */ 131#define TIF_IRET 5 /* return with iret */
132#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
132#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 133#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
133#define TIF_MEMDIE 17 /* OOM killer killed process */ 134#define TIF_MEMDIE 17 /* OOM killer killed process */
134 135
@@ -138,6 +139,7 @@ register struct thread_info *__current_thread_info asm("gr15");
138#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 139#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
139#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 140#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
140#define _TIF_IRET (1 << TIF_IRET) 141#define _TIF_IRET (1 << TIF_IRET)
142#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
141#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 143#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
142 144
143#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 145#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index cde376a7a85..4d994d2e99e 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -486,6 +486,7 @@ static inline pid_t wait(int * wait_stat)
486/* #define __ARCH_WANT_SYS_SIGPENDING */ 486/* #define __ARCH_WANT_SYS_SIGPENDING */
487#define __ARCH_WANT_SYS_SIGPROCMASK 487#define __ARCH_WANT_SYS_SIGPROCMASK
488#define __ARCH_WANT_SYS_RT_SIGACTION 488#define __ARCH_WANT_SYS_RT_SIGACTION
489#define __ARCH_WANT_SYS_RT_SIGSUSPEND
489#endif 490#endif
490 491
491/* 492/*
diff --git a/include/asm-i386/edac.h b/include/asm-i386/edac.h
new file mode 100644
index 00000000000..3e7dd0ab68c
--- /dev/null
+++ b/include/asm-i386/edac.h
@@ -0,0 +1,18 @@
1#ifndef ASM_EDAC_H
2#define ASM_EDAC_H
3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5
6static __inline__ void atomic_scrub(void *va, u32 size)
7{
8 unsigned long *virt_addr = va;
9 u32 i;
10
11 for (i = 0; i < size / 4; i++, virt_addr++)
12 /* Very carefully read and write to memory atomically
13 * so we are interrupt, DMA and SMP safe.
14 */
15 __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
16}
17
18#endif
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index e7a271d3930..44b9db80647 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -61,7 +61,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
61 if (op == FUTEX_OP_SET) 61 if (op == FUTEX_OP_SET)
62 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); 62 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
63 else { 63 else {
64#if !defined(CONFIG_X86_BSWAP) && !defined(CONFIG_UML) 64#ifndef CONFIG_X86_BSWAP
65 if (boot_cpu_data.x86 == 3) 65 if (boot_cpu_data.x86 == 3)
66 ret = -ENOSYS; 66 ret = -ENOSYS;
67 else 67 else
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index 76524b4052a..026fd231488 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -218,7 +218,6 @@ static __inline__ int sigfindinword(unsigned long word)
218} 218}
219 219
220struct pt_regs; 220struct pt_regs;
221extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
222 221
223#define ptrace_signal_deliver(regs, cookie) \ 222#define ptrace_signal_deliver(regs, cookie) \
224 do { \ 223 do { \
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 2493e77e8c3..e20e99551d7 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -140,6 +140,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
140#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 140#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
141#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 141#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
142#define TIF_SECCOMP 8 /* secure computing */ 142#define TIF_SECCOMP 8 /* secure computing */
143#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
143#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 144#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
144#define TIF_MEMDIE 17 145#define TIF_MEMDIE 17
145 146
@@ -152,6 +153,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
152#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU) 153#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
153#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 154#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
154#define _TIF_SECCOMP (1<<TIF_SECCOMP) 155#define _TIF_SECCOMP (1<<TIF_SECCOMP)
156#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
155#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 157#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
156 158
157/* work to do on interrupt/exception return */ 159/* work to do on interrupt/exception return */
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 481c3c0ea72..597496ed2ae 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -300,8 +300,23 @@
300#define __NR_inotify_add_watch 292 300#define __NR_inotify_add_watch 292
301#define __NR_inotify_rm_watch 293 301#define __NR_inotify_rm_watch 293
302#define __NR_migrate_pages 294 302#define __NR_migrate_pages 294
303#define __NR_openat 295
304#define __NR_mkdirat 296
305#define __NR_mknodat 297
306#define __NR_fchownat 298
307#define __NR_futimesat 299
308#define __NR_newfstatat 300
309#define __NR_unlinkat 301
310#define __NR_renameat 302
311#define __NR_linkat 303
312#define __NR_symlinkat 304
313#define __NR_readlinkat 305
314#define __NR_fchmodat 306
315#define __NR_faccessat 307
316#define __NR_pselect6 308
317#define __NR_ppoll 309
303 318
304#define NR_syscalls 295 319#define NR_syscalls 310
305 320
306/* 321/*
307 * user-visible error numbers are in the range -1 - -128: see 322 * user-visible error numbers are in the range -1 - -128: see
@@ -417,6 +432,7 @@ __syscall_return(type,__res); \
417#define __ARCH_WANT_SYS_SIGPENDING 432#define __ARCH_WANT_SYS_SIGPENDING
418#define __ARCH_WANT_SYS_SIGPROCMASK 433#define __ARCH_WANT_SYS_SIGPROCMASK
419#define __ARCH_WANT_SYS_RT_SIGACTION 434#define __ARCH_WANT_SYS_RT_SIGACTION
435#define __ARCH_WANT_SYS_RT_SIGSUSPEND
420#endif 436#endif
421 437
422#ifdef __KERNEL_SYSCALLS__ 438#ifdef __KERNEL_SYSCALLS__
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index bb8906285fa..f483eeb95dd 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -61,7 +61,7 @@ static inline void
61down (struct semaphore *sem) 61down (struct semaphore *sem)
62{ 62{
63 might_sleep(); 63 might_sleep();
64 if (atomic_dec_return(&sem->count) < 0) 64 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
65 __down(sem); 65 __down(sem);
66} 66}
67 67
@@ -75,7 +75,7 @@ down_interruptible (struct semaphore * sem)
75 int ret = 0; 75 int ret = 0;
76 76
77 might_sleep(); 77 might_sleep();
78 if (atomic_dec_return(&sem->count) < 0) 78 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
79 ret = __down_interruptible(sem); 79 ret = __down_interruptible(sem);
80 return ret; 80 return ret;
81} 81}
@@ -85,7 +85,7 @@ down_trylock (struct semaphore *sem)
85{ 85{
86 int ret = 0; 86 int ret = 0;
87 87
88 if (atomic_dec_return(&sem->count) < 0) 88 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
89 ret = __down_trylock(sem); 89 ret = __down_trylock(sem);
90 return ret; 90 return ret;
91} 91}
@@ -93,7 +93,7 @@ down_trylock (struct semaphore *sem)
93static inline void 93static inline void
94up (struct semaphore * sem) 94up (struct semaphore * sem)
95{ 95{
96 if (atomic_inc_return(&sem->count) <= 0) 96 if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
97 __up(sem); 97 __up(sem);
98} 98}
99 99
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
index 203945ae034..9bd2f9bf329 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/include/asm-ia64/sn/xp.h
@@ -18,6 +18,7 @@
18 18
19#include <linux/cache.h> 19#include <linux/cache.h>
20#include <linux/hardirq.h> 20#include <linux/hardirq.h>
21#include <linux/mutex.h>
21#include <asm/sn/types.h> 22#include <asm/sn/types.h>
22#include <asm/sn/bte.h> 23#include <asm/sn/bte.h>
23 24
@@ -359,7 +360,7 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
359 * the channel. 360 * the channel.
360 */ 361 */
361struct xpc_registration { 362struct xpc_registration {
362 struct semaphore sema; 363 struct mutex mutex;
363 xpc_channel_func func; /* function to call */ 364 xpc_channel_func func; /* function to call */
364 void *key; /* pointer to user's key */ 365 void *key; /* pointer to user's key */
365 u16 nentries; /* #of msg entries in local msg queue */ 366 u16 nentries; /* #of msg entries in local msg queue */
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 87e9cd58851..0c36928ffd8 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -19,6 +19,8 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/sysctl.h> 20#include <linux/sysctl.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/mutex.h>
23#include <linux/completion.h>
22#include <asm/pgtable.h> 24#include <asm/pgtable.h>
23#include <asm/processor.h> 25#include <asm/processor.h>
24#include <asm/sn/bte.h> 26#include <asm/sn/bte.h>
@@ -335,8 +337,7 @@ struct xpc_openclose_args {
335 * and consumed by the intended recipient. 337 * and consumed by the intended recipient.
336 */ 338 */
337struct xpc_notify { 339struct xpc_notify {
338 struct semaphore sema; /* notify semaphore */ 340 volatile u8 type; /* type of notification */
339 volatile u8 type; /* type of notification */
340 341
341 /* the following two fields are only used if type == XPC_N_CALL */ 342 /* the following two fields are only used if type == XPC_N_CALL */
342 xpc_notify_func func; /* user's notify function */ 343 xpc_notify_func func; /* user's notify function */
@@ -465,8 +466,8 @@ struct xpc_channel {
465 xpc_channel_func func; /* user's channel function */ 466 xpc_channel_func func; /* user's channel function */
466 void *key; /* pointer to user's key */ 467 void *key; /* pointer to user's key */
467 468
468 struct semaphore msg_to_pull_sema; /* next msg to pull serialization */ 469 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
469 struct semaphore wdisconnect_sema; /* wait for channel disconnect */ 470 struct completion wdisconnect_wait; /* wait for channel disconnect */
470 471
471 struct xpc_openclose_args *local_openclose_args; /* args passed on */ 472 struct xpc_openclose_args *local_openclose_args; /* args passed on */
472 /* opening or closing of channel */ 473 /* opening or closing of channel */
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index d8aae4da397..412ef8e493a 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -18,6 +18,10 @@
18#include <asm/smp.h> 18#include <asm/smp.h>
19 19
20#ifdef CONFIG_NUMA 20#ifdef CONFIG_NUMA
21
22/* Nodes w/o CPUs are preferred for memory allocations, see build_zonelists */
23#define PENALTY_FOR_NODE_WITH_CPUS 255
24
21/* 25/*
22 * Returns the number of the node containing CPU 'cpu' 26 * Returns the number of the node containing CPU 'cpu'
23 */ 27 */
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index 7e09d7cda93..67cdaf3ae9f 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
122#define TIF_RESTOREALL 12 /* Restore all regs (implies NOERROR) */ 122#define TIF_RESTOREALL 12 /* Restore all regs (implies NOERROR) */
123#define TIF_SAVE_NVGPRS 13 /* Save r14-r31 in signal frame */ 123#define TIF_SAVE_NVGPRS 13 /* Save r14-r31 in signal frame */
124#define TIF_NOERROR 14 /* Force successful syscall return */ 124#define TIF_NOERROR 14 /* Force successful syscall return */
125#define TIF_RESTORE_SIGMASK 15 /* Restore signal mask in do_signal */
125 126
126/* as above, but as bit values */ 127/* as above, but as bit values */
127#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 128#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -138,10 +139,12 @@ static inline struct thread_info *current_thread_info(void)
138#define _TIF_RESTOREALL (1<<TIF_RESTOREALL) 139#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
139#define _TIF_SAVE_NVGPRS (1<<TIF_SAVE_NVGPRS) 140#define _TIF_SAVE_NVGPRS (1<<TIF_SAVE_NVGPRS)
140#define _TIF_NOERROR (1<<TIF_NOERROR) 141#define _TIF_NOERROR (1<<TIF_NOERROR)
142#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
141#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) 143#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
142 144
143#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \ 145#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
144 _TIF_NEED_RESCHED | _TIF_RESTOREALL) 146 _TIF_NEED_RESCHED | _TIF_RESTOREALL | \
147 _TIF_RESTORE_SIGMASK)
145#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR|_TIF_SAVE_NVGPRS) 148#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR|_TIF_SAVE_NVGPRS)
146 149
147#endif /* __KERNEL__ */ 150#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 19eaac3fbbf..a40cdff21a8 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -298,8 +298,10 @@
298#define __NR_inotify_rm_watch 277 298#define __NR_inotify_rm_watch 277
299#define __NR_spu_run 278 299#define __NR_spu_run 278
300#define __NR_spu_create 279 300#define __NR_spu_create 279
301#define __NR_pselect6 280
302#define __NR_ppoll 281
301 303
302#define __NR_syscalls 280 304#define __NR_syscalls 282
303 305
304#ifdef __KERNEL__ 306#ifdef __KERNEL__
305#define __NR__exit __NR_exit 307#define __NR__exit __NR_exit
@@ -444,11 +446,13 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
444#define __ARCH_WANT_SYS_SIGPENDING 446#define __ARCH_WANT_SYS_SIGPENDING
445#define __ARCH_WANT_SYS_SIGPROCMASK 447#define __ARCH_WANT_SYS_SIGPROCMASK
446#define __ARCH_WANT_SYS_RT_SIGACTION 448#define __ARCH_WANT_SYS_RT_SIGACTION
449#define __ARCH_WANT_SYS_RT_SIGSUSPEND
447#ifdef CONFIG_PPC32 450#ifdef CONFIG_PPC32
448#define __ARCH_WANT_OLD_STAT 451#define __ARCH_WANT_OLD_STAT
449#endif 452#endif
450#ifdef CONFIG_PPC64 453#ifdef CONFIG_PPC64
451#define __ARCH_WANT_COMPAT_SYS_TIME 454#define __ARCH_WANT_COMPAT_SYS_TIME
455#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
452#endif 456#endif
453 457
454/* 458/*
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index 95944556d8b..d0d76b30eb4 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -164,6 +164,7 @@ enum prom_input_device {
164 PROMDEV_IKBD, /* input from keyboard */ 164 PROMDEV_IKBD, /* input from keyboard */
165 PROMDEV_ITTYA, /* input from ttya */ 165 PROMDEV_ITTYA, /* input from ttya */
166 PROMDEV_ITTYB, /* input from ttyb */ 166 PROMDEV_ITTYB, /* input from ttyb */
167 PROMDEV_IRSC, /* input from rsc */
167 PROMDEV_I_UNK, 168 PROMDEV_I_UNK,
168}; 169};
169 170
@@ -175,6 +176,7 @@ enum prom_output_device {
175 PROMDEV_OSCREEN, /* to screen */ 176 PROMDEV_OSCREEN, /* to screen */
176 PROMDEV_OTTYA, /* to ttya */ 177 PROMDEV_OTTYA, /* to ttya */
177 PROMDEV_OTTYB, /* to ttyb */ 178 PROMDEV_OTTYB, /* to ttyb */
179 PROMDEV_ORSC, /* to rsc */
178 PROMDEV_O_UNK, 180 PROMDEV_O_UNK,
179}; 181};
180 182
diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h
index 65f060b040a..91b9f5888c8 100644
--- a/include/asm-sparc/thread_info.h
+++ b/include/asm-sparc/thread_info.h
@@ -128,9 +128,10 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
128 * thread information flag bit numbers 128 * thread information flag bit numbers
129 */ 129 */
130#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 130#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
131#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ 131/* flag bit 1 is available */
132#define TIF_SIGPENDING 2 /* signal pending */ 132#define TIF_SIGPENDING 2 /* signal pending */
133#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 133#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
134#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
134#define TIF_USEDFPU 8 /* FPU was used by this task 135#define TIF_USEDFPU 8 /* FPU was used by this task
135 * this quantum (SMP) */ 136 * this quantum (SMP) */
136#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling 137#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
@@ -139,9 +140,9 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
139 140
140/* as above, but as bit values */ 141/* as above, but as bit values */
141#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 142#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
142#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
143#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 143#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
144#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 144#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
145#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
145#define _TIF_USEDFPU (1<<TIF_USEDFPU) 146#define _TIF_USEDFPU (1<<TIF_USEDFPU)
146#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 147#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
147 148
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index 58dba518239..2ac64e65e33 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -300,11 +300,26 @@
300#define __NR_add_key 281 300#define __NR_add_key 281
301#define __NR_request_key 282 301#define __NR_request_key 282
302#define __NR_keyctl 283 302#define __NR_keyctl 283
303#define __NR_openat 284
304#define __NR_mkdirat 285
305#define __NR_mknodat 286
306#define __NR_fchownat 287
307#define __NR_futimesat 288
308#define __NR_newfstatat 289
309#define __NR_unlinkat 290
310#define __NR_renameat 291
311#define __NR_linkat 292
312#define __NR_symlinkat 293
313#define __NR_readlinkat 294
314#define __NR_fchmodat 295
315#define __NR_faccessat 296
316#define __NR_pselect6 297
317#define __NR_ppoll 298
303 318
304/* WARNING: You MAY NOT add syscall numbers larger than 283, since 319/* WARNING: You MAY NOT add syscall numbers larger than 298, since
305 * all of the syscall tables in the Sparc kernel are 320 * all of the syscall tables in the Sparc kernel are
306 * sized to have 283 entries (starting at zero). Therefore 321 * sized to have 298 entries (starting at zero). Therefore
307 * find a free slot in the 0-282 range. 322 * find a free slot in the 0-298 range.
308 */ 323 */
309 324
310#define _syscall0(type,name) \ 325#define _syscall0(type,name) \
@@ -458,6 +473,7 @@ return -1; \
458#define __ARCH_WANT_SYS_OLDUMOUNT 473#define __ARCH_WANT_SYS_OLDUMOUNT
459#define __ARCH_WANT_SYS_SIGPENDING 474#define __ARCH_WANT_SYS_SIGPENDING
460#define __ARCH_WANT_SYS_SIGPROCMASK 475#define __ARCH_WANT_SYS_SIGPROCMASK
476#define __ARCH_WANT_SYS_RT_SIGSUSPEND
461#endif 477#endif
462 478
463#ifdef __KERNEL_SYSCALLS__ 479#ifdef __KERNEL_SYSCALLS__
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index d02f1e8ae1a..3c59b2693fb 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -163,6 +163,7 @@ enum prom_input_device {
163 PROMDEV_IKBD, /* input from keyboard */ 163 PROMDEV_IKBD, /* input from keyboard */
164 PROMDEV_ITTYA, /* input from ttya */ 164 PROMDEV_ITTYA, /* input from ttya */
165 PROMDEV_ITTYB, /* input from ttyb */ 165 PROMDEV_ITTYB, /* input from ttyb */
166 PROMDEV_IRSC, /* input from rsc */
166 PROMDEV_I_UNK, 167 PROMDEV_I_UNK,
167}; 168};
168 169
@@ -174,6 +175,7 @@ enum prom_output_device {
174 PROMDEV_OSCREEN, /* to screen */ 175 PROMDEV_OSCREEN, /* to screen */
175 PROMDEV_OTTYA, /* to ttya */ 176 PROMDEV_OTTYA, /* to ttya */
176 PROMDEV_OTTYB, /* to ttyb */ 177 PROMDEV_OTTYB, /* to ttyb */
178 PROMDEV_ORSC, /* to rsc */
177 PROMDEV_O_UNK, 179 PROMDEV_O_UNK,
178}; 180};
179 181
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index c94d8b3991b..ac9d068aab4 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -221,7 +221,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
221 * nop 221 * nop
222 */ 222 */
223#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 223#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
224#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ 224#define TIF_RESTORE_SIGMASK 1 /* restore signal mask in do_signal() */
225#define TIF_SIGPENDING 2 /* signal pending */ 225#define TIF_SIGPENDING 2 /* signal pending */
226#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 226#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
227#define TIF_PERFCTR 4 /* performance counters active */ 227#define TIF_PERFCTR 4 /* performance counters active */
@@ -241,7 +241,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
241#define TIF_POLLING_NRFLAG 14 241#define TIF_POLLING_NRFLAG 14
242 242
243#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 243#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
244#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
245#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 244#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
246#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 245#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
247#define _TIF_PERFCTR (1<<TIF_PERFCTR) 246#define _TIF_PERFCTR (1<<TIF_PERFCTR)
@@ -250,11 +249,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
250#define _TIF_32BIT (1<<TIF_32BIT) 249#define _TIF_32BIT (1<<TIF_32BIT)
251#define _TIF_SECCOMP (1<<TIF_SECCOMP) 250#define _TIF_SECCOMP (1<<TIF_SECCOMP)
252#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 251#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
252#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
253#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 253#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
254#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 254#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
255 255
256#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \ 256#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
257 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \ 257 (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | \
258 _TIF_NEED_RESCHED | _TIF_PERFCTR)) 258 _TIF_NEED_RESCHED | _TIF_PERFCTR))
259 259
260#endif /* __KERNEL__ */ 260#endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 51ec2879b88..84ac2bdb090 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -302,11 +302,26 @@
302#define __NR_add_key 281 302#define __NR_add_key 281
303#define __NR_request_key 282 303#define __NR_request_key 282
304#define __NR_keyctl 283 304#define __NR_keyctl 283
305#define __NR_openat 284
306#define __NR_mkdirat 285
307#define __NR_mknodat 286
308#define __NR_fchownat 287
309#define __NR_futimesat 288
310#define __NR_newfstatat 289
311#define __NR_unlinkat 290
312#define __NR_renameat 291
313#define __NR_linkat 292
314#define __NR_symlinkat 293
315#define __NR_readlinkat 294
316#define __NR_fchmodat 295
317#define __NR_faccessat 296
318#define __NR_pselect6 297
319#define __NR_ppoll 298
305 320
306/* WARNING: You MAY NOT add syscall numbers larger than 283, since 321/* WARNING: You MAY NOT add syscall numbers larger than 298, since
307 * all of the syscall tables in the Sparc kernel are 322 * all of the syscall tables in the Sparc kernel are
308 * sized to have 283 entries (starting at zero). Therefore 323 * sized to have 298 entries (starting at zero). Therefore
309 * find a free slot in the 0-282 range. 324 * find a free slot in the 0-298 range.
310 */ 325 */
311 326
312#define _syscall0(type,name) \ 327#define _syscall0(type,name) \
@@ -501,6 +516,8 @@ asmlinkage long sys_rt_sigaction(int sig,
501#define __ARCH_WANT_SYS_OLDUMOUNT 516#define __ARCH_WANT_SYS_OLDUMOUNT
502#define __ARCH_WANT_SYS_SIGPENDING 517#define __ARCH_WANT_SYS_SIGPENDING
503#define __ARCH_WANT_SYS_SIGPROCMASK 518#define __ARCH_WANT_SYS_SIGPROCMASK
519#define __ARCH_WANT_SYS_RT_SIGSUSPEND
520#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
504#endif 521#endif
505 522
506/* 523/*
diff --git a/include/asm-um/io.h b/include/asm-um/io.h
index 90674056dce..1934d9340e2 100644
--- a/include/asm-um/io.h
+++ b/include/asm-um/io.h
@@ -33,4 +33,20 @@ static inline void * phys_to_virt(unsigned long address)
33 */ 33 */
34#define xlate_dev_kmem_ptr(p) p 34#define xlate_dev_kmem_ptr(p) p
35 35
36static inline void writeb(unsigned char b, volatile void __iomem *addr)
37{
38 *(volatile unsigned char __force *) addr = b;
39}
40static inline void writew(unsigned short b, volatile void __iomem *addr)
41{
42 *(volatile unsigned short __force *) addr = b;
43}
44static inline void writel(unsigned int b, volatile void __iomem *addr)
45{
46 *(volatile unsigned int __force *) addr = b;
47}
48#define __raw_writeb writeb
49#define __raw_writew writew
50#define __raw_writel writel
51
36#endif 52#endif
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 705c71972c3..17b6b07c433 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -69,6 +69,7 @@ static inline struct thread_info *current_thread_info(void)
69#define TIF_RESTART_BLOCK 4 69#define TIF_RESTART_BLOCK 4
70#define TIF_MEMDIE 5 70#define TIF_MEMDIE 5
71#define TIF_SYSCALL_AUDIT 6 71#define TIF_SYSCALL_AUDIT 6
72#define TIF_RESTORE_SIGMASK 7
72 73
73#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 74#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
74#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 75#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -76,16 +77,6 @@ static inline struct thread_info *current_thread_info(void)
76#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 77#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
77#define _TIF_MEMDIE (1 << TIF_MEMDIE) 78#define _TIF_MEMDIE (1 << TIF_MEMDIE)
78#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 79#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
80#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
79 81
80#endif 82#endif
81
82/*
83 * Overrides for Emacs so that we follow Linus's tabbing style.
84 * Emacs will notice this stuff at the end of the file and automatically
85 * adjust the settings for this buffer only. This must remain at the end
86 * of the file.
87 * ---------------------------------------------------------------------------
88 * Local variables:
89 * c-file-style: "linux"
90 * End:
91 */
diff --git a/include/asm-um/unistd.h b/include/asm-um/unistd.h
index 6fdde45cc05..afccfcaa9ea 100644
--- a/include/asm-um/unistd.h
+++ b/include/asm-um/unistd.h
@@ -34,6 +34,7 @@ extern int um_execve(const char *file, char *const argv[], char *const env[]);
34#define __ARCH_WANT_SYS_SIGPENDING 34#define __ARCH_WANT_SYS_SIGPENDING
35#define __ARCH_WANT_SYS_SIGPROCMASK 35#define __ARCH_WANT_SYS_SIGPROCMASK
36#define __ARCH_WANT_SYS_RT_SIGACTION 36#define __ARCH_WANT_SYS_RT_SIGACTION
37#define __ARCH_WANT_SYS_RT_SIGSUSPEND
37#endif 38#endif
38 39
39#ifdef __KERNEL_SYSCALLS__ 40#ifdef __KERNEL_SYSCALLS__
diff --git a/include/asm-x86_64/edac.h b/include/asm-x86_64/edac.h
new file mode 100644
index 00000000000..cad1cd42b4e
--- /dev/null
+++ b/include/asm-x86_64/edac.h
@@ -0,0 +1,18 @@
1#ifndef ASM_EDAC_H
2#define ASM_EDAC_H
3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5
6static __inline__ void atomic_scrub(void *va, u32 size)
7{
8 unsigned int *virt_addr = va;
9 u32 i;
10
11 for (i = 0; i < size / 4; i++, virt_addr++)
12 /* Very carefully read and write to memory atomically
13 * so we are interrupt, DMA and SMP safe.
14 */
15 __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
16}
17
18#endif
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index e8843362a6c..e87cd83a0e8 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -300,7 +300,20 @@
300#define __NR_ia32_inotify_add_watch 292 300#define __NR_ia32_inotify_add_watch 292
301#define __NR_ia32_inotify_rm_watch 293 301#define __NR_ia32_inotify_rm_watch 293
302#define __NR_ia32_migrate_pages 294 302#define __NR_ia32_migrate_pages 294
303#define __NR_ia32_opanat 295
304#define __NR_ia32_mkdirat 296
305#define __NR_ia32_mknodat 297
306#define __NR_ia32_fchownat 298
307#define __NR_ia32_futimesat 299
308#define __NR_ia32_newfstatat 300
309#define __NR_ia32_unlinkat 301
310#define __NR_ia32_renameat 302
311#define __NR_ia32_linkat 303
312#define __NR_ia32_symlinkat 304
313#define __NR_ia32_readlinkat 305
314#define __NR_ia32_fchmodat 306
315#define __NR_ia32_faccessat 307
303 316
304#define IA32_NR_syscalls 295 /* must be > than biggest syscall! */ 317#define IA32_NR_syscalls 308 /* must be > than biggest syscall! */
305 318
306#endif /* _ASM_X86_64_IA32_UNISTD_H_ */ 319#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index e6f896161c1..436d099b5b6 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -573,8 +573,35 @@ __SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
573__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) 573__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
574#define __NR_migrate_pages 256 574#define __NR_migrate_pages 256
575__SYSCALL(__NR_migrate_pages, sys_migrate_pages) 575__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
576#define __NR_openat 257
577__SYSCALL(__NR_openat, sys_openat)
578#define __NR_mkdirat 258
579__SYSCALL(__NR_mkdirat, sys_mkdirat)
580#define __NR_mknodat 259
581__SYSCALL(__NR_mknodat, sys_mknodat)
582#define __NR_fchownat 260
583__SYSCALL(__NR_fchownat, sys_fchownat)
584#define __NR_futimesat 261
585__SYSCALL(__NR_futimesat, sys_futimesat)
586#define __NR_newfstatat 262
587__SYSCALL(__NR_newfstatat, sys_newfstatat)
588#define __NR_unlinkat 263
589__SYSCALL(__NR_unlinkat, sys_unlinkat)
590#define __NR_renameat 264
591__SYSCALL(__NR_renameat, sys_renameat)
592#define __NR_linkat 265
593__SYSCALL(__NR_linkat, sys_linkat)
594#define __NR_symlinkat 266
595__SYSCALL(__NR_symlinkat, sys_symlinkat)
596#define __NR_readlinkat 267
597__SYSCALL(__NR_readlinkat, sys_readlinkat)
598#define __NR_fchmodat 268
599__SYSCALL(__NR_fchmodat, sys_fchmodat)
600#define __NR_faccessat 269
601__SYSCALL(__NR_faccessat, sys_faccessat)
602
603#define __NR_syscall_max __NR_faccessat
576 604
577#define __NR_syscall_max __NR_migrate_pages
578#ifndef __NO_STUBS 605#ifndef __NO_STUBS
579 606
580/* user-visible error numbers are in the range -1 - -4095 */ 607/* user-visible error numbers are in the range -1 - -4095 */
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 8a7c82151de..c52a63755fd 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -23,6 +23,13 @@
23#define DN_ATTRIB 0x00000020 /* File changed attibutes */ 23#define DN_ATTRIB 0x00000020 /* File changed attibutes */
24#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */ 24#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
25 25
26#define AT_FDCWD -100 /* Special value used to indicate
27 openat should use the current
28 working directory. */
29#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
30#define AT_REMOVEDIR 0x200 /* Remove directory instead of
31 unlinking file. */
32
26#ifdef __KERNEL__ 33#ifdef __KERNEL__
27 34
28#ifndef force_o_largefile 35#ifndef force_o_largefile
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 093638cb5f0..ec09d5ed9aa 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1343,7 +1343,8 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1343 1343
1344extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, 1344extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
1345 struct file *filp); 1345 struct file *filp);
1346extern long do_sys_open(const char __user *filename, int flags, int mode); 1346extern long do_sys_open(int fdf, const char __user *filename, int flags,
1347 int mode);
1347extern struct file *filp_open(const char *, int, int); 1348extern struct file *filp_open(const char *, int, int);
1348extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); 1349extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
1349extern int filp_close(struct file *, fl_owner_t id); 1350extern int filp_close(struct file *, fl_owner_t id);
@@ -1482,7 +1483,7 @@ static inline void allow_write_access(struct file *file)
1482} 1483}
1483extern int do_pipe(int *); 1484extern int do_pipe(int *);
1484 1485
1485extern int open_namei(const char *, int, int, struct nameidata *); 1486extern int open_namei(int dfd, const char *, int, int, struct nameidata *);
1486extern int may_open(struct nameidata *, int, int); 1487extern int may_open(struct nameidata *, int, int);
1487 1488
1488extern int kernel_read(struct file *, unsigned long, char *, unsigned long); 1489extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
@@ -1680,6 +1681,8 @@ extern int vfs_readdir(struct file *, filldir_t, void *);
1680 1681
1681extern int vfs_stat(char __user *, struct kstat *); 1682extern int vfs_stat(char __user *, struct kstat *);
1682extern int vfs_lstat(char __user *, struct kstat *); 1683extern int vfs_lstat(char __user *, struct kstat *);
1684extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
1685extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
1683extern int vfs_fstat(unsigned int, struct kstat *); 1686extern int vfs_fstat(unsigned int, struct kstat *);
1684 1687
1685extern int vfs_ioctl(struct file *, unsigned int, unsigned int, unsigned long); 1688extern int vfs_ioctl(struct file *, unsigned int, unsigned int, unsigned long);
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index fe26d431de8..7a92c1ce145 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -72,6 +72,7 @@
72 * over Ethernet 72 * over Ethernet
73 */ 73 */
74#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ 74#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
75#define ETH_P_TIPC 0x88CA /* TIPC */
75 76
76/* 77/*
77 * Non DIX types. Won't clash for 1500 types. 78 * Non DIX types. Won't clash for 1500 types.
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index d6a53ed6ab6..bbd2221923c 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -159,6 +159,7 @@ extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
159extern struct mempolicy default_policy; 159extern struct mempolicy default_policy;
160extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, 160extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
161 unsigned long addr); 161 unsigned long addr);
162extern unsigned slab_node(struct mempolicy *policy);
162 163
163extern int policy_zone; 164extern int policy_zone;
164 165
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 49cc68af01f..8ac854f7f19 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -39,24 +39,3 @@ del_page_from_lru(struct zone *zone, struct page *page)
39 } 39 }
40} 40}
41 41
42/*
43 * Isolate one page from the LRU lists.
44 *
45 * - zone->lru_lock must be held
46 */
47static inline int __isolate_lru_page(struct page *page)
48{
49 if (unlikely(!TestClearPageLRU(page)))
50 return 0;
51
52 if (get_page_testone(page)) {
53 /*
54 * It is being freed elsewhere
55 */
56 __put_page(page);
57 SetPageLRU(page);
58 return -ENOENT;
59 }
60
61 return 1;
62}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 34cbefd2ebd..93a849f742d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -149,15 +149,17 @@ struct zone {
149 unsigned long pages_scanned; /* since last reclaim */ 149 unsigned long pages_scanned; /* since last reclaim */
150 int all_unreclaimable; /* All pages pinned */ 150 int all_unreclaimable; /* All pages pinned */
151 151
152 /*
153 * Does the allocator try to reclaim pages from the zone as soon
154 * as it fails a watermark_ok() in __alloc_pages?
155 */
156 int reclaim_pages;
157 /* A count of how many reclaimers are scanning this zone */ 152 /* A count of how many reclaimers are scanning this zone */
158 atomic_t reclaim_in_progress; 153 atomic_t reclaim_in_progress;
159 154
160 /* 155 /*
156 * timestamp (in jiffies) of the last zone reclaim that did not
157 * result in freeing of pages. This is used to avoid repeated scans
158 * if all memory in the zone is in use.
159 */
160 unsigned long last_unsuccessful_zone_reclaim;
161
162 /*
161 * prev_priority holds the scanning priority for this zone. It is 163 * prev_priority holds the scanning priority for this zone. It is
162 * defined as the scanning priority at which we achieved our reclaim 164 * defined as the scanning priority at which we achieved our reclaim
163 * target at the previous try_to_free_pages() or balance_pgdat() 165 * target at the previous try_to_free_pages() or balance_pgdat()
diff --git a/include/linux/namei.h b/include/linux/namei.h
index b699e427c00..e6698013e4d 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -56,10 +56,11 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
56#define LOOKUP_ACCESS (0x0400) 56#define LOOKUP_ACCESS (0x0400)
57 57
58extern int FASTCALL(__user_walk(const char __user *, unsigned, struct nameidata *)); 58extern int FASTCALL(__user_walk(const char __user *, unsigned, struct nameidata *));
59extern int FASTCALL(__user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *));
59#define user_path_walk(name,nd) \ 60#define user_path_walk(name,nd) \
60 __user_walk(name, LOOKUP_FOLLOW, nd) 61 __user_walk_fd(AT_FDCWD, name, LOOKUP_FOLLOW, nd)
61#define user_path_walk_link(name,nd) \ 62#define user_path_walk_link(name,nd) \
62 __user_walk(name, 0, nd) 63 __user_walk_fd(AT_FDCWD, name, 0, nd)
63extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *)); 64extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *));
64extern int FASTCALL(path_walk(const char *, struct nameidata *)); 65extern int FASTCALL(path_walk(const char *, struct nameidata *));
65extern int FASTCALL(link_path_walk(const char *, struct nameidata *)); 66extern int FASTCALL(link_path_walk(const char *, struct nameidata *));
@@ -67,7 +68,7 @@ extern void path_release(struct nameidata *);
67extern void path_release_on_umount(struct nameidata *); 68extern void path_release_on_umount(struct nameidata *);
68 69
69extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags); 70extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags);
70extern int path_lookup_open(const char *, unsigned lookup_flags, struct nameidata *, int open_flags); 71extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags);
71extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, 72extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
72 int (*open)(struct inode *, struct file *)); 73 int (*open)(struct inode *, struct file *));
73extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); 74extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 472f0483480..59ff6c430cf 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -19,7 +19,7 @@ struct xt_get_revision
19/* For standard target */ 19/* For standard target */
20#define XT_RETURN (-NF_REPEAT - 1) 20#define XT_RETURN (-NF_REPEAT - 1)
21 21
22#define XT_ALIGN(s) (((s) + (__alignof__(void *)-1)) & ~(__alignof__(void *)-1)) 22#define XT_ALIGN(s) (((s) + (__alignof__(u_int64_t)-1)) & ~(__alignof__(u_int64_t)-1))
23 23
24/* Standard return verdict, or do jump. */ 24/* Standard return verdict, or do jump. */
25#define XT_STANDARD_TARGET "" 25#define XT_STANDARD_TARGET ""
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index 51c231a1e5a..ec7c2e872d7 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -124,7 +124,7 @@ int nfsd_statfs(struct svc_rqst *, struct svc_fh *,
124 124
125int nfsd_notify_change(struct inode *, struct iattr *); 125int nfsd_notify_change(struct inode *, struct iattr *);
126int nfsd_permission(struct svc_export *, struct dentry *, int); 126int nfsd_permission(struct svc_export *, struct dentry *, int);
127void nfsd_sync_dir(struct dentry *dp); 127int nfsd_sync_dir(struct dentry *dp);
128 128
129#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 129#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
130#ifdef CONFIG_NFSD_V2_ACL 130#ifdef CONFIG_NFSD_V2_ACL
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 8903688890c..77adba7d228 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -145,8 +145,9 @@ struct nfsd4_lock {
145 } ok; 145 } ok;
146 struct nfsd4_lock_denied denied; 146 struct nfsd4_lock_denied denied;
147 } u; 147 } u;
148 148 /* The lk_replay_owner is the open owner in the open_to_lock_owner
149 struct nfs4_stateowner *lk_stateowner; 149 * case and the lock owner otherwise: */
150 struct nfs4_stateowner *lk_replay_owner;
150}; 151};
151#define lk_new_open_seqid v.new.open_seqid 152#define lk_new_open_seqid v.new.open_seqid
152#define lk_new_open_stateid v.new.open_stateid 153#define lk_new_open_stateid v.new.open_stateid
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 5403257ae3e..ecc1fc1f0f0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1992,6 +1992,7 @@
1992#define PCI_VENDOR_ID_DCI 0x6666 1992#define PCI_VENDOR_ID_DCI 0x6666
1993#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001 1993#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001
1994#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002 1994#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
1995#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
1995 1996
1996#define PCI_VENDOR_ID_INTEL 0x8086 1997#define PCI_VENDOR_ID_INTEL 0x8086
1997#define PCI_DEVICE_ID_INTEL_EESSC 0x0008 1998#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
diff --git a/include/linux/poll.h b/include/linux/poll.h
index f6da702088f..8e8f6098508 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -92,7 +92,11 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
92 memset(fdset, 0, FDS_BYTES(nr)); 92 memset(fdset, 0, FDS_BYTES(nr));
93} 93}
94 94
95extern int do_select(int n, fd_set_bits *fds, long *timeout); 95#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
96
97extern int do_select(int n, fd_set_bits *fds, s64 *timeout);
98extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
99 s64 *timeout);
96 100
97#endif /* KERNEL */ 101#endif /* KERNEL */
98 102
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2df1a1a2fee..0cfcd1c7865 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -809,6 +809,7 @@ struct task_struct {
809 struct sighand_struct *sighand; 809 struct sighand_struct *sighand;
810 810
811 sigset_t blocked, real_blocked; 811 sigset_t blocked, real_blocked;
812 sigset_t saved_sigmask; /* To be restored with TIF_RESTORE_SIGMASK */
812 struct sigpending pending; 813 struct sigpending pending;
813 814
814 unsigned long sas_ss_sp; 815 unsigned long sas_ss_sp;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index e4086ec8b95..50cab2a09f2 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -246,6 +246,7 @@ struct svc_deferred_req {
246 u32 prot; /* protocol (UDP or TCP) */ 246 u32 prot; /* protocol (UDP or TCP) */
247 struct sockaddr_in addr; 247 struct sockaddr_in addr;
248 struct svc_sock *svsk; /* where reply must go */ 248 struct svc_sock *svsk; /* where reply must go */
249 u32 daddr; /* where reply must come from */
249 struct cache_deferred_req handle; 250 struct cache_deferred_req handle;
250 int argslen; 251 int argslen;
251 u32 args[0]; 252 u32 args[0];
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e92054d6530..4a99e4a7fbf 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -167,6 +167,7 @@ extern void FASTCALL(lru_cache_add_active(struct page *));
167extern void FASTCALL(activate_page(struct page *)); 167extern void FASTCALL(activate_page(struct page *));
168extern void FASTCALL(mark_page_accessed(struct page *)); 168extern void FASTCALL(mark_page_accessed(struct page *));
169extern void lru_add_drain(void); 169extern void lru_add_drain(void);
170extern int lru_add_drain_all(void);
170extern int rotate_reclaimable_page(struct page *page); 171extern int rotate_reclaimable_page(struct page *page);
171extern void swap_setup(void); 172extern void swap_setup(void);
172 173
@@ -175,6 +176,17 @@ extern int try_to_free_pages(struct zone **, gfp_t);
175extern int shrink_all_memory(int); 176extern int shrink_all_memory(int);
176extern int vm_swappiness; 177extern int vm_swappiness;
177 178
179#ifdef CONFIG_NUMA
180extern int zone_reclaim_mode;
181extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
182#else
183#define zone_reclaim_mode 0
184static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
185{
186 return 0;
187}
188#endif
189
178#ifdef CONFIG_MIGRATION 190#ifdef CONFIG_MIGRATION
179extern int isolate_lru_page(struct page *p); 191extern int isolate_lru_page(struct page *p);
180extern int putback_lru_pages(struct list_head *l); 192extern int putback_lru_pages(struct list_head *l);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 3eed4734701..e666d607056 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -510,9 +510,24 @@ asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
510asmlinkage long sys_ioprio_set(int which, int who, int ioprio); 510asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
511asmlinkage long sys_ioprio_get(int which, int who); 511asmlinkage long sys_ioprio_get(int which, int who);
512asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, 512asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
513 unsigned long maxnode); 513 unsigned long maxnode);
514asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, 514asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
515 const unsigned long __user *from, const unsigned long __user *to); 515 const unsigned long __user *from,
516 const unsigned long __user *to);
517asmlinkage long sys_mbind(unsigned long start, unsigned long len,
518 unsigned long mode,
519 unsigned long __user *nmask,
520 unsigned long maxnode,
521 unsigned flags);
522asmlinkage long sys_get_mempolicy(int __user *policy,
523 unsigned long __user *nmask,
524 unsigned long maxnode,
525 unsigned long addr, unsigned long flags);
526
527asmlinkage long sys_inotify_init(void);
528asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
529 u32 mask);
530asmlinkage long sys_inotify_rm_watch(int fd, u32 wd);
516 531
517asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, 532asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
518 __u32 __user *ustatus); 533 __u32 __user *ustatus);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 7f472127b7b..8352a7ce589 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -182,6 +182,7 @@ enum
182 VM_SWAP_TOKEN_TIMEOUT=28, /* default time for token time out */ 182 VM_SWAP_TOKEN_TIMEOUT=28, /* default time for token time out */
183 VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */ 183 VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */
184 VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */ 184 VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
185 VM_ZONE_RECLAIM_MODE=31,/* reclaim local zone memory before going off node */
185}; 186};
186 187
187 188
diff --git a/include/linux/time.h b/include/linux/time.h
index f2aca7ec632..614dd846583 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -74,7 +74,7 @@ extern void do_gettimeofday(struct timeval *tv);
74extern int do_settimeofday(struct timespec *tv); 74extern int do_settimeofday(struct timespec *tv);
75extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); 75extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
76#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) 76#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
77extern long do_utimes(char __user *filename, struct timeval *times); 77extern long do_utimes(int dfd, char __user *filename, struct timeval *times);
78struct itimerval; 78struct itimerval;
79extern int do_setitimer(int which, struct itimerval *value, 79extern int do_setitimer(int which, struct itimerval *value,
80 struct itimerval *ovalue); 80 struct itimerval *ovalue);
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index a52c8c64a5a..33a653913d9 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -168,10 +168,13 @@
168#define TIPC_MAX_LINK_NAME 60 /* format = Z.C.N:interface-Z.C.N:interface */ 168#define TIPC_MAX_LINK_NAME 60 /* format = Z.C.N:interface-Z.C.N:interface */
169 169
170/* 170/*
171 * Link priority limits (range from 0 to # priorities - 1) 171 * Link priority limits (min, default, max, media default)
172 */ 172 */
173 173
174#define TIPC_NUM_LINK_PRI 32 174#define TIPC_MIN_LINK_PRI 0
175#define TIPC_DEF_LINK_PRI 10
176#define TIPC_MAX_LINK_PRI 31
177#define TIPC_MEDIA_LINK_PRI (TIPC_MAX_LINK_PRI + 1)
175 178
176/* 179/*
177 * Link tolerance limits (min, default, max), in ms 180 * Link tolerance limits (min, default, max), in ms
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 315a5163d6a..e8eb0040ce3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -56,6 +56,14 @@
56#define REMOTE_DISTANCE 20 56#define REMOTE_DISTANCE 20
57#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE) 57#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
58#endif 58#endif
59#ifndef RECLAIM_DISTANCE
60/*
61 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
62 * (in whatever arch specific measurement units returned by node_distance())
63 * then switch on zone reclaim on boot.
64 */
65#define RECLAIM_DISTANCE 20
66#endif
59#ifndef PENALTY_FOR_NODE_WITH_CPUS 67#ifndef PENALTY_FOR_NODE_WITH_CPUS
60#define PENALTY_FOR_NODE_WITH_CPUS (1) 68#define PENALTY_FOR_NODE_WITH_CPUS (1)
61#endif 69#endif
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a553f39f6ae..e673b2c984e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -175,6 +175,8 @@ void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
175void sctp_icmp_proto_unreachable(struct sock *sk, 175void sctp_icmp_proto_unreachable(struct sock *sk,
176 struct sctp_association *asoc, 176 struct sctp_association *asoc,
177 struct sctp_transport *t); 177 struct sctp_transport *t);
178void sctp_backlog_migrate(struct sctp_association *assoc,
179 struct sock *oldsk, struct sock *newsk);
178 180
179/* 181/*
180 * Section: Macros, externs, and inlines 182 * Section: Macros, externs, and inlines
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f5c22d77fea..8c522ae031b 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -127,9 +127,9 @@ extern struct sctp_globals {
127 * RTO.Alpha - 1/8 (3 when converted to right shifts.) 127 * RTO.Alpha - 1/8 (3 when converted to right shifts.)
128 * RTO.Beta - 1/4 (2 when converted to right shifts.) 128 * RTO.Beta - 1/4 (2 when converted to right shifts.)
129 */ 129 */
130 __u32 rto_initial; 130 unsigned long rto_initial;
131 __u32 rto_min; 131 unsigned long rto_min;
132 __u32 rto_max; 132 unsigned long rto_max;
133 133
134 /* Note: rto_alpha and rto_beta are really defined as inverse 134 /* Note: rto_alpha and rto_beta are really defined as inverse
135 * powers of two to facilitate integer operations. 135 * powers of two to facilitate integer operations.
@@ -140,12 +140,18 @@ extern struct sctp_globals {
140 /* Max.Burst - 4 */ 140 /* Max.Burst - 4 */
141 int max_burst; 141 int max_burst;
142 142
143 /* Valid.Cookie.Life - 60 seconds */
144 int valid_cookie_life;
145
146 /* Whether Cookie Preservative is enabled(1) or not(0) */ 143 /* Whether Cookie Preservative is enabled(1) or not(0) */
147 int cookie_preserve_enable; 144 int cookie_preserve_enable;
148 145
146 /* Valid.Cookie.Life - 60 seconds */
147 unsigned long valid_cookie_life;
148
149 /* Delayed SACK timeout 200ms default*/
150 unsigned long sack_timeout;
151
152 /* HB.interval - 30 seconds */
153 unsigned long hb_interval;
154
149 /* Association.Max.Retrans - 10 attempts 155 /* Association.Max.Retrans - 10 attempts
150 * Path.Max.Retrans - 5 attempts (per destination address) 156 * Path.Max.Retrans - 5 attempts (per destination address)
151 * Max.Init.Retransmits - 8 attempts 157 * Max.Init.Retransmits - 8 attempts
@@ -168,12 +174,6 @@ extern struct sctp_globals {
168 */ 174 */
169 int rcvbuf_policy; 175 int rcvbuf_policy;
170 176
171 /* Delayed SACK timeout 200ms default*/
172 int sack_timeout;
173
174 /* HB.interval - 30 seconds */
175 int hb_interval;
176
177 /* The following variables are implementation specific. */ 177 /* The following variables are implementation specific. */
178 178
179 /* Default initialization values to be applied to new associations. */ 179 /* Default initialization values to be applied to new associations. */
@@ -405,8 +405,9 @@ struct sctp_cookie {
405/* The format of our cookie that we send to our peer. */ 405/* The format of our cookie that we send to our peer. */
406struct sctp_signed_cookie { 406struct sctp_signed_cookie {
407 __u8 signature[SCTP_SECRET_SIZE]; 407 __u8 signature[SCTP_SECRET_SIZE];
408 __u32 __pad; /* force sctp_cookie alignment to 64 bits */
408 struct sctp_cookie c; 409 struct sctp_cookie c;
409}; 410} __attribute__((packed));
410 411
411/* This is another convenience type to allocate memory for address 412/* This is another convenience type to allocate memory for address
412 * params for the maximum size and pass such structures around 413 * params for the maximum size and pass such structures around
@@ -827,7 +828,7 @@ struct sctp_transport {
827 __u32 rtt; /* This is the most recent RTT. */ 828 __u32 rtt; /* This is the most recent RTT. */
828 829
829 /* RTO : The current retransmission timeout value. */ 830 /* RTO : The current retransmission timeout value. */
830 __u32 rto; 831 unsigned long rto;
831 832
832 /* RTTVAR : The current RTT variation. */ 833 /* RTTVAR : The current RTT variation. */
833 __u32 rttvar; 834 __u32 rttvar;
@@ -877,22 +878,10 @@ struct sctp_transport {
877 /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to 878 /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
878 * the destination address every heartbeat interval. 879 * the destination address every heartbeat interval.
879 */ 880 */
880 __u32 hbinterval; 881 unsigned long hbinterval;
881
882 /* This is the max_retrans value for the transport and will
883 * be initialized from the assocs value. This can be changed
884 * using SCTP_SET_PEER_ADDR_PARAMS socket option.
885 */
886 __u16 pathmaxrxt;
887
888 /* PMTU : The current known path MTU. */
889 __u32 pathmtu;
890 882
891 /* SACK delay timeout */ 883 /* SACK delay timeout */
892 __u32 sackdelay; 884 unsigned long sackdelay;
893
894 /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */
895 __u32 param_flags;
896 885
897 /* When was the last time (in jiffies) that we heard from this 886 /* When was the last time (in jiffies) that we heard from this
898 * transport? We use this to pick new active and retran paths. 887 * transport? We use this to pick new active and retran paths.
@@ -904,6 +893,18 @@ struct sctp_transport {
904 */ 893 */
905 unsigned long last_time_ecne_reduced; 894 unsigned long last_time_ecne_reduced;
906 895
896 /* This is the max_retrans value for the transport and will
897 * be initialized from the assocs value. This can be changed
898 * using SCTP_SET_PEER_ADDR_PARAMS socket option.
899 */
900 __u16 pathmaxrxt;
901
902 /* PMTU : The current known path MTU. */
903 __u32 pathmtu;
904
905 /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */
906 __u32 param_flags;
907
907 /* The number of times INIT has been sent on this transport. */ 908 /* The number of times INIT has been sent on this transport. */
908 int init_sent_count; 909 int init_sent_count;
909 910
@@ -1249,6 +1250,14 @@ struct sctp_endpoint {
1249 int last_key; 1250 int last_key;
1250 int key_changed_at; 1251 int key_changed_at;
1251 1252
1253 /* digest: This is a digest of the sctp cookie. This field is
1254 * only used on the receive path when we try to validate
1255 * that the cookie has not been tampered with. We put
1256 * this here so we pre-allocate this once and can re-use
1257 * on every receive.
1258 */
1259 __u8 digest[SCTP_SIGNATURE_SIZE];
1260
1252 /* sendbuf acct. policy. */ 1261 /* sendbuf acct. policy. */
1253 __u32 sndbuf_policy; 1262 __u32 sndbuf_policy;
1254 1263
@@ -1499,9 +1508,9 @@ struct sctp_association {
1499 * These values will be initialized by system defaults, but can 1508 * These values will be initialized by system defaults, but can
1500 * be modified via the SCTP_RTOINFO socket option. 1509 * be modified via the SCTP_RTOINFO socket option.
1501 */ 1510 */
1502 __u32 rto_initial; 1511 unsigned long rto_initial;
1503 __u32 rto_max; 1512 unsigned long rto_max;
1504 __u32 rto_min; 1513 unsigned long rto_min;
1505 1514
1506 /* Maximum number of new data packets that can be sent in a burst. */ 1515 /* Maximum number of new data packets that can be sent in a burst. */
1507 int max_burst; 1516 int max_burst;
@@ -1519,13 +1528,13 @@ struct sctp_association {
1519 __u16 init_retries; 1528 __u16 init_retries;
1520 1529
1521 /* The largest timeout or RTO value to use in attempting an INIT */ 1530 /* The largest timeout or RTO value to use in attempting an INIT */
1522 __u16 max_init_timeo; 1531 unsigned long max_init_timeo;
1523 1532
1524 /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to 1533 /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
1525 * the destination address every heartbeat interval. This value 1534 * the destination address every heartbeat interval. This value
1526 * will be inherited by all new transports. 1535 * will be inherited by all new transports.
1527 */ 1536 */
1528 __u32 hbinterval; 1537 unsigned long hbinterval;
1529 1538
1530 /* This is the max_retrans value for new transports in the 1539 /* This is the max_retrans value for new transports in the
1531 * association. 1540 * association.
@@ -1537,13 +1546,14 @@ struct sctp_association {
1537 */ 1546 */
1538 __u32 pathmtu; 1547 __u32 pathmtu;
1539 1548
1540 /* SACK delay timeout */
1541 __u32 sackdelay;
1542
1543 /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */ 1549 /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */
1544 __u32 param_flags; 1550 __u32 param_flags;
1545 1551
1546 int timeouts[SCTP_NUM_TIMEOUT_TYPES]; 1552 /* SACK delay timeout */
1553 unsigned long sackdelay;
1554
1555
1556 unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
1547 struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES]; 1557 struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
1548 1558
1549 /* Transport to which SHUTDOWN chunk was last sent. */ 1559 /* Transport to which SHUTDOWN chunk was last sent. */
@@ -1648,7 +1658,10 @@ struct sctp_association {
1648 /* How many duplicated TSNs have we seen? */ 1658 /* How many duplicated TSNs have we seen? */
1649 int numduptsns; 1659 int numduptsns;
1650 1660
1651 /* Number of seconds of idle time before an association is closed. */ 1661 /* Number of seconds of idle time before an association is closed.
1662 * In the association context, this is really used as a boolean
1663 * since the real timeout is stored in the timeouts array
1664 */
1652 __u32 autoclose; 1665 __u32 autoclose;
1653 1666
1654 /* These are to support 1667 /* These are to support
diff --git a/include/scsi/scsi_transport_spi.h b/include/scsi/scsi_transport_spi.h
index 2b5930ba69e..fb5a2ffae93 100644
--- a/include/scsi/scsi_transport_spi.h
+++ b/include/scsi/scsi_transport_spi.h
@@ -22,6 +22,7 @@
22 22
23#include <linux/config.h> 23#include <linux/config.h>
24#include <linux/transport_class.h> 24#include <linux/transport_class.h>
25#include <linux/mutex.h>
25 26
26struct scsi_transport_template; 27struct scsi_transport_template;
27struct scsi_target; 28struct scsi_target;
diff --git a/kernel/audit.c b/kernel/audit.c
index d13ab7d2d89..0a813d2883e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -42,8 +42,8 @@
42 */ 42 */
43 43
44#include <linux/init.h> 44#include <linux/init.h>
45#include <asm/atomic.h>
46#include <asm/types.h> 45#include <asm/types.h>
46#include <asm/atomic.h>
47#include <linux/mm.h> 47#include <linux/mm.h>
48#include <linux/module.h> 48#include <linux/module.h>
49#include <linux/err.h> 49#include <linux/err.h>
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index d8a68509e72..685c25175d9 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -30,8 +30,8 @@
30 */ 30 */
31 31
32#include <linux/init.h> 32#include <linux/init.h>
33#include <asm/atomic.h>
34#include <asm/types.h> 33#include <asm/types.h>
34#include <asm/atomic.h>
35#include <linux/mm.h> 35#include <linux/mm.h>
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/mount.h> 37#include <linux/mount.h>
diff --git a/kernel/compat.c b/kernel/compat.c
index 256e5d9f064..1867290c37e 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -871,3 +871,31 @@ asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
871} 871}
872 872
873#endif /* __ARCH_WANT_COMPAT_SYS_TIME */ 873#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
874
875#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
876asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
877{
878 sigset_t newset;
879 compat_sigset_t newset32;
880
881 /* XXX: Don't preclude handling different sized sigset_t's. */
882 if (sigsetsize != sizeof(sigset_t))
883 return -EINVAL;
884
885 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
886 return -EFAULT;
887 sigset_from_compat(&newset, &newset32);
888 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
889
890 spin_lock_irq(&current->sighand->siglock);
891 current->saved_sigmask = current->blocked;
892 current->blocked = newset;
893 recalc_sigpending();
894 spin_unlock_irq(&current->sighand->siglock);
895
896 current->state = TASK_INTERRUPTIBLE;
897 schedule();
898 set_thread_flag(TIF_RESTORE_SIGMASK);
899 return -ERESTARTNOHAND;
900}
901#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
diff --git a/kernel/sched.c b/kernel/sched.c
index 788ecce1e0e..3ee2ae45125 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3850,6 +3850,10 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3850asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, 3850asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
3851 struct sched_param __user *param) 3851 struct sched_param __user *param)
3852{ 3852{
3853 /* negative values for policy are not valid */
3854 if (policy < 0)
3855 return -EINVAL;
3856
3853 return do_sched_setscheduler(pid, policy, param); 3857 return do_sched_setscheduler(pid, policy, param);
3854} 3858}
3855 3859
diff --git a/kernel/signal.c b/kernel/signal.c
index 5dafbd36d62..d3efafd8109 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2721,6 +2721,32 @@ sys_pause(void)
2721 2721
2722#endif 2722#endif
2723 2723
2724#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2725asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2726{
2727 sigset_t newset;
2728
2729 /* XXX: Don't preclude handling different sized sigset_t's. */
2730 if (sigsetsize != sizeof(sigset_t))
2731 return -EINVAL;
2732
2733 if (copy_from_user(&newset, unewset, sizeof(newset)))
2734 return -EFAULT;
2735 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2736
2737 spin_lock_irq(&current->sighand->siglock);
2738 current->saved_sigmask = current->blocked;
2739 current->blocked = newset;
2740 recalc_sigpending();
2741 spin_unlock_irq(&current->sighand->siglock);
2742
2743 current->state = TASK_INTERRUPTIBLE;
2744 schedule();
2745 set_thread_flag(TIF_RESTORE_SIGMASK);
2746 return -ERESTARTNOHAND;
2747}
2748#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2749
2724void __init signals_init(void) 2750void __init signals_init(void)
2725{ 2751{
2726 sigqueue_cachep = 2752 sigqueue_cachep =
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f5d69b6e29f..cb99a42f8b3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -870,6 +870,17 @@ static ctl_table vm_table[] = {
870 .strategy = &sysctl_jiffies, 870 .strategy = &sysctl_jiffies,
871 }, 871 },
872#endif 872#endif
873#ifdef CONFIG_NUMA
874 {
875 .ctl_name = VM_ZONE_RECLAIM_MODE,
876 .procname = "zone_reclaim_mode",
877 .data = &zone_reclaim_mode,
878 .maxlen = sizeof(zone_reclaim_mode),
879 .mode = 0644,
880 .proc_handler = &proc_dointvec,
881 .strategy = &zero,
882 },
883#endif
873 { .ctl_name = 0 } 884 { .ctl_name = 0 }
874}; 885};
875 886
diff --git a/mm/filemap.c b/mm/filemap.c
index a965b6b35f2..44da3d47699 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -94,6 +94,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
94 * ->private_lock (try_to_unmap_one) 94 * ->private_lock (try_to_unmap_one)
95 * ->tree_lock (try_to_unmap_one) 95 * ->tree_lock (try_to_unmap_one)
96 * ->zone.lru_lock (follow_page->mark_page_accessed) 96 * ->zone.lru_lock (follow_page->mark_page_accessed)
97 * ->zone.lru_lock (check_pte_range->isolate_lru_page)
97 * ->private_lock (page_remove_rmap->set_page_dirty) 98 * ->private_lock (page_remove_rmap->set_page_dirty)
98 * ->tree_lock (page_remove_rmap->set_page_dirty) 99 * ->tree_lock (page_remove_rmap->set_page_dirty)
99 * ->inode_lock (page_remove_rmap->set_page_dirty) 100 * ->inode_lock (page_remove_rmap->set_page_dirty)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3171f884d24..73790188b0e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -185,8 +185,8 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
185} 185}
186 186
187static void gather_stats(struct page *, void *); 187static void gather_stats(struct page *, void *);
188static void migrate_page_add(struct vm_area_struct *vma, 188static void migrate_page_add(struct page *page, struct list_head *pagelist,
189 struct page *page, struct list_head *pagelist, unsigned long flags); 189 unsigned long flags);
190 190
191/* Scan through pages checking if pages follow certain conditions. */ 191/* Scan through pages checking if pages follow certain conditions. */
192static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 192static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
@@ -208,6 +208,17 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
208 page = vm_normal_page(vma, addr, *pte); 208 page = vm_normal_page(vma, addr, *pte);
209 if (!page) 209 if (!page)
210 continue; 210 continue;
211 /*
212 * The check for PageReserved here is important to avoid
213 * handling zero pages and other pages that may have been
214 * marked special by the system.
215 *
216 * If the PageReserved would not be checked here then f.e.
217 * the location of the zero page could have an influence
218 * on MPOL_MF_STRICT, zero pages would be counted for
219 * the per node stats, and there would be useless attempts
220 * to put zero pages on the migration list.
221 */
211 if (PageReserved(page)) 222 if (PageReserved(page))
212 continue; 223 continue;
213 nid = page_to_nid(page); 224 nid = page_to_nid(page);
@@ -216,11 +227,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
216 227
217 if (flags & MPOL_MF_STATS) 228 if (flags & MPOL_MF_STATS)
218 gather_stats(page, private); 229 gather_stats(page, private);
219 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 230 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
220 spin_unlock(ptl); 231 migrate_page_add(page, private, flags);
221 migrate_page_add(vma, page, private, flags);
222 spin_lock(ptl);
223 }
224 else 232 else
225 break; 233 break;
226 } while (pte++, addr += PAGE_SIZE, addr != end); 234 } while (pte++, addr += PAGE_SIZE, addr != end);
@@ -309,6 +317,10 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
309 int err; 317 int err;
310 struct vm_area_struct *first, *vma, *prev; 318 struct vm_area_struct *first, *vma, *prev;
311 319
320 /* Clear the LRU lists so pages can be isolated */
321 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
322 lru_add_drain_all();
323
312 first = find_vma(mm, start); 324 first = find_vma(mm, start);
313 if (!first) 325 if (!first)
314 return ERR_PTR(-EFAULT); 326 return ERR_PTR(-EFAULT);
@@ -519,51 +531,15 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
519 * page migration 531 * page migration
520 */ 532 */
521 533
522/* Check if we are the only process mapping the page in question */ 534static void migrate_page_add(struct page *page, struct list_head *pagelist,
523static inline int single_mm_mapping(struct mm_struct *mm, 535 unsigned long flags)
524 struct address_space *mapping)
525{
526 struct vm_area_struct *vma;
527 struct prio_tree_iter iter;
528 int rc = 1;
529
530 spin_lock(&mapping->i_mmap_lock);
531 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
532 if (mm != vma->vm_mm) {
533 rc = 0;
534 goto out;
535 }
536 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
537 if (mm != vma->vm_mm) {
538 rc = 0;
539 goto out;
540 }
541out:
542 spin_unlock(&mapping->i_mmap_lock);
543 return rc;
544}
545
546/*
547 * Add a page to be migrated to the pagelist
548 */
549static void migrate_page_add(struct vm_area_struct *vma,
550 struct page *page, struct list_head *pagelist, unsigned long flags)
551{ 536{
552 /* 537 /*
553 * Avoid migrating a page that is shared by others and not writable. 538 * Avoid migrating a page that is shared with others.
554 */ 539 */
555 if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) || 540 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
556 mapping_writably_mapped(page->mapping) || 541 if (isolate_lru_page(page))
557 single_mm_mapping(vma->vm_mm, page->mapping)) {
558 int rc = isolate_lru_page(page);
559
560 if (rc == 1)
561 list_add(&page->lru, pagelist); 542 list_add(&page->lru, pagelist);
562 /*
563 * If the isolate attempt was not successful then we just
564 * encountered an unswappable page. Something must be wrong.
565 */
566 WARN_ON(rc == 0);
567 } 543 }
568} 544}
569 545
@@ -1000,6 +976,33 @@ static unsigned interleave_nodes(struct mempolicy *policy)
1000 return nid; 976 return nid;
1001} 977}
1002 978
979/*
980 * Depending on the memory policy provide a node from which to allocate the
981 * next slab entry.
982 */
983unsigned slab_node(struct mempolicy *policy)
984{
985 switch (policy->policy) {
986 case MPOL_INTERLEAVE:
987 return interleave_nodes(policy);
988
989 case MPOL_BIND:
990 /*
991 * Follow bind policy behavior and start allocation at the
992 * first node.
993 */
994 return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
995
996 case MPOL_PREFERRED:
997 if (policy->v.preferred_node >= 0)
998 return policy->v.preferred_node;
999 /* Fall through */
1000
1001 default:
1002 return numa_node_id();
1003 }
1004}
1005
1003/* Do static interleaving for a VMA with known offset. */ 1006/* Do static interleaving for a VMA with known offset. */
1004static unsigned offset_il_node(struct mempolicy *pol, 1007static unsigned offset_il_node(struct mempolicy *pol,
1005 struct vm_area_struct *vma, unsigned long off) 1008 struct vm_area_struct *vma, unsigned long off)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5240e426c1f..945559fb63d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -46,7 +46,7 @@
46static long ratelimit_pages = 32; 46static long ratelimit_pages = 32;
47 47
48static long total_pages; /* The total number of pages in the machine. */ 48static long total_pages; /* The total number of pages in the machine. */
49static int dirty_exceeded; /* Dirty mem may be over limit */ 49static int dirty_exceeded __cacheline_aligned_in_smp; /* Dirty mem may be over limit */
50 50
51/* 51/*
52 * When balance_dirty_pages decides that the caller needs to perform some 52 * When balance_dirty_pages decides that the caller needs to perform some
@@ -212,7 +212,8 @@ static void balance_dirty_pages(struct address_space *mapping)
212 if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh) 212 if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
213 break; 213 break;
214 214
215 dirty_exceeded = 1; 215 if (!dirty_exceeded)
216 dirty_exceeded = 1;
216 217
217 /* Note: nr_reclaimable denotes nr_dirty + nr_unstable. 218 /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
218 * Unstable writes are a feature of certain networked 219 * Unstable writes are a feature of certain networked
@@ -234,7 +235,7 @@ static void balance_dirty_pages(struct address_space *mapping)
234 blk_congestion_wait(WRITE, HZ/10); 235 blk_congestion_wait(WRITE, HZ/10);
235 } 236 }
236 237
237 if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh) 238 if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
238 dirty_exceeded = 0; 239 dirty_exceeded = 0;
239 240
240 if (writeback_in_progress(bdi)) 241 if (writeback_in_progress(bdi))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c2e29743a8d..df54e2fc8ee 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -878,7 +878,9 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
878 mark = (*z)->pages_high; 878 mark = (*z)->pages_high;
879 if (!zone_watermark_ok(*z, order, mark, 879 if (!zone_watermark_ok(*z, order, mark,
880 classzone_idx, alloc_flags)) 880 classzone_idx, alloc_flags))
881 continue; 881 if (!zone_reclaim_mode ||
882 !zone_reclaim(*z, gfp_mask, order))
883 continue;
882 } 884 }
883 885
884 page = buffered_rmqueue(zonelist, *z, order, gfp_mask); 886 page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
@@ -1595,13 +1597,22 @@ static void __init build_zonelists(pg_data_t *pgdat)
1595 prev_node = local_node; 1597 prev_node = local_node;
1596 nodes_clear(used_mask); 1598 nodes_clear(used_mask);
1597 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1599 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
1600 int distance = node_distance(local_node, node);
1601
1602 /*
1603 * If another node is sufficiently far away then it is better
1604 * to reclaim pages in a zone before going off node.
1605 */
1606 if (distance > RECLAIM_DISTANCE)
1607 zone_reclaim_mode = 1;
1608
1598 /* 1609 /*
1599 * We don't want to pressure a particular node. 1610 * We don't want to pressure a particular node.
1600 * So adding penalty to the first node in same 1611 * So adding penalty to the first node in same
1601 * distance group to make it round-robin. 1612 * distance group to make it round-robin.
1602 */ 1613 */
1603 if (node_distance(local_node, node) != 1614
1604 node_distance(local_node, prev_node)) 1615 if (distance != node_distance(local_node, prev_node))
1605 node_load[node] += load; 1616 node_load[node] += load;
1606 prev_node = node; 1617 prev_node = node;
1607 load--; 1618 load--;
diff --git a/mm/rmap.c b/mm/rmap.c
index dfbb89f99a1..d85a99d28c0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -33,7 +33,7 @@
33 * mapping->i_mmap_lock 33 * mapping->i_mmap_lock
34 * anon_vma->lock 34 * anon_vma->lock
35 * mm->page_table_lock or pte_lock 35 * mm->page_table_lock or pte_lock
36 * zone->lru_lock (in mark_page_accessed) 36 * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
37 * swap_lock (in swap_duplicate, swap_info_get) 37 * swap_lock (in swap_duplicate, swap_info_get)
38 * mmlist_lock (in mmput, drain_mmlist and others) 38 * mmlist_lock (in mmput, drain_mmlist and others)
39 * mapping->private_lock (in __set_page_dirty_buffers) 39 * mapping->private_lock (in __set_page_dirty_buffers)
diff --git a/mm/slab.c b/mm/slab.c
index 9374293a301..6f8495e2185 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
68 * Further notes from the original documentation: 68 * Further notes from the original documentation:
69 * 69 *
70 * 11 April '97. Started multi-threading - markhe 70 * 11 April '97. Started multi-threading - markhe
71 * The global cache-chain is protected by the semaphore 'cache_chain_sem'. 71 * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72 * The sem is only needed when accessing/extending the cache-chain, which 72 * The sem is only needed when accessing/extending the cache-chain, which
73 * can never happen inside an interrupt (kmem_cache_create(), 73 * can never happen inside an interrupt (kmem_cache_create(),
74 * kmem_cache_shrink() and kmem_cache_reap()). 74 * kmem_cache_shrink() and kmem_cache_reap()).
@@ -103,6 +103,8 @@
103#include <linux/rcupdate.h> 103#include <linux/rcupdate.h>
104#include <linux/string.h> 104#include <linux/string.h>
105#include <linux/nodemask.h> 105#include <linux/nodemask.h>
106#include <linux/mempolicy.h>
107#include <linux/mutex.h>
106 108
107#include <asm/uaccess.h> 109#include <asm/uaccess.h>
108#include <asm/cacheflush.h> 110#include <asm/cacheflush.h>
@@ -631,7 +633,7 @@ static kmem_cache_t cache_cache = {
631}; 633};
632 634
633/* Guard access to the cache-chain. */ 635/* Guard access to the cache-chain. */
634static struct semaphore cache_chain_sem; 636static DEFINE_MUTEX(cache_chain_mutex);
635static struct list_head cache_chain; 637static struct list_head cache_chain;
636 638
637/* 639/*
@@ -772,6 +774,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
772} 774}
773 775
774#ifdef CONFIG_NUMA 776#ifdef CONFIG_NUMA
777static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
778
775static inline struct array_cache **alloc_alien_cache(int node, int limit) 779static inline struct array_cache **alloc_alien_cache(int node, int limit)
776{ 780{
777 struct array_cache **ac_ptr; 781 struct array_cache **ac_ptr;
@@ -857,7 +861,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
857 861
858 switch (action) { 862 switch (action) {
859 case CPU_UP_PREPARE: 863 case CPU_UP_PREPARE:
860 down(&cache_chain_sem); 864 mutex_lock(&cache_chain_mutex);
861 /* we need to do this right in the beginning since 865 /* we need to do this right in the beginning since
862 * alloc_arraycache's are going to use this list. 866 * alloc_arraycache's are going to use this list.
863 * kmalloc_node allows us to add the slab to the right 867 * kmalloc_node allows us to add the slab to the right
@@ -912,7 +916,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
912 l3->shared = nc; 916 l3->shared = nc;
913 } 917 }
914 } 918 }
915 up(&cache_chain_sem); 919 mutex_unlock(&cache_chain_mutex);
916 break; 920 break;
917 case CPU_ONLINE: 921 case CPU_ONLINE:
918 start_cpu_timer(cpu); 922 start_cpu_timer(cpu);
@@ -921,7 +925,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
921 case CPU_DEAD: 925 case CPU_DEAD:
922 /* fall thru */ 926 /* fall thru */
923 case CPU_UP_CANCELED: 927 case CPU_UP_CANCELED:
924 down(&cache_chain_sem); 928 mutex_lock(&cache_chain_mutex);
925 929
926 list_for_each_entry(cachep, &cache_chain, next) { 930 list_for_each_entry(cachep, &cache_chain, next) {
927 struct array_cache *nc; 931 struct array_cache *nc;
@@ -973,13 +977,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
973 spin_unlock_irq(&cachep->spinlock); 977 spin_unlock_irq(&cachep->spinlock);
974 kfree(nc); 978 kfree(nc);
975 } 979 }
976 up(&cache_chain_sem); 980 mutex_unlock(&cache_chain_mutex);
977 break; 981 break;
978#endif 982#endif
979 } 983 }
980 return NOTIFY_OK; 984 return NOTIFY_OK;
981 bad: 985 bad:
982 up(&cache_chain_sem); 986 mutex_unlock(&cache_chain_mutex);
983 return NOTIFY_BAD; 987 return NOTIFY_BAD;
984} 988}
985 989
@@ -1047,7 +1051,6 @@ void __init kmem_cache_init(void)
1047 */ 1051 */
1048 1052
1049 /* 1) create the cache_cache */ 1053 /* 1) create the cache_cache */
1050 init_MUTEX(&cache_chain_sem);
1051 INIT_LIST_HEAD(&cache_chain); 1054 INIT_LIST_HEAD(&cache_chain);
1052 list_add(&cache_cache.next, &cache_chain); 1055 list_add(&cache_cache.next, &cache_chain);
1053 cache_cache.colour_off = cache_line_size(); 1056 cache_cache.colour_off = cache_line_size();
@@ -1168,10 +1171,10 @@ void __init kmem_cache_init(void)
1168 /* 6) resize the head arrays to their final sizes */ 1171 /* 6) resize the head arrays to their final sizes */
1169 { 1172 {
1170 kmem_cache_t *cachep; 1173 kmem_cache_t *cachep;
1171 down(&cache_chain_sem); 1174 mutex_lock(&cache_chain_mutex);
1172 list_for_each_entry(cachep, &cache_chain, next) 1175 list_for_each_entry(cachep, &cache_chain, next)
1173 enable_cpucache(cachep); 1176 enable_cpucache(cachep);
1174 up(&cache_chain_sem); 1177 mutex_unlock(&cache_chain_mutex);
1175 } 1178 }
1176 1179
1177 /* Done! */ 1180 /* Done! */
@@ -1590,7 +1593,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1590 BUG(); 1593 BUG();
1591 } 1594 }
1592 1595
1593 down(&cache_chain_sem); 1596 mutex_lock(&cache_chain_mutex);
1594 1597
1595 list_for_each(p, &cache_chain) { 1598 list_for_each(p, &cache_chain) {
1596 kmem_cache_t *pc = list_entry(p, kmem_cache_t, next); 1599 kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
@@ -1856,7 +1859,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1856 if (!cachep && (flags & SLAB_PANIC)) 1859 if (!cachep && (flags & SLAB_PANIC))
1857 panic("kmem_cache_create(): failed to create slab `%s'\n", 1860 panic("kmem_cache_create(): failed to create slab `%s'\n",
1858 name); 1861 name);
1859 up(&cache_chain_sem); 1862 mutex_unlock(&cache_chain_mutex);
1860 return cachep; 1863 return cachep;
1861} 1864}
1862EXPORT_SYMBOL(kmem_cache_create); 1865EXPORT_SYMBOL(kmem_cache_create);
@@ -2044,18 +2047,18 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
2044 lock_cpu_hotplug(); 2047 lock_cpu_hotplug();
2045 2048
2046 /* Find the cache in the chain of caches. */ 2049 /* Find the cache in the chain of caches. */
2047 down(&cache_chain_sem); 2050 mutex_lock(&cache_chain_mutex);
2048 /* 2051 /*
2049 * the chain is never empty, cache_cache is never destroyed 2052 * the chain is never empty, cache_cache is never destroyed
2050 */ 2053 */
2051 list_del(&cachep->next); 2054 list_del(&cachep->next);
2052 up(&cache_chain_sem); 2055 mutex_unlock(&cache_chain_mutex);
2053 2056
2054 if (__cache_shrink(cachep)) { 2057 if (__cache_shrink(cachep)) {
2055 slab_error(cachep, "Can't free all objects"); 2058 slab_error(cachep, "Can't free all objects");
2056 down(&cache_chain_sem); 2059 mutex_lock(&cache_chain_mutex);
2057 list_add(&cachep->next, &cache_chain); 2060 list_add(&cachep->next, &cache_chain);
2058 up(&cache_chain_sem); 2061 mutex_unlock(&cache_chain_mutex);
2059 unlock_cpu_hotplug(); 2062 unlock_cpu_hotplug();
2060 return 1; 2063 return 1;
2061 } 2064 }
@@ -2570,6 +2573,15 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
2570 void *objp; 2573 void *objp;
2571 struct array_cache *ac; 2574 struct array_cache *ac;
2572 2575
2576#ifdef CONFIG_NUMA
2577 if (unlikely(current->mempolicy && !in_interrupt())) {
2578 int nid = slab_node(current->mempolicy);
2579
2580 if (nid != numa_node_id())
2581 return __cache_alloc_node(cachep, flags, nid);
2582 }
2583#endif
2584
2573 check_irq_off(); 2585 check_irq_off();
2574 ac = ac_data(cachep); 2586 ac = ac_data(cachep);
2575 if (likely(ac->avail)) { 2587 if (likely(ac->avail)) {
@@ -3314,7 +3326,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
3314 * - clear the per-cpu caches for this CPU. 3326 * - clear the per-cpu caches for this CPU.
3315 * - return freeable pages to the main free memory pool. 3327 * - return freeable pages to the main free memory pool.
3316 * 3328 *
3317 * If we cannot acquire the cache chain semaphore then just give up - we'll 3329 * If we cannot acquire the cache chain mutex then just give up - we'll
3318 * try again on the next iteration. 3330 * try again on the next iteration.
3319 */ 3331 */
3320static void cache_reap(void *unused) 3332static void cache_reap(void *unused)
@@ -3322,7 +3334,7 @@ static void cache_reap(void *unused)
3322 struct list_head *walk; 3334 struct list_head *walk;
3323 struct kmem_list3 *l3; 3335 struct kmem_list3 *l3;
3324 3336
3325 if (down_trylock(&cache_chain_sem)) { 3337 if (!mutex_trylock(&cache_chain_mutex)) {
3326 /* Give up. Setup the next iteration. */ 3338 /* Give up. Setup the next iteration. */
3327 schedule_delayed_work(&__get_cpu_var(reap_work), 3339 schedule_delayed_work(&__get_cpu_var(reap_work),
3328 REAPTIMEOUT_CPUC); 3340 REAPTIMEOUT_CPUC);
@@ -3393,7 +3405,7 @@ static void cache_reap(void *unused)
3393 cond_resched(); 3405 cond_resched();
3394 } 3406 }
3395 check_irq_on(); 3407 check_irq_on();
3396 up(&cache_chain_sem); 3408 mutex_unlock(&cache_chain_mutex);
3397 drain_remote_pages(); 3409 drain_remote_pages();
3398 /* Setup the next iteration */ 3410 /* Setup the next iteration */
3399 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); 3411 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
@@ -3429,7 +3441,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
3429 loff_t n = *pos; 3441 loff_t n = *pos;
3430 struct list_head *p; 3442 struct list_head *p;
3431 3443
3432 down(&cache_chain_sem); 3444 mutex_lock(&cache_chain_mutex);
3433 if (!n) 3445 if (!n)
3434 print_slabinfo_header(m); 3446 print_slabinfo_header(m);
3435 p = cache_chain.next; 3447 p = cache_chain.next;
@@ -3451,7 +3463,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3451 3463
3452static void s_stop(struct seq_file *m, void *p) 3464static void s_stop(struct seq_file *m, void *p)
3453{ 3465{
3454 up(&cache_chain_sem); 3466 mutex_unlock(&cache_chain_mutex);
3455} 3467}
3456 3468
3457static int s_show(struct seq_file *m, void *p) 3469static int s_show(struct seq_file *m, void *p)
@@ -3603,7 +3615,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
3603 return -EINVAL; 3615 return -EINVAL;
3604 3616
3605 /* Find the cache in the chain of caches. */ 3617 /* Find the cache in the chain of caches. */
3606 down(&cache_chain_sem); 3618 mutex_lock(&cache_chain_mutex);
3607 res = -EINVAL; 3619 res = -EINVAL;
3608 list_for_each(p, &cache_chain) { 3620 list_for_each(p, &cache_chain) {
3609 kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next); 3621 kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
@@ -3620,7 +3632,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
3620 break; 3632 break;
3621 } 3633 }
3622 } 3634 }
3623 up(&cache_chain_sem); 3635 mutex_unlock(&cache_chain_mutex);
3624 if (res >= 0) 3636 if (res >= 0)
3625 res = count; 3637 res = count;
3626 return res; 3638 return res;
diff --git a/mm/swap.c b/mm/swap.c
index cbb48e721ab..bc2442a7b0e 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -174,6 +174,32 @@ void lru_add_drain(void)
174 put_cpu(); 174 put_cpu();
175} 175}
176 176
177#ifdef CONFIG_NUMA
178static void lru_add_drain_per_cpu(void *dummy)
179{
180 lru_add_drain();
181}
182
183/*
184 * Returns 0 for success
185 */
186int lru_add_drain_all(void)
187{
188 return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
189}
190
191#else
192
193/*
194 * Returns 0 for success
195 */
196int lru_add_drain_all(void)
197{
198 lru_add_drain();
199 return 0;
200}
201#endif
202
177/* 203/*
178 * This path almost never happens for VM activity - pages are normally 204 * This path almost never happens for VM activity - pages are normally
179 * freed via pagevecs. But it gets used by networking. 205 * freed via pagevecs. But it gets used by networking.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 957fef43fa6..f1e69c30d20 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -25,6 +25,7 @@
25#include <linux/rmap.h> 25#include <linux/rmap.h>
26#include <linux/security.h> 26#include <linux/security.h>
27#include <linux/backing-dev.h> 27#include <linux/backing-dev.h>
28#include <linux/mutex.h>
28#include <linux/capability.h> 29#include <linux/capability.h>
29#include <linux/syscalls.h> 30#include <linux/syscalls.h>
30 31
@@ -46,12 +47,12 @@ struct swap_list_t swap_list = {-1, -1};
46 47
47struct swap_info_struct swap_info[MAX_SWAPFILES]; 48struct swap_info_struct swap_info[MAX_SWAPFILES];
48 49
49static DECLARE_MUTEX(swapon_sem); 50static DEFINE_MUTEX(swapon_mutex);
50 51
51/* 52/*
52 * We need this because the bdev->unplug_fn can sleep and we cannot 53 * We need this because the bdev->unplug_fn can sleep and we cannot
53 * hold swap_lock while calling the unplug_fn. And swap_lock 54 * hold swap_lock while calling the unplug_fn. And swap_lock
54 * cannot be turned into a semaphore. 55 * cannot be turned into a mutex.
55 */ 56 */
56static DECLARE_RWSEM(swap_unplug_sem); 57static DECLARE_RWSEM(swap_unplug_sem);
57 58
@@ -1161,7 +1162,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
1161 up_write(&swap_unplug_sem); 1162 up_write(&swap_unplug_sem);
1162 1163
1163 destroy_swap_extents(p); 1164 destroy_swap_extents(p);
1164 down(&swapon_sem); 1165 mutex_lock(&swapon_mutex);
1165 spin_lock(&swap_lock); 1166 spin_lock(&swap_lock);
1166 drain_mmlist(); 1167 drain_mmlist();
1167 1168
@@ -1180,7 +1181,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
1180 p->swap_map = NULL; 1181 p->swap_map = NULL;
1181 p->flags = 0; 1182 p->flags = 0;
1182 spin_unlock(&swap_lock); 1183 spin_unlock(&swap_lock);
1183 up(&swapon_sem); 1184 mutex_unlock(&swapon_mutex);
1184 vfree(swap_map); 1185 vfree(swap_map);
1185 inode = mapping->host; 1186 inode = mapping->host;
1186 if (S_ISBLK(inode->i_mode)) { 1187 if (S_ISBLK(inode->i_mode)) {
@@ -1209,7 +1210,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
1209 int i; 1210 int i;
1210 loff_t l = *pos; 1211 loff_t l = *pos;
1211 1212
1212 down(&swapon_sem); 1213 mutex_lock(&swapon_mutex);
1213 1214
1214 for (i = 0; i < nr_swapfiles; i++, ptr++) { 1215 for (i = 0; i < nr_swapfiles; i++, ptr++) {
1215 if (!(ptr->flags & SWP_USED) || !ptr->swap_map) 1216 if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
@@ -1238,7 +1239,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1238 1239
1239static void swap_stop(struct seq_file *swap, void *v) 1240static void swap_stop(struct seq_file *swap, void *v)
1240{ 1241{
1241 up(&swapon_sem); 1242 mutex_unlock(&swapon_mutex);
1242} 1243}
1243 1244
1244static int swap_show(struct seq_file *swap, void *v) 1245static int swap_show(struct seq_file *swap, void *v)
@@ -1540,7 +1541,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1540 goto bad_swap; 1541 goto bad_swap;
1541 } 1542 }
1542 1543
1543 down(&swapon_sem); 1544 mutex_lock(&swapon_mutex);
1544 spin_lock(&swap_lock); 1545 spin_lock(&swap_lock);
1545 p->flags = SWP_ACTIVE; 1546 p->flags = SWP_ACTIVE;
1546 nr_swap_pages += nr_good_pages; 1547 nr_swap_pages += nr_good_pages;
@@ -1566,7 +1567,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1566 swap_info[prev].next = p - swap_info; 1567 swap_info[prev].next = p - swap_info;
1567 } 1568 }
1568 spin_unlock(&swap_lock); 1569 spin_unlock(&swap_lock);
1569 up(&swapon_sem); 1570 mutex_unlock(&swapon_mutex);
1570 error = 0; 1571 error = 0;
1571 goto out; 1572 goto out;
1572bad_swap: 1573bad_swap:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bf903b2d198..2e34b61a70c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -71,6 +71,9 @@ struct scan_control {
71 71
72 int may_writepage; 72 int may_writepage;
73 73
74 /* Can pages be swapped as part of reclaim? */
75 int may_swap;
76
74 /* This context's SWAP_CLUSTER_MAX. If freeing memory for 77 /* This context's SWAP_CLUSTER_MAX. If freeing memory for
75 * suspend, we effectively ignore SWAP_CLUSTER_MAX. 78 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
76 * In this context, it doesn't matter that we scan the 79 * In this context, it doesn't matter that we scan the
@@ -458,6 +461,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
458 * Try to allocate it some swap space here. 461 * Try to allocate it some swap space here.
459 */ 462 */
460 if (PageAnon(page) && !PageSwapCache(page)) { 463 if (PageAnon(page) && !PageSwapCache(page)) {
464 if (!sc->may_swap)
465 goto keep_locked;
461 if (!add_to_swap(page, GFP_ATOMIC)) 466 if (!add_to_swap(page, GFP_ATOMIC))
462 goto activate_locked; 467 goto activate_locked;
463 } 468 }
@@ -586,7 +591,7 @@ static inline void move_to_lru(struct page *page)
586} 591}
587 592
588/* 593/*
589 * Add isolated pages on the list back to the LRU 594 * Add isolated pages on the list back to the LRU.
590 * 595 *
591 * returns the number of pages put back. 596 * returns the number of pages put back.
592 */ 597 */
@@ -760,46 +765,33 @@ next:
760 return nr_failed + retry; 765 return nr_failed + retry;
761} 766}
762 767
763static void lru_add_drain_per_cpu(void *dummy)
764{
765 lru_add_drain();
766}
767
768/* 768/*
769 * Isolate one page from the LRU lists and put it on the 769 * Isolate one page from the LRU lists and put it on the
770 * indicated list. Do necessary cache draining if the 770 * indicated list with elevated refcount.
771 * page is not on the LRU lists yet.
772 * 771 *
773 * Result: 772 * Result:
774 * 0 = page not on LRU list 773 * 0 = page not on LRU list
775 * 1 = page removed from LRU list and added to the specified list. 774 * 1 = page removed from LRU list and added to the specified list.
776 * -ENOENT = page is being freed elsewhere.
777 */ 775 */
778int isolate_lru_page(struct page *page) 776int isolate_lru_page(struct page *page)
779{ 777{
780 int rc = 0; 778 int ret = 0;
781 struct zone *zone = page_zone(page);
782 779
783redo: 780 if (PageLRU(page)) {
784 spin_lock_irq(&zone->lru_lock); 781 struct zone *zone = page_zone(page);
785 rc = __isolate_lru_page(page); 782 spin_lock_irq(&zone->lru_lock);
786 if (rc == 1) { 783 if (TestClearPageLRU(page)) {
787 if (PageActive(page)) 784 ret = 1;
788 del_page_from_active_list(zone, page); 785 get_page(page);
789 else 786 if (PageActive(page))
790 del_page_from_inactive_list(zone, page); 787 del_page_from_active_list(zone, page);
791 } 788 else
792 spin_unlock_irq(&zone->lru_lock); 789 del_page_from_inactive_list(zone, page);
793 if (rc == 0) { 790 }
794 /* 791 spin_unlock_irq(&zone->lru_lock);
795 * Maybe this page is still waiting for a cpu to drain it
796 * from one of the lru lists?
797 */
798 rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
799 if (rc == 0 && PageLRU(page))
800 goto redo;
801 } 792 }
802 return rc; 793
794 return ret;
803} 795}
804#endif 796#endif
805 797
@@ -831,18 +823,20 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
831 page = lru_to_page(src); 823 page = lru_to_page(src);
832 prefetchw_prev_lru_page(page, src, flags); 824 prefetchw_prev_lru_page(page, src, flags);
833 825
834 switch (__isolate_lru_page(page)) { 826 if (!TestClearPageLRU(page))
835 case 1:
836 /* Succeeded to isolate page */
837 list_move(&page->lru, dst);
838 nr_taken++;
839 break;
840 case -ENOENT:
841 /* Not possible to isolate */
842 list_move(&page->lru, src);
843 break;
844 default:
845 BUG(); 827 BUG();
828 list_del(&page->lru);
829 if (get_page_testone(page)) {
830 /*
831 * It is being freed elsewhere
832 */
833 __put_page(page);
834 SetPageLRU(page);
835 list_add(&page->lru, src);
836 continue;
837 } else {
838 list_add(&page->lru, dst);
839 nr_taken++;
846 } 840 }
847 } 841 }
848 842
@@ -1177,6 +1171,7 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
1177 1171
1178 sc.gfp_mask = gfp_mask; 1172 sc.gfp_mask = gfp_mask;
1179 sc.may_writepage = 0; 1173 sc.may_writepage = 0;
1174 sc.may_swap = 1;
1180 1175
1181 inc_page_state(allocstall); 1176 inc_page_state(allocstall);
1182 1177
@@ -1279,6 +1274,7 @@ loop_again:
1279 total_reclaimed = 0; 1274 total_reclaimed = 0;
1280 sc.gfp_mask = GFP_KERNEL; 1275 sc.gfp_mask = GFP_KERNEL;
1281 sc.may_writepage = 0; 1276 sc.may_writepage = 0;
1277 sc.may_swap = 1;
1282 sc.nr_mapped = read_page_state(nr_mapped); 1278 sc.nr_mapped = read_page_state(nr_mapped);
1283 1279
1284 inc_page_state(pageoutrun); 1280 inc_page_state(pageoutrun);
@@ -1576,3 +1572,71 @@ static int __init kswapd_init(void)
1576} 1572}
1577 1573
1578module_init(kswapd_init) 1574module_init(kswapd_init)
1575
1576#ifdef CONFIG_NUMA
1577/*
1578 * Zone reclaim mode
1579 *
1580 * If non-zero call zone_reclaim when the number of free pages falls below
1581 * the watermarks.
1582 *
1583 * In the future we may add flags to the mode. However, the page allocator
1584 * should only have to check that zone_reclaim_mode != 0 before calling
1585 * zone_reclaim().
1586 */
1587int zone_reclaim_mode __read_mostly;
1588
1589/*
1590 * Mininum time between zone reclaim scans
1591 */
1592#define ZONE_RECLAIM_INTERVAL HZ/2
1593/*
1594 * Try to free up some pages from this zone through reclaim.
1595 */
1596int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1597{
1598 int nr_pages = 1 << order;
1599 struct task_struct *p = current;
1600 struct reclaim_state reclaim_state;
1601 struct scan_control sc = {
1602 .gfp_mask = gfp_mask,
1603 .may_writepage = 0,
1604 .may_swap = 0,
1605 .nr_mapped = read_page_state(nr_mapped),
1606 .nr_scanned = 0,
1607 .nr_reclaimed = 0,
1608 .priority = 0
1609 };
1610
1611 if (!(gfp_mask & __GFP_WAIT) ||
1612 zone->zone_pgdat->node_id != numa_node_id() ||
1613 zone->all_unreclaimable ||
1614 atomic_read(&zone->reclaim_in_progress) > 0)
1615 return 0;
1616
1617 if (time_before(jiffies,
1618 zone->last_unsuccessful_zone_reclaim + ZONE_RECLAIM_INTERVAL))
1619 return 0;
1620
1621 disable_swap_token();
1622
1623 if (nr_pages > SWAP_CLUSTER_MAX)
1624 sc.swap_cluster_max = nr_pages;
1625 else
1626 sc.swap_cluster_max = SWAP_CLUSTER_MAX;
1627
1628 cond_resched();
1629 p->flags |= PF_MEMALLOC;
1630 reclaim_state.reclaimed_slab = 0;
1631 p->reclaim_state = &reclaim_state;
1632 shrink_zone(zone, &sc);
1633 p->reclaim_state = NULL;
1634 current->flags &= ~PF_MEMALLOC;
1635
1636 if (sc.nr_reclaimed == 0)
1637 zone->last_unsuccessful_zone_reclaim = jiffies;
1638
1639 return sc.nr_reclaimed > nr_pages;
1640}
1641#endif
1642
diff --git a/net/Kconfig b/net/Kconfig
index 9296b269d67..bc603d9aea5 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -150,6 +150,7 @@ endif
150 150
151source "net/dccp/Kconfig" 151source "net/dccp/Kconfig"
152source "net/sctp/Kconfig" 152source "net/sctp/Kconfig"
153source "net/tipc/Kconfig"
153source "net/atm/Kconfig" 154source "net/atm/Kconfig"
154source "net/bridge/Kconfig" 155source "net/bridge/Kconfig"
155source "net/8021q/Kconfig" 156source "net/8021q/Kconfig"
@@ -159,7 +160,6 @@ source "net/ipx/Kconfig"
159source "drivers/net/appletalk/Kconfig" 160source "drivers/net/appletalk/Kconfig"
160source "net/x25/Kconfig" 161source "net/x25/Kconfig"
161source "net/lapb/Kconfig" 162source "net/lapb/Kconfig"
162source "net/tipc/Kconfig"
163 163
164config NET_DIVERT 164config NET_DIVERT
165 bool "Frame Diverter (EXPERIMENTAL)" 165 bool "Frame Diverter (EXPERIMENTAL)"
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3827f881f42..da16f8fd149 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1860,13 +1860,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
1860 */ 1860 */
1861 mod_cur_headers(pkt_dev); 1861 mod_cur_headers(pkt_dev);
1862 1862
1863 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16, GFP_ATOMIC); 1863 datalen = (odev->hard_header_len + 16) & ~0xf;
1864 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen, GFP_ATOMIC);
1864 if (!skb) { 1865 if (!skb) {
1865 sprintf(pkt_dev->result, "No memory"); 1866 sprintf(pkt_dev->result, "No memory");
1866 return NULL; 1867 return NULL;
1867 } 1868 }
1868 1869
1869 skb_reserve(skb, 16); 1870 skb_reserve(skb, datalen);
1870 1871
1871 /* Reserve for ethernet and IP header */ 1872 /* Reserve for ethernet and IP header */
1872 eth = (__u8 *) skb_push(skb, 14); 1873 eth = (__u8 *) skb_push(skb, 14);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 192092b89e5..d8ce7133cd8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -233,7 +233,18 @@ static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
233 case IGMPV3_MODE_IS_EXCLUDE: 233 case IGMPV3_MODE_IS_EXCLUDE:
234 if (gdeleted || sdeleted) 234 if (gdeleted || sdeleted)
235 return 0; 235 return 0;
236 return !(pmc->gsquery && !psf->sf_gsresp); 236 if (!(pmc->gsquery && !psf->sf_gsresp)) {
237 if (pmc->sfmode == MCAST_INCLUDE)
238 return 1;
239 /* don't include if this source is excluded
240 * in all filters
241 */
242 if (psf->sf_count[MCAST_INCLUDE])
243 return type == IGMPV3_MODE_IS_INCLUDE;
244 return pmc->sfcount[MCAST_EXCLUDE] ==
245 psf->sf_count[MCAST_EXCLUDE];
246 }
247 return 0;
237 case IGMPV3_CHANGE_TO_INCLUDE: 248 case IGMPV3_CHANGE_TO_INCLUDE:
238 if (gdeleted || sdeleted) 249 if (gdeleted || sdeleted)
239 return 0; 250 return 0;
@@ -385,7 +396,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
385 struct igmpv3_report *pih; 396 struct igmpv3_report *pih;
386 struct igmpv3_grec *pgr = NULL; 397 struct igmpv3_grec *pgr = NULL;
387 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; 398 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
388 int scount, first, isquery, truncate; 399 int scount, stotal, first, isquery, truncate;
389 400
390 if (pmc->multiaddr == IGMP_ALL_HOSTS) 401 if (pmc->multiaddr == IGMP_ALL_HOSTS)
391 return skb; 402 return skb;
@@ -395,25 +406,13 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
395 truncate = type == IGMPV3_MODE_IS_EXCLUDE || 406 truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
396 type == IGMPV3_CHANGE_TO_EXCLUDE; 407 type == IGMPV3_CHANGE_TO_EXCLUDE;
397 408
409 stotal = scount = 0;
410
398 psf_list = sdeleted ? &pmc->tomb : &pmc->sources; 411 psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
399 412
400 if (!*psf_list) { 413 if (!*psf_list)
401 if (type == IGMPV3_ALLOW_NEW_SOURCES || 414 goto empty_source;
402 type == IGMPV3_BLOCK_OLD_SOURCES) 415
403 return skb;
404 if (pmc->crcount || isquery) {
405 /* make sure we have room for group header and at
406 * least one source.
407 */
408 if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)+
409 sizeof(__u32)) {
410 igmpv3_sendpack(skb);
411 skb = NULL; /* add_grhead will get a new one */
412 }
413 skb = add_grhead(skb, pmc, type, &pgr);
414 }
415 return skb;
416 }
417 pih = skb ? (struct igmpv3_report *)skb->h.igmph : NULL; 416 pih = skb ? (struct igmpv3_report *)skb->h.igmph : NULL;
418 417
419 /* EX and TO_EX get a fresh packet, if needed */ 418 /* EX and TO_EX get a fresh packet, if needed */
@@ -426,7 +425,6 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
426 } 425 }
427 } 426 }
428 first = 1; 427 first = 1;
429 scount = 0;
430 psf_prev = NULL; 428 psf_prev = NULL;
431 for (psf=*psf_list; psf; psf=psf_next) { 429 for (psf=*psf_list; psf; psf=psf_next) {
432 u32 *psrc; 430 u32 *psrc;
@@ -460,7 +458,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
460 } 458 }
461 psrc = (u32 *)skb_put(skb, sizeof(u32)); 459 psrc = (u32 *)skb_put(skb, sizeof(u32));
462 *psrc = psf->sf_inaddr; 460 *psrc = psf->sf_inaddr;
463 scount++; 461 scount++; stotal++;
464 if ((type == IGMPV3_ALLOW_NEW_SOURCES || 462 if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
465 type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) { 463 type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
466 psf->sf_crcount--; 464 psf->sf_crcount--;
@@ -475,6 +473,21 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
475 } 473 }
476 psf_prev = psf; 474 psf_prev = psf;
477 } 475 }
476
477empty_source:
478 if (!stotal) {
479 if (type == IGMPV3_ALLOW_NEW_SOURCES ||
480 type == IGMPV3_BLOCK_OLD_SOURCES)
481 return skb;
482 if (pmc->crcount || isquery) {
483 /* make sure we have room for group header */
484 if (skb && AVAILABLE(skb)<sizeof(struct igmpv3_grec)) {
485 igmpv3_sendpack(skb);
486 skb = NULL; /* add_grhead will get a new one */
487 }
488 skb = add_grhead(skb, pmc, type, &pgr);
489 }
490 }
478 if (pgr) 491 if (pgr)
479 pgr->grec_nsrcs = htons(scount); 492 pgr->grec_nsrcs = htons(scount);
480 493
@@ -557,11 +570,11 @@ static void igmpv3_send_cr(struct in_device *in_dev)
557 skb = add_grec(skb, pmc, dtype, 1, 1); 570 skb = add_grec(skb, pmc, dtype, 1, 1);
558 } 571 }
559 if (pmc->crcount) { 572 if (pmc->crcount) {
560 pmc->crcount--;
561 if (pmc->sfmode == MCAST_EXCLUDE) { 573 if (pmc->sfmode == MCAST_EXCLUDE) {
562 type = IGMPV3_CHANGE_TO_INCLUDE; 574 type = IGMPV3_CHANGE_TO_INCLUDE;
563 skb = add_grec(skb, pmc, type, 1, 0); 575 skb = add_grec(skb, pmc, type, 1, 0);
564 } 576 }
577 pmc->crcount--;
565 if (pmc->crcount == 0) { 578 if (pmc->crcount == 0) {
566 igmpv3_clear_zeros(&pmc->tomb); 579 igmpv3_clear_zeros(&pmc->tomb);
567 igmpv3_clear_zeros(&pmc->sources); 580 igmpv3_clear_zeros(&pmc->sources);
@@ -594,12 +607,12 @@ static void igmpv3_send_cr(struct in_device *in_dev)
594 607
595 /* filter mode changes */ 608 /* filter mode changes */
596 if (pmc->crcount) { 609 if (pmc->crcount) {
597 pmc->crcount--;
598 if (pmc->sfmode == MCAST_EXCLUDE) 610 if (pmc->sfmode == MCAST_EXCLUDE)
599 type = IGMPV3_CHANGE_TO_EXCLUDE; 611 type = IGMPV3_CHANGE_TO_EXCLUDE;
600 else 612 else
601 type = IGMPV3_CHANGE_TO_INCLUDE; 613 type = IGMPV3_CHANGE_TO_INCLUDE;
602 skb = add_grec(skb, pmc, type, 0, 0); 614 skb = add_grec(skb, pmc, type, 0, 0);
615 pmc->crcount--;
603 } 616 }
604 spin_unlock_bh(&pmc->lock); 617 spin_unlock_bh(&pmc->lock);
605 } 618 }
@@ -735,11 +748,43 @@ static void igmp_timer_expire(unsigned long data)
735 ip_ma_put(im); 748 ip_ma_put(im);
736} 749}
737 750
738static void igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs) 751/* mark EXCLUDE-mode sources */
752static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
753{
754 struct ip_sf_list *psf;
755 int i, scount;
756
757 scount = 0;
758 for (psf=pmc->sources; psf; psf=psf->sf_next) {
759 if (scount == nsrcs)
760 break;
761 for (i=0; i<nsrcs; i++) {
762 /* skip inactive filters */
763 if (pmc->sfcount[MCAST_INCLUDE] ||
764 pmc->sfcount[MCAST_EXCLUDE] !=
765 psf->sf_count[MCAST_EXCLUDE])
766 continue;
767 if (srcs[i] == psf->sf_inaddr) {
768 scount++;
769 break;
770 }
771 }
772 }
773 pmc->gsquery = 0;
774 if (scount == nsrcs) /* all sources excluded */
775 return 0;
776 return 1;
777}
778
779static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
739{ 780{
740 struct ip_sf_list *psf; 781 struct ip_sf_list *psf;
741 int i, scount; 782 int i, scount;
742 783
784 if (pmc->sfmode == MCAST_EXCLUDE)
785 return igmp_xmarksources(pmc, nsrcs, srcs);
786
787 /* mark INCLUDE-mode sources */
743 scount = 0; 788 scount = 0;
744 for (psf=pmc->sources; psf; psf=psf->sf_next) { 789 for (psf=pmc->sources; psf; psf=psf->sf_next) {
745 if (scount == nsrcs) 790 if (scount == nsrcs)
@@ -751,6 +796,12 @@ static void igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
751 break; 796 break;
752 } 797 }
753 } 798 }
799 if (!scount) {
800 pmc->gsquery = 0;
801 return 0;
802 }
803 pmc->gsquery = 1;
804 return 1;
754} 805}
755 806
756static void igmp_heard_report(struct in_device *in_dev, u32 group) 807static void igmp_heard_report(struct in_device *in_dev, u32 group)
@@ -845,6 +896,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
845 */ 896 */
846 read_lock(&in_dev->mc_list_lock); 897 read_lock(&in_dev->mc_list_lock);
847 for (im=in_dev->mc_list; im!=NULL; im=im->next) { 898 for (im=in_dev->mc_list; im!=NULL; im=im->next) {
899 int changed;
900
848 if (group && group != im->multiaddr) 901 if (group && group != im->multiaddr)
849 continue; 902 continue;
850 if (im->multiaddr == IGMP_ALL_HOSTS) 903 if (im->multiaddr == IGMP_ALL_HOSTS)
@@ -854,10 +907,11 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
854 im->gsquery = im->gsquery && mark; 907 im->gsquery = im->gsquery && mark;
855 else 908 else
856 im->gsquery = mark; 909 im->gsquery = mark;
857 if (im->gsquery) 910 changed = !im->gsquery ||
858 igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs); 911 igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
859 spin_unlock_bh(&im->lock); 912 spin_unlock_bh(&im->lock);
860 igmp_mod_timer(im, max_delay); 913 if (changed)
914 igmp_mod_timer(im, max_delay);
861 } 915 }
862 read_unlock(&in_dev->mc_list_lock); 916 read_unlock(&in_dev->mc_list_lock);
863} 917}
@@ -1510,7 +1564,7 @@ static void sf_markstate(struct ip_mc_list *pmc)
1510 1564
1511static int sf_setstate(struct ip_mc_list *pmc) 1565static int sf_setstate(struct ip_mc_list *pmc)
1512{ 1566{
1513 struct ip_sf_list *psf; 1567 struct ip_sf_list *psf, *dpsf;
1514 int mca_xcount = pmc->sfcount[MCAST_EXCLUDE]; 1568 int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
1515 int qrv = pmc->interface->mr_qrv; 1569 int qrv = pmc->interface->mr_qrv;
1516 int new_in, rv; 1570 int new_in, rv;
@@ -1522,8 +1576,46 @@ static int sf_setstate(struct ip_mc_list *pmc)
1522 !psf->sf_count[MCAST_INCLUDE]; 1576 !psf->sf_count[MCAST_INCLUDE];
1523 } else 1577 } else
1524 new_in = psf->sf_count[MCAST_INCLUDE] != 0; 1578 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
1525 if (new_in != psf->sf_oldin) { 1579 if (new_in) {
1526 psf->sf_crcount = qrv; 1580 if (!psf->sf_oldin) {
1581 struct ip_sf_list *prev = 0;
1582
1583 for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) {
1584 if (dpsf->sf_inaddr == psf->sf_inaddr)
1585 break;
1586 prev = dpsf;
1587 }
1588 if (dpsf) {
1589 if (prev)
1590 prev->sf_next = dpsf->sf_next;
1591 else
1592 pmc->tomb = dpsf->sf_next;
1593 kfree(dpsf);
1594 }
1595 psf->sf_crcount = qrv;
1596 rv++;
1597 }
1598 } else if (psf->sf_oldin) {
1599
1600 psf->sf_crcount = 0;
1601 /*
1602 * add or update "delete" records if an active filter
1603 * is now inactive
1604 */
1605 for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next)
1606 if (dpsf->sf_inaddr == psf->sf_inaddr)
1607 break;
1608 if (!dpsf) {
1609 dpsf = (struct ip_sf_list *)
1610 kmalloc(sizeof(*dpsf), GFP_ATOMIC);
1611 if (!dpsf)
1612 continue;
1613 *dpsf = *psf;
1614 /* pmc->lock held by callers */
1615 dpsf->sf_next = pmc->tomb;
1616 pmc->tomb = dpsf;
1617 }
1618 dpsf->sf_crcount = qrv;
1527 rv++; 1619 rv++;
1528 } 1620 }
1529 } 1621 }
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 165a4d81efa..f29a12da510 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -40,12 +40,12 @@
40 */ 40 */
41 41
42#include <linux/config.h> 42#include <linux/config.h>
43#include <linux/types.h>
43#include <asm/atomic.h> 44#include <asm/atomic.h>
44#include <asm/byteorder.h> 45#include <asm/byteorder.h>
45#include <asm/current.h> 46#include <asm/current.h>
46#include <asm/uaccess.h> 47#include <asm/uaccess.h>
47#include <asm/ioctls.h> 48#include <asm/ioctls.h>
48#include <linux/types.h>
49#include <linux/stddef.h> 49#include <linux/stddef.h>
50#include <linux/slab.h> 50#include <linux/slab.h>
51#include <linux/errno.h> 51#include <linux/errno.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f2e82afc15b..d82c242ea70 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -241,7 +241,8 @@ static int rt_hash_log;
241static unsigned int rt_hash_rnd; 241static unsigned int rt_hash_rnd;
242 242
243static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 243static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
244#define RT_CACHE_STAT_INC(field) (__get_cpu_var(rt_cache_stat).field++) 244#define RT_CACHE_STAT_INC(field) \
245 (per_cpu(rt_cache_stat, raw_smp_processor_id()).field++)
245 246
246static int rt_intern_hash(unsigned hash, struct rtable *rth, 247static int rt_intern_hash(unsigned hash, struct rtable *rth,
247 struct rtable **res); 248 struct rtable **res);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 4aa6fc60357..cb78b50868e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -257,20 +257,26 @@ int sctp_rcv(struct sk_buff *skb)
257 */ 257 */
258 sctp_bh_lock_sock(sk); 258 sctp_bh_lock_sock(sk);
259 259
260 /* It is possible that the association could have moved to a different
261 * socket if it is peeled off. If so, update the sk.
262 */
263 if (sk != rcvr->sk) {
264 sctp_bh_lock_sock(rcvr->sk);
265 sctp_bh_unlock_sock(sk);
266 sk = rcvr->sk;
267 }
268
260 if (sock_owned_by_user(sk)) 269 if (sock_owned_by_user(sk))
261 sk_add_backlog(sk, skb); 270 sk_add_backlog(sk, skb);
262 else 271 else
263 sctp_backlog_rcv(sk, skb); 272 sctp_backlog_rcv(sk, skb);
264 273
265 /* Release the sock and any reference counts we took in the 274 /* Release the sock and the sock ref we took in the lookup calls.
266 * lookup calls. 275 * The asoc/ep ref will be released in sctp_backlog_rcv.
267 */ 276 */
268 sctp_bh_unlock_sock(sk); 277 sctp_bh_unlock_sock(sk);
269 if (asoc)
270 sctp_association_put(asoc);
271 else
272 sctp_endpoint_put(ep);
273 sock_put(sk); 278 sock_put(sk);
279
274 return ret; 280 return ret;
275 281
276discard_it: 282discard_it:
@@ -296,12 +302,50 @@ discard_release:
296int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) 302int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
297{ 303{
298 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 304 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
299 struct sctp_inq *inqueue = &chunk->rcvr->inqueue; 305 struct sctp_inq *inqueue = NULL;
300 306 struct sctp_ep_common *rcvr = NULL;
301 sctp_inq_push(inqueue, chunk); 307
308 rcvr = chunk->rcvr;
309
310 BUG_TRAP(rcvr->sk == sk);
311
312 if (rcvr->dead) {
313 sctp_chunk_free(chunk);
314 } else {
315 inqueue = &chunk->rcvr->inqueue;
316 sctp_inq_push(inqueue, chunk);
317 }
318
319 /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
320 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
321 sctp_association_put(sctp_assoc(rcvr));
322 else
323 sctp_endpoint_put(sctp_ep(rcvr));
324
302 return 0; 325 return 0;
303} 326}
304 327
328void sctp_backlog_migrate(struct sctp_association *assoc,
329 struct sock *oldsk, struct sock *newsk)
330{
331 struct sk_buff *skb;
332 struct sctp_chunk *chunk;
333
334 skb = oldsk->sk_backlog.head;
335 oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
336 while (skb != NULL) {
337 struct sk_buff *next = skb->next;
338
339 chunk = SCTP_INPUT_CB(skb)->chunk;
340 skb->next = NULL;
341 if (&assoc->base == chunk->rcvr)
342 sk_add_backlog(newsk, skb);
343 else
344 sk_add_backlog(oldsk, skb);
345 skb = next;
346 }
347}
348
305/* Handle icmp frag needed error. */ 349/* Handle icmp frag needed error. */
306void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, 350void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
307 struct sctp_transport *t, __u32 pmtu) 351 struct sctp_transport *t, __u32 pmtu)
@@ -544,10 +588,16 @@ int sctp_rcv_ootb(struct sk_buff *skb)
544 sctp_errhdr_t *err; 588 sctp_errhdr_t *err;
545 589
546 ch = (sctp_chunkhdr_t *) skb->data; 590 ch = (sctp_chunkhdr_t *) skb->data;
547 ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
548 591
549 /* Scan through all the chunks in the packet. */ 592 /* Scan through all the chunks in the packet. */
550 while (ch_end > (__u8 *)ch && ch_end < skb->tail) { 593 do {
594 /* Break out if chunk length is less then minimal. */
595 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
596 break;
597
598 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
599 if (ch_end > skb->tail)
600 break;
551 601
552 /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the 602 /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
553 * receiver MUST silently discard the OOTB packet and take no 603 * receiver MUST silently discard the OOTB packet and take no
@@ -578,8 +628,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
578 } 628 }
579 629
580 ch = (sctp_chunkhdr_t *) ch_end; 630 ch = (sctp_chunkhdr_t *) ch_end;
581 ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length)); 631 } while (ch_end < skb->tail);
582 }
583 632
584 return 0; 633 return 0;
585 634
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 2d33922c044..297b8951463 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -73,8 +73,10 @@ void sctp_inq_free(struct sctp_inq *queue)
73 /* If there is a packet which is currently being worked on, 73 /* If there is a packet which is currently being worked on,
74 * free it as well. 74 * free it as well.
75 */ 75 */
76 if (queue->in_progress) 76 if (queue->in_progress) {
77 sctp_chunk_free(queue->in_progress); 77 sctp_chunk_free(queue->in_progress);
78 queue->in_progress = NULL;
79 }
78 80
79 if (queue->malloced) { 81 if (queue->malloced) {
80 /* Dump the master memory segment. */ 82 /* Dump the master memory segment. */
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 6e4dc28874d..d47a52c303a 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -176,7 +176,7 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
176 176
177static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) 177static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
178{ 178{
179 if (*pos > sctp_ep_hashsize) 179 if (*pos >= sctp_ep_hashsize)
180 return NULL; 180 return NULL;
181 181
182 if (*pos < 0) 182 if (*pos < 0)
@@ -185,8 +185,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
185 if (*pos == 0) 185 if (*pos == 0)
186 seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n"); 186 seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n");
187 187
188 ++*pos;
189
190 return (void *)pos; 188 return (void *)pos;
191} 189}
192 190
@@ -198,11 +196,9 @@ static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
198 196
199static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos) 197static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
200{ 198{
201 if (*pos > sctp_ep_hashsize) 199 if (++*pos >= sctp_ep_hashsize)
202 return NULL; 200 return NULL;
203 201
204 ++*pos;
205
206 return pos; 202 return pos;
207} 203}
208 204
@@ -214,19 +210,19 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
214 struct sctp_ep_common *epb; 210 struct sctp_ep_common *epb;
215 struct sctp_endpoint *ep; 211 struct sctp_endpoint *ep;
216 struct sock *sk; 212 struct sock *sk;
217 int hash = *(int *)v; 213 int hash = *(loff_t *)v;
218 214
219 if (hash > sctp_ep_hashsize) 215 if (hash >= sctp_ep_hashsize)
220 return -ENOMEM; 216 return -ENOMEM;
221 217
222 head = &sctp_ep_hashtable[hash-1]; 218 head = &sctp_ep_hashtable[hash];
223 sctp_local_bh_disable(); 219 sctp_local_bh_disable();
224 read_lock(&head->lock); 220 read_lock(&head->lock);
225 for (epb = head->chain; epb; epb = epb->next) { 221 for (epb = head->chain; epb; epb = epb->next) {
226 ep = sctp_ep(epb); 222 ep = sctp_ep(epb);
227 sk = epb->sk; 223 sk = epb->sk;
228 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 224 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
229 sctp_sk(sk)->type, sk->sk_state, hash-1, 225 sctp_sk(sk)->type, sk->sk_state, hash,
230 epb->bind_addr.port, 226 epb->bind_addr.port,
231 sock_i_uid(sk), sock_i_ino(sk)); 227 sock_i_uid(sk), sock_i_ino(sk));
232 228
@@ -283,7 +279,7 @@ void sctp_eps_proc_exit(void)
283 279
284static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) 280static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
285{ 281{
286 if (*pos > sctp_assoc_hashsize) 282 if (*pos >= sctp_assoc_hashsize)
287 return NULL; 283 return NULL;
288 284
289 if (*pos < 0) 285 if (*pos < 0)
@@ -293,8 +289,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
293 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " 289 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
294 "RPORT LADDRS <-> RADDRS\n"); 290 "RPORT LADDRS <-> RADDRS\n");
295 291
296 ++*pos;
297
298 return (void *)pos; 292 return (void *)pos;
299} 293}
300 294
@@ -306,11 +300,9 @@ static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
306 300
307static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos) 301static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
308{ 302{
309 if (*pos > sctp_assoc_hashsize) 303 if (++*pos >= sctp_assoc_hashsize)
310 return NULL; 304 return NULL;
311 305
312 ++*pos;
313
314 return pos; 306 return pos;
315} 307}
316 308
@@ -321,12 +313,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
321 struct sctp_ep_common *epb; 313 struct sctp_ep_common *epb;
322 struct sctp_association *assoc; 314 struct sctp_association *assoc;
323 struct sock *sk; 315 struct sock *sk;
324 int hash = *(int *)v; 316 int hash = *(loff_t *)v;
325 317
326 if (hash > sctp_assoc_hashsize) 318 if (hash >= sctp_assoc_hashsize)
327 return -ENOMEM; 319 return -ENOMEM;
328 320
329 head = &sctp_assoc_hashtable[hash-1]; 321 head = &sctp_assoc_hashtable[hash];
330 sctp_local_bh_disable(); 322 sctp_local_bh_disable();
331 read_lock(&head->lock); 323 read_lock(&head->lock);
332 for (epb = head->chain; epb; epb = epb->next) { 324 for (epb = head->chain; epb; epb = epb->next) {
@@ -335,7 +327,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
335 seq_printf(seq, 327 seq_printf(seq,
336 "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ", 328 "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ",
337 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 329 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
338 assoc->state, hash-1, assoc->assoc_id, 330 assoc->state, hash, assoc->assoc_id,
339 (sk->sk_rcvbuf - assoc->rwnd), 331 (sk->sk_rcvbuf - assoc->rwnd),
340 assoc->sndbuf_used, 332 assoc->sndbuf_used,
341 sock_i_uid(sk), sock_i_ino(sk), 333 sock_i_uid(sk), sock_i_ino(sk),
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 556c495c692..5e0de3c0eea 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1275,7 +1275,12 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1275 unsigned int keylen; 1275 unsigned int keylen;
1276 char *key; 1276 char *key;
1277 1277
1278 headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE; 1278 /* Header size is static data prior to the actual cookie, including
1279 * any padding.
1280 */
1281 headersize = sizeof(sctp_paramhdr_t) +
1282 (sizeof(struct sctp_signed_cookie) -
1283 sizeof(struct sctp_cookie));
1279 bodysize = sizeof(struct sctp_cookie) 1284 bodysize = sizeof(struct sctp_cookie)
1280 + ntohs(init_chunk->chunk_hdr->length) + addrs_len; 1285 + ntohs(init_chunk->chunk_hdr->length) + addrs_len;
1281 1286
@@ -1354,7 +1359,7 @@ struct sctp_association *sctp_unpack_cookie(
1354 struct sctp_signed_cookie *cookie; 1359 struct sctp_signed_cookie *cookie;
1355 struct sctp_cookie *bear_cookie; 1360 struct sctp_cookie *bear_cookie;
1356 int headersize, bodysize, fixed_size; 1361 int headersize, bodysize, fixed_size;
1357 __u8 digest[SCTP_SIGNATURE_SIZE]; 1362 __u8 *digest = ep->digest;
1358 struct scatterlist sg; 1363 struct scatterlist sg;
1359 unsigned int keylen, len; 1364 unsigned int keylen, len;
1360 char *key; 1365 char *key;
@@ -1362,7 +1367,12 @@ struct sctp_association *sctp_unpack_cookie(
1362 struct sk_buff *skb = chunk->skb; 1367 struct sk_buff *skb = chunk->skb;
1363 struct timeval tv; 1368 struct timeval tv;
1364 1369
1365 headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE; 1370 /* Header size is static data prior to the actual cookie, including
1371 * any padding.
1372 */
1373 headersize = sizeof(sctp_chunkhdr_t) +
1374 (sizeof(struct sctp_signed_cookie) -
1375 sizeof(struct sctp_cookie));
1366 bodysize = ntohs(chunk->chunk_hdr->length) - headersize; 1376 bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
1367 fixed_size = headersize + sizeof(struct sctp_cookie); 1377 fixed_size = headersize + sizeof(struct sctp_cookie);
1368 1378
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b8b38aba92b..8d1dc24bab4 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1300,7 +1300,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1300 "T1 INIT Timeout adjustment" 1300 "T1 INIT Timeout adjustment"
1301 " init_err_counter: %d" 1301 " init_err_counter: %d"
1302 " cycle: %d" 1302 " cycle: %d"
1303 " timeout: %d\n", 1303 " timeout: %ld\n",
1304 asoc->init_err_counter, 1304 asoc->init_err_counter,
1305 asoc->init_cycle, 1305 asoc->init_cycle,
1306 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]); 1306 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]);
@@ -1328,7 +1328,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1328 SCTP_DEBUG_PRINTK( 1328 SCTP_DEBUG_PRINTK(
1329 "T1 COOKIE Timeout adjustment" 1329 "T1 COOKIE Timeout adjustment"
1330 " init_err_counter: %d" 1330 " init_err_counter: %d"
1331 " timeout: %d\n", 1331 " timeout: %ld\n",
1332 asoc->init_err_counter, 1332 asoc->init_err_counter,
1333 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]); 1333 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
1334 1334
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 477d7f80dba..71c9a961c32 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3090,6 +3090,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3090 break; 3090 break;
3091 3091
3092 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 3092 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3093 if (ch_end > skb->tail)
3094 break;
3093 3095
3094 if (SCTP_CID_SHUTDOWN_ACK == ch->type) 3096 if (SCTP_CID_SHUTDOWN_ACK == ch->type)
3095 ootb_shut_ack = 1; 3097 ootb_shut_ack = 1;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c98ee375ba5..fb1821d9f33 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2995,7 +2995,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
2995 sp->hbinterval = jiffies_to_msecs(sctp_hb_interval); 2995 sp->hbinterval = jiffies_to_msecs(sctp_hb_interval);
2996 sp->pathmaxrxt = sctp_max_retrans_path; 2996 sp->pathmaxrxt = sctp_max_retrans_path;
2997 sp->pathmtu = 0; // allow default discovery 2997 sp->pathmtu = 0; // allow default discovery
2998 sp->sackdelay = sctp_sack_timeout; 2998 sp->sackdelay = jiffies_to_msecs(sctp_sack_timeout);
2999 sp->param_flags = SPP_HB_ENABLE | 2999 sp->param_flags = SPP_HB_ENABLE |
3000 SPP_PMTUD_ENABLE | 3000 SPP_PMTUD_ENABLE |
3001 SPP_SACKDELAY_ENABLE; 3001 SPP_SACKDELAY_ENABLE;
@@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
5602 */ 5602 */
5603 newsp->type = type; 5603 newsp->type = type;
5604 5604
5605 spin_lock_bh(&oldsk->sk_lock.slock);
5606 /* Migrate the backlog from oldsk to newsk. */
5607 sctp_backlog_migrate(assoc, oldsk, newsk);
5605 /* Migrate the association to the new socket. */ 5608 /* Migrate the association to the new socket. */
5606 sctp_assoc_migrate(assoc, newsk); 5609 sctp_assoc_migrate(assoc, newsk);
5610 spin_unlock_bh(&oldsk->sk_lock.slock);
5607 5611
5608 /* If the association on the newsk is already closed before accept() 5612 /* If the association on the newsk is already closed before accept()
5609 * is called, set RCV_SHUTDOWN flag. 5613 * is called, set RCV_SHUTDOWN flag.
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index fcd7096c953..dc6f3ff3235 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -159,12 +159,9 @@ static ctl_table sctp_table[] = {
159 .ctl_name = NET_SCTP_PRESERVE_ENABLE, 159 .ctl_name = NET_SCTP_PRESERVE_ENABLE,
160 .procname = "cookie_preserve_enable", 160 .procname = "cookie_preserve_enable",
161 .data = &sctp_cookie_preserve_enable, 161 .data = &sctp_cookie_preserve_enable,
162 .maxlen = sizeof(long), 162 .maxlen = sizeof(int),
163 .mode = 0644, 163 .mode = 0644,
164 .proc_handler = &proc_doulongvec_ms_jiffies_minmax, 164 .proc_handler = &proc_dointvec
165 .strategy = &sctp_sysctl_jiffies_ms,
166 .extra1 = &rto_timer_min,
167 .extra2 = &rto_timer_max
168 }, 165 },
169 { 166 {
170 .ctl_name = NET_SCTP_RTO_ALPHA, 167 .ctl_name = NET_SCTP_RTO_ALPHA,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 68d73e2dd15..160f62ad1cc 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -350,7 +350,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
350 tp->rto_pending = 0; 350 tp->rto_pending = 0;
351 351
352 SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " 352 SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
353 "rttvar: %d, rto: %d\n", __FUNCTION__, 353 "rttvar: %d, rto: %ld\n", __FUNCTION__,
354 tp, rtt, tp->srtt, tp->rttvar, tp->rto); 354 tp, rtt, tp->srtt, tp->rttvar, tp->rto);
355} 355}
356 356
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e4ada15ed85..23632d84d8d 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -420,7 +420,8 @@ static int rsc_parse(struct cache_detail *cd,
420 gss_mech_put(gm); 420 gss_mech_put(gm);
421 goto out; 421 goto out;
422 } 422 }
423 if (gss_import_sec_context(buf, len, gm, &rsci.mechctx)) { 423 status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
424 if (status) {
424 gss_mech_put(gm); 425 gss_mech_put(gm);
425 goto out; 426 goto out;
426 } 427 }
@@ -586,6 +587,20 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
586} 587}
587 588
588static int 589static int
590gss_write_null_verf(struct svc_rqst *rqstp)
591{
592 u32 *p;
593
594 svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_NULL));
595 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
596 /* don't really need to check if head->iov_len > PAGE_SIZE ... */
597 *p++ = 0;
598 if (!xdr_ressize_check(rqstp, p))
599 return -1;
600 return 0;
601}
602
603static int
589gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) 604gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
590{ 605{
591 u32 xdr_seq; 606 u32 xdr_seq;
@@ -741,6 +756,21 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
741 return SVC_OK; 756 return SVC_OK;
742} 757}
743 758
759static inline int
760gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
761{
762 struct rsc *rsci;
763
764 if (rsip->major_status != GSS_S_COMPLETE)
765 return gss_write_null_verf(rqstp);
766 rsci = gss_svc_searchbyctx(&rsip->out_handle);
767 if (rsci == NULL) {
768 rsip->major_status = GSS_S_NO_CONTEXT;
769 return gss_write_null_verf(rqstp);
770 }
771 return gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
772}
773
744/* 774/*
745 * Accept an rpcsec packet. 775 * Accept an rpcsec packet.
746 * If context establishment, punt to user space 776 * If context establishment, punt to user space
@@ -876,11 +906,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
876 case -ENOENT: 906 case -ENOENT:
877 goto drop; 907 goto drop;
878 case 0: 908 case 0:
879 rsci = gss_svc_searchbyctx(&rsip->out_handle); 909 if (gss_write_init_verf(rqstp, rsip))
880 if (!rsci) {
881 goto drop;
882 }
883 if (gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN))
884 goto drop; 910 goto drop;
885 if (resv->iov_len + 4 > PAGE_SIZE) 911 if (resv->iov_len + 4 > PAGE_SIZE)
886 goto drop; 912 goto drop;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index e67613e4eb1..50580620e89 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1527,6 +1527,7 @@ svc_defer(struct cache_req *req)
1527 dr->handle.owner = rqstp->rq_server; 1527 dr->handle.owner = rqstp->rq_server;
1528 dr->prot = rqstp->rq_prot; 1528 dr->prot = rqstp->rq_prot;
1529 dr->addr = rqstp->rq_addr; 1529 dr->addr = rqstp->rq_addr;
1530 dr->daddr = rqstp->rq_daddr;
1530 dr->argslen = rqstp->rq_arg.len >> 2; 1531 dr->argslen = rqstp->rq_arg.len >> 2;
1531 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); 1532 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
1532 } 1533 }
@@ -1552,6 +1553,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
1552 rqstp->rq_arg.len = dr->argslen<<2; 1553 rqstp->rq_arg.len = dr->argslen<<2;
1553 rqstp->rq_prot = dr->prot; 1554 rqstp->rq_prot = dr->prot;
1554 rqstp->rq_addr = dr->addr; 1555 rqstp->rq_addr = dr->addr;
1556 rqstp->rq_daddr = dr->daddr;
1555 return dr->argslen<<2; 1557 return dr->argslen<<2;
1556} 1558}
1557 1559
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 05ab18e62de..3891cc00087 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -8,7 +8,12 @@ menu "TIPC Configuration (EXPERIMENTAL)"
8config TIPC 8config TIPC
9 tristate "The TIPC Protocol (EXPERIMENTAL)" 9 tristate "The TIPC Protocol (EXPERIMENTAL)"
10 ---help--- 10 ---help---
11 TBD. 11 The Transparent Inter Process Communication (TIPC) protocol is
12 specially designed for intra cluster communication. This protocol
13 originates from Ericsson where it has been used in carrier grade
14 cluster applications for many years.
15
16 For more information about TIPC, see http://tipc.sourceforge.net.
12 17
13 This protocol support is also available as a module ( = code which 18 This protocol support is also available as a module ( = code which
14 can be inserted in and removed from the running kernel whenever you 19 can be inserted in and removed from the running kernel whenever you
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index eca22260c98..0be25e175b9 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -47,7 +47,7 @@ u32 tipc_get_addr(void)
47} 47}
48 48
49/** 49/**
50 * addr_domain_valid - validates a network domain address 50 * tipc_addr_domain_valid - validates a network domain address
51 * 51 *
52 * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>, 52 * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
53 * where Z, C, and N are non-zero and do not exceed the configured limits. 53 * where Z, C, and N are non-zero and do not exceed the configured limits.
@@ -55,7 +55,7 @@ u32 tipc_get_addr(void)
55 * Returns 1 if domain address is valid, otherwise 0 55 * Returns 1 if domain address is valid, otherwise 0
56 */ 56 */
57 57
58int addr_domain_valid(u32 addr) 58int tipc_addr_domain_valid(u32 addr)
59{ 59{
60 u32 n = tipc_node(addr); 60 u32 n = tipc_node(addr);
61 u32 c = tipc_cluster(addr); 61 u32 c = tipc_cluster(addr);
@@ -79,7 +79,7 @@ int addr_domain_valid(u32 addr)
79} 79}
80 80
81/** 81/**
82 * addr_node_valid - validates a proposed network address for this node 82 * tipc_addr_node_valid - validates a proposed network address for this node
83 * 83 *
84 * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed 84 * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
85 * the configured limits. 85 * the configured limits.
@@ -87,8 +87,8 @@ int addr_domain_valid(u32 addr)
87 * Returns 1 if address can be used, otherwise 0 87 * Returns 1 if address can be used, otherwise 0
88 */ 88 */
89 89
90int addr_node_valid(u32 addr) 90int tipc_addr_node_valid(u32 addr)
91{ 91{
92 return (addr_domain_valid(addr) && tipc_node(addr)); 92 return (tipc_addr_domain_valid(addr) && tipc_node(addr));
93} 93}
94 94
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 02ca71783e2..bcfebb3cbbf 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -122,7 +122,7 @@ static inline char *addr_string_fill(char *string, u32 addr)
122 return string; 122 return string;
123} 123}
124 124
125int addr_domain_valid(u32); 125int tipc_addr_domain_valid(u32);
126int addr_node_valid(u32 addr); 126int tipc_addr_node_valid(u32 addr);
127 127
128#endif 128#endif
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 9713d622efb..a7b04f397c1 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -82,7 +82,7 @@ struct bcbearer {
82 struct bearer bearer; 82 struct bearer bearer;
83 struct media media; 83 struct media media;
84 struct bcbearer_pair bpairs[MAX_BEARERS]; 84 struct bcbearer_pair bpairs[MAX_BEARERS];
85 struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI]; 85 struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
86}; 86};
87 87
88/** 88/**
@@ -104,7 +104,7 @@ static struct bclink *bclink = NULL;
104static struct link *bcl = NULL; 104static struct link *bcl = NULL;
105static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED; 105static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
106 106
107char bc_link_name[] = "multicast-link"; 107char tipc_bclink_name[] = "multicast-link";
108 108
109 109
110static inline u32 buf_seqno(struct sk_buff *buf) 110static inline u32 buf_seqno(struct sk_buff *buf)
@@ -178,19 +178,19 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
178 buf = buf->next; 178 buf = buf->next;
179 } 179 }
180 if (buf != NULL) 180 if (buf != NULL)
181 link_retransmit(bcl, buf, mod(to - after)); 181 tipc_link_retransmit(bcl, buf, mod(to - after));
182 spin_unlock_bh(&bc_lock); 182 spin_unlock_bh(&bc_lock);
183} 183}
184 184
185/** 185/**
186 * bclink_acknowledge - handle acknowledgement of broadcast packets 186 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
187 * @n_ptr: node that sent acknowledgement info 187 * @n_ptr: node that sent acknowledgement info
188 * @acked: broadcast sequence # that has been acknowledged 188 * @acked: broadcast sequence # that has been acknowledged
189 * 189 *
190 * Node is locked, bc_lock unlocked. 190 * Node is locked, bc_lock unlocked.
191 */ 191 */
192 192
193void bclink_acknowledge(struct node *n_ptr, u32 acked) 193void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
194{ 194{
195 struct sk_buff *crs; 195 struct sk_buff *crs;
196 struct sk_buff *next; 196 struct sk_buff *next;
@@ -226,16 +226,16 @@ void bclink_acknowledge(struct node *n_ptr, u32 acked)
226 /* Try resolving broadcast link congestion, if necessary */ 226 /* Try resolving broadcast link congestion, if necessary */
227 227
228 if (unlikely(bcl->next_out)) 228 if (unlikely(bcl->next_out))
229 link_push_queue(bcl); 229 tipc_link_push_queue(bcl);
230 if (unlikely(released && !list_empty(&bcl->waiting_ports))) 230 if (unlikely(released && !list_empty(&bcl->waiting_ports)))
231 link_wakeup_ports(bcl, 0); 231 tipc_link_wakeup_ports(bcl, 0);
232 spin_unlock_bh(&bc_lock); 232 spin_unlock_bh(&bc_lock);
233} 233}
234 234
235/** 235/**
236 * bclink_send_ack - unicast an ACK msg 236 * bclink_send_ack - unicast an ACK msg
237 * 237 *
238 * net_lock and node lock set 238 * tipc_net_lock and node lock set
239 */ 239 */
240 240
241static void bclink_send_ack(struct node *n_ptr) 241static void bclink_send_ack(struct node *n_ptr)
@@ -243,13 +243,13 @@ static void bclink_send_ack(struct node *n_ptr)
243 struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; 243 struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
244 244
245 if (l_ptr != NULL) 245 if (l_ptr != NULL)
246 link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 246 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
247} 247}
248 248
249/** 249/**
250 * bclink_send_nack- broadcast a NACK msg 250 * bclink_send_nack- broadcast a NACK msg
251 * 251 *
252 * net_lock and node lock set 252 * tipc_net_lock and node lock set
253 */ 253 */
254 254
255static void bclink_send_nack(struct node *n_ptr) 255static void bclink_send_nack(struct node *n_ptr)
@@ -271,11 +271,11 @@ static void bclink_send_nack(struct node *n_ptr)
271 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); 271 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
272 msg_set_bcast_tag(msg, tipc_own_tag); 272 msg_set_bcast_tag(msg, tipc_own_tag);
273 273
274 if (bearer_send(&bcbearer->bearer, buf, 0)) { 274 if (tipc_bearer_send(&bcbearer->bearer, buf, 0)) {
275 bcl->stats.sent_nacks++; 275 bcl->stats.sent_nacks++;
276 buf_discard(buf); 276 buf_discard(buf);
277 } else { 277 } else {
278 bearer_schedule(bcl->b_ptr, bcl); 278 tipc_bearer_schedule(bcl->b_ptr, bcl);
279 bcl->proto_msg_queue = buf; 279 bcl->proto_msg_queue = buf;
280 bcl->stats.bearer_congs++; 280 bcl->stats.bearer_congs++;
281 } 281 }
@@ -291,12 +291,12 @@ static void bclink_send_nack(struct node *n_ptr)
291} 291}
292 292
293/** 293/**
294 * bclink_check_gap - send a NACK if a sequence gap exists 294 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
295 * 295 *
296 * net_lock and node lock set 296 * tipc_net_lock and node lock set
297 */ 297 */
298 298
299void bclink_check_gap(struct node *n_ptr, u32 last_sent) 299void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
300{ 300{
301 if (!n_ptr->bclink.supported || 301 if (!n_ptr->bclink.supported ||
302 less_eq(last_sent, mod(n_ptr->bclink.last_in))) 302 less_eq(last_sent, mod(n_ptr->bclink.last_in)))
@@ -309,19 +309,19 @@ void bclink_check_gap(struct node *n_ptr, u32 last_sent)
309} 309}
310 310
311/** 311/**
312 * bclink_peek_nack - process a NACK msg meant for another node 312 * tipc_bclink_peek_nack - process a NACK msg meant for another node
313 * 313 *
314 * Only net_lock set. 314 * Only tipc_net_lock set.
315 */ 315 */
316 316
317void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to) 317void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
318{ 318{
319 struct node *n_ptr = node_find(dest); 319 struct node *n_ptr = tipc_node_find(dest);
320 u32 my_after, my_to; 320 u32 my_after, my_to;
321 321
322 if (unlikely(!n_ptr || !node_is_up(n_ptr))) 322 if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
323 return; 323 return;
324 node_lock(n_ptr); 324 tipc_node_lock(n_ptr);
325 /* 325 /*
326 * Modify gap to suppress unnecessary NACKs from this node 326 * Modify gap to suppress unnecessary NACKs from this node
327 */ 327 */
@@ -364,20 +364,20 @@ void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
364 bclink_set_gap(n_ptr); 364 bclink_set_gap(n_ptr);
365 } 365 }
366 } 366 }
367 node_unlock(n_ptr); 367 tipc_node_unlock(n_ptr);
368} 368}
369 369
370/** 370/**
371 * bclink_send_msg - broadcast a packet to all nodes in cluster 371 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
372 */ 372 */
373 373
374int bclink_send_msg(struct sk_buff *buf) 374int tipc_bclink_send_msg(struct sk_buff *buf)
375{ 375{
376 int res; 376 int res;
377 377
378 spin_lock_bh(&bc_lock); 378 spin_lock_bh(&bc_lock);
379 379
380 res = link_send_buf(bcl, buf); 380 res = tipc_link_send_buf(bcl, buf);
381 if (unlikely(res == -ELINKCONG)) 381 if (unlikely(res == -ELINKCONG))
382 buf_discard(buf); 382 buf_discard(buf);
383 else 383 else
@@ -393,22 +393,22 @@ int bclink_send_msg(struct sk_buff *buf)
393} 393}
394 394
395/** 395/**
396 * bclink_recv_pkt - receive a broadcast packet, and deliver upwards 396 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
397 * 397 *
398 * net_lock is read_locked, no other locks set 398 * tipc_net_lock is read_locked, no other locks set
399 */ 399 */
400 400
401void bclink_recv_pkt(struct sk_buff *buf) 401void tipc_bclink_recv_pkt(struct sk_buff *buf)
402{ 402{
403 struct tipc_msg *msg = buf_msg(buf); 403 struct tipc_msg *msg = buf_msg(buf);
404 struct node* node = node_find(msg_prevnode(msg)); 404 struct node* node = tipc_node_find(msg_prevnode(msg));
405 u32 next_in; 405 u32 next_in;
406 u32 seqno; 406 u32 seqno;
407 struct sk_buff *deferred; 407 struct sk_buff *deferred;
408 408
409 msg_dbg(msg, "<BC<<<"); 409 msg_dbg(msg, "<BC<<<");
410 410
411 if (unlikely(!node || !node_is_up(node) || !node->bclink.supported || 411 if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
412 (msg_mc_netid(msg) != tipc_net_id))) { 412 (msg_mc_netid(msg) != tipc_net_id))) {
413 buf_discard(buf); 413 buf_discard(buf);
414 return; 414 return;
@@ -417,14 +417,14 @@ void bclink_recv_pkt(struct sk_buff *buf)
417 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { 417 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
418 msg_dbg(msg, "<BCNACK<<<"); 418 msg_dbg(msg, "<BCNACK<<<");
419 if (msg_destnode(msg) == tipc_own_addr) { 419 if (msg_destnode(msg) == tipc_own_addr) {
420 node_lock(node); 420 tipc_node_lock(node);
421 bclink_acknowledge(node, msg_bcast_ack(msg)); 421 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
422 node_unlock(node); 422 tipc_node_unlock(node);
423 bcl->stats.recv_nacks++; 423 bcl->stats.recv_nacks++;
424 bclink_retransmit_pkt(msg_bcgap_after(msg), 424 bclink_retransmit_pkt(msg_bcgap_after(msg),
425 msg_bcgap_to(msg)); 425 msg_bcgap_to(msg));
426 } else { 426 } else {
427 bclink_peek_nack(msg_destnode(msg), 427 tipc_bclink_peek_nack(msg_destnode(msg),
428 msg_bcast_tag(msg), 428 msg_bcast_tag(msg),
429 msg_bcgap_after(msg), 429 msg_bcgap_after(msg),
430 msg_bcgap_to(msg)); 430 msg_bcgap_to(msg));
@@ -433,7 +433,7 @@ void bclink_recv_pkt(struct sk_buff *buf)
433 return; 433 return;
434 } 434 }
435 435
436 node_lock(node); 436 tipc_node_lock(node);
437receive: 437receive:
438 deferred = node->bclink.deferred_head; 438 deferred = node->bclink.deferred_head;
439 next_in = mod(node->bclink.last_in + 1); 439 next_in = mod(node->bclink.last_in + 1);
@@ -448,26 +448,26 @@ receive:
448 bcl->stats.sent_acks++; 448 bcl->stats.sent_acks++;
449 } 449 }
450 if (likely(msg_isdata(msg))) { 450 if (likely(msg_isdata(msg))) {
451 node_unlock(node); 451 tipc_node_unlock(node);
452 port_recv_mcast(buf, NULL); 452 tipc_port_recv_mcast(buf, NULL);
453 } else if (msg_user(msg) == MSG_BUNDLER) { 453 } else if (msg_user(msg) == MSG_BUNDLER) {
454 bcl->stats.recv_bundles++; 454 bcl->stats.recv_bundles++;
455 bcl->stats.recv_bundled += msg_msgcnt(msg); 455 bcl->stats.recv_bundled += msg_msgcnt(msg);
456 node_unlock(node); 456 tipc_node_unlock(node);
457 link_recv_bundle(buf); 457 tipc_link_recv_bundle(buf);
458 } else if (msg_user(msg) == MSG_FRAGMENTER) { 458 } else if (msg_user(msg) == MSG_FRAGMENTER) {
459 bcl->stats.recv_fragments++; 459 bcl->stats.recv_fragments++;
460 if (link_recv_fragment(&node->bclink.defragm, 460 if (tipc_link_recv_fragment(&node->bclink.defragm,
461 &buf, &msg)) 461 &buf, &msg))
462 bcl->stats.recv_fragmented++; 462 bcl->stats.recv_fragmented++;
463 node_unlock(node); 463 tipc_node_unlock(node);
464 net_route_msg(buf); 464 tipc_net_route_msg(buf);
465 } else { 465 } else {
466 node_unlock(node); 466 tipc_node_unlock(node);
467 net_route_msg(buf); 467 tipc_net_route_msg(buf);
468 } 468 }
469 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { 469 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
470 node_lock(node); 470 tipc_node_lock(node);
471 buf = deferred; 471 buf = deferred;
472 msg = buf_msg(buf); 472 msg = buf_msg(buf);
473 node->bclink.deferred_head = deferred->next; 473 node->bclink.deferred_head = deferred->next;
@@ -478,9 +478,9 @@ receive:
478 u32 gap_after = node->bclink.gap_after; 478 u32 gap_after = node->bclink.gap_after;
479 u32 gap_to = node->bclink.gap_to; 479 u32 gap_to = node->bclink.gap_to;
480 480
481 if (link_defer_pkt(&node->bclink.deferred_head, 481 if (tipc_link_defer_pkt(&node->bclink.deferred_head,
482 &node->bclink.deferred_tail, 482 &node->bclink.deferred_tail,
483 buf)) { 483 buf)) {
484 node->bclink.nack_sync++; 484 node->bclink.nack_sync++;
485 bcl->stats.deferred_recv++; 485 bcl->stats.deferred_recv++;
486 if (seqno == mod(gap_after + 1)) 486 if (seqno == mod(gap_after + 1))
@@ -497,10 +497,10 @@ receive:
497 bcl->stats.duplicates++; 497 bcl->stats.duplicates++;
498 buf_discard(buf); 498 buf_discard(buf);
499 } 499 }
500 node_unlock(node); 500 tipc_node_unlock(node);
501} 501}
502 502
503u32 bclink_get_last_sent(void) 503u32 tipc_bclink_get_last_sent(void)
504{ 504{
505 u32 last_sent = mod(bcl->next_out_no - 1); 505 u32 last_sent = mod(bcl->next_out_no - 1);
506 506
@@ -509,15 +509,15 @@ u32 bclink_get_last_sent(void)
509 return last_sent; 509 return last_sent;
510} 510}
511 511
512u32 bclink_acks_missing(struct node *n_ptr) 512u32 tipc_bclink_acks_missing(struct node *n_ptr)
513{ 513{
514 return (n_ptr->bclink.supported && 514 return (n_ptr->bclink.supported &&
515 (bclink_get_last_sent() != n_ptr->bclink.acked)); 515 (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
516} 516}
517 517
518 518
519/** 519/**
520 * bcbearer_send - send a packet through the broadcast pseudo-bearer 520 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
521 * 521 *
522 * Send through as many bearers as necessary to reach all nodes 522 * Send through as many bearers as necessary to reach all nodes
523 * that support TIPC multicasting. 523 * that support TIPC multicasting.
@@ -525,9 +525,9 @@ u32 bclink_acks_missing(struct node *n_ptr)
525 * Returns 0 if packet sent successfully, non-zero if not 525 * Returns 0 if packet sent successfully, non-zero if not
526 */ 526 */
527 527
528int bcbearer_send(struct sk_buff *buf, 528int tipc_bcbearer_send(struct sk_buff *buf,
529 struct tipc_bearer *unused1, 529 struct tipc_bearer *unused1,
530 struct tipc_media_addr *unused2) 530 struct tipc_media_addr *unused2)
531{ 531{
532 static int send_count = 0; 532 static int send_count = 0;
533 533
@@ -541,8 +541,8 @@ int bcbearer_send(struct sk_buff *buf,
541 if (likely(!msg_non_seq(buf_msg(buf)))) { 541 if (likely(!msg_non_seq(buf_msg(buf)))) {
542 struct tipc_msg *msg; 542 struct tipc_msg *msg;
543 543
544 assert(cluster_bcast_nodes.count != 0); 544 assert(tipc_cltr_bcast_nodes.count != 0);
545 bcbuf_set_acks(buf, cluster_bcast_nodes.count); 545 bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
546 msg = buf_msg(buf); 546 msg = buf_msg(buf);
547 msg_set_non_seq(msg); 547 msg_set_non_seq(msg);
548 msg_set_mc_netid(msg, tipc_net_id); 548 msg_set_mc_netid(msg, tipc_net_id);
@@ -555,7 +555,7 @@ int bcbearer_send(struct sk_buff *buf,
555 555
556 /* Send buffer over bearers until all targets reached */ 556 /* Send buffer over bearers until all targets reached */
557 557
558 remains = cluster_bcast_nodes; 558 remains = tipc_cltr_bcast_nodes;
559 559
560 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { 560 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
561 struct bearer *p = bcbearer->bpairs[bp_index].primary; 561 struct bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -564,7 +564,7 @@ int bcbearer_send(struct sk_buff *buf,
564 if (!p) 564 if (!p)
565 break; /* no more bearers to try */ 565 break; /* no more bearers to try */
566 566
567 nmap_diff(&remains, &p->nodes, &remains_new); 567 tipc_nmap_diff(&remains, &p->nodes, &remains_new);
568 if (remains_new.count == remains.count) 568 if (remains_new.count == remains.count)
569 continue; /* bearer pair doesn't add anything */ 569 continue; /* bearer pair doesn't add anything */
570 570
@@ -597,10 +597,10 @@ update:
597} 597}
598 598
599/** 599/**
600 * bcbearer_sort - create sets of bearer pairs used by broadcast bearer 600 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
601 */ 601 */
602 602
603void bcbearer_sort(void) 603void tipc_bcbearer_sort(void)
604{ 604{
605 struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp; 605 struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
606 struct bcbearer_pair *bp_curr; 606 struct bcbearer_pair *bp_curr;
@@ -614,7 +614,7 @@ void bcbearer_sort(void)
614 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); 614 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
615 615
616 for (b_index = 0; b_index < MAX_BEARERS; b_index++) { 616 for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
617 struct bearer *b = &bearers[b_index]; 617 struct bearer *b = &tipc_bearers[b_index];
618 618
619 if (!b->active || !b->nodes.count) 619 if (!b->active || !b->nodes.count)
620 continue; 620 continue;
@@ -630,7 +630,7 @@ void bcbearer_sort(void)
630 bp_curr = bcbearer->bpairs; 630 bp_curr = bcbearer->bpairs;
631 memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs)); 631 memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
632 632
633 for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) { 633 for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
634 634
635 if (!bp_temp[pri].primary) 635 if (!bp_temp[pri].primary)
636 continue; 636 continue;
@@ -638,8 +638,8 @@ void bcbearer_sort(void)
638 bp_curr->primary = bp_temp[pri].primary; 638 bp_curr->primary = bp_temp[pri].primary;
639 639
640 if (bp_temp[pri].secondary) { 640 if (bp_temp[pri].secondary) {
641 if (nmap_equal(&bp_temp[pri].primary->nodes, 641 if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
642 &bp_temp[pri].secondary->nodes)) { 642 &bp_temp[pri].secondary->nodes)) {
643 bp_curr->secondary = bp_temp[pri].secondary; 643 bp_curr->secondary = bp_temp[pri].secondary;
644 } else { 644 } else {
645 bp_curr++; 645 bp_curr++;
@@ -654,14 +654,14 @@ void bcbearer_sort(void)
654} 654}
655 655
656/** 656/**
657 * bcbearer_push - resolve bearer congestion 657 * tipc_bcbearer_push - resolve bearer congestion
658 * 658 *
659 * Forces bclink to push out any unsent packets, until all packets are gone 659 * Forces bclink to push out any unsent packets, until all packets are gone
660 * or congestion reoccurs. 660 * or congestion reoccurs.
661 * No locks set when function called 661 * No locks set when function called
662 */ 662 */
663 663
664void bcbearer_push(void) 664void tipc_bcbearer_push(void)
665{ 665{
666 struct bearer *b_ptr; 666 struct bearer *b_ptr;
667 667
@@ -669,20 +669,20 @@ void bcbearer_push(void)
669 b_ptr = &bcbearer->bearer; 669 b_ptr = &bcbearer->bearer;
670 if (b_ptr->publ.blocked) { 670 if (b_ptr->publ.blocked) {
671 b_ptr->publ.blocked = 0; 671 b_ptr->publ.blocked = 0;
672 bearer_lock_push(b_ptr); 672 tipc_bearer_lock_push(b_ptr);
673 } 673 }
674 spin_unlock_bh(&bc_lock); 674 spin_unlock_bh(&bc_lock);
675} 675}
676 676
677 677
678int bclink_stats(char *buf, const u32 buf_size) 678int tipc_bclink_stats(char *buf, const u32 buf_size)
679{ 679{
680 struct print_buf pb; 680 struct print_buf pb;
681 681
682 if (!bcl) 682 if (!bcl)
683 return 0; 683 return 0;
684 684
685 printbuf_init(&pb, buf, buf_size); 685 tipc_printbuf_init(&pb, buf, buf_size);
686 686
687 spin_lock_bh(&bc_lock); 687 spin_lock_bh(&bc_lock);
688 688
@@ -718,10 +718,10 @@ int bclink_stats(char *buf, const u32 buf_size)
718 : 0); 718 : 0);
719 719
720 spin_unlock_bh(&bc_lock); 720 spin_unlock_bh(&bc_lock);
721 return printbuf_validate(&pb); 721 return tipc_printbuf_validate(&pb);
722} 722}
723 723
724int bclink_reset_stats(void) 724int tipc_bclink_reset_stats(void)
725{ 725{
726 if (!bcl) 726 if (!bcl)
727 return -ENOPROTOOPT; 727 return -ENOPROTOOPT;
@@ -732,7 +732,7 @@ int bclink_reset_stats(void)
732 return TIPC_OK; 732 return TIPC_OK;
733} 733}
734 734
735int bclink_set_queue_limits(u32 limit) 735int tipc_bclink_set_queue_limits(u32 limit)
736{ 736{
737 if (!bcl) 737 if (!bcl)
738 return -ENOPROTOOPT; 738 return -ENOPROTOOPT;
@@ -740,12 +740,12 @@ int bclink_set_queue_limits(u32 limit)
740 return -EINVAL; 740 return -EINVAL;
741 741
742 spin_lock_bh(&bc_lock); 742 spin_lock_bh(&bc_lock);
743 link_set_queue_limits(bcl, limit); 743 tipc_link_set_queue_limits(bcl, limit);
744 spin_unlock_bh(&bc_lock); 744 spin_unlock_bh(&bc_lock);
745 return TIPC_OK; 745 return TIPC_OK;
746} 746}
747 747
748int bclink_init(void) 748int tipc_bclink_init(void)
749{ 749{
750 bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC); 750 bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
751 bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC); 751 bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
@@ -762,7 +762,7 @@ int bclink_init(void)
762 memset(bcbearer, 0, sizeof(struct bcbearer)); 762 memset(bcbearer, 0, sizeof(struct bcbearer));
763 INIT_LIST_HEAD(&bcbearer->bearer.cong_links); 763 INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
764 bcbearer->bearer.media = &bcbearer->media; 764 bcbearer->bearer.media = &bcbearer->media;
765 bcbearer->media.send_msg = bcbearer_send; 765 bcbearer->media.send_msg = tipc_bcbearer_send;
766 sprintf(bcbearer->media.name, "tipc-multicast"); 766 sprintf(bcbearer->media.name, "tipc-multicast");
767 767
768 bcl = &bclink->link; 768 bcl = &bclink->link;
@@ -772,27 +772,27 @@ int bclink_init(void)
772 bclink->node.lock = SPIN_LOCK_UNLOCKED; 772 bclink->node.lock = SPIN_LOCK_UNLOCKED;
773 bcl->owner = &bclink->node; 773 bcl->owner = &bclink->node;
774 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 774 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
775 link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 775 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
776 bcl->b_ptr = &bcbearer->bearer; 776 bcl->b_ptr = &bcbearer->bearer;
777 bcl->state = WORKING_WORKING; 777 bcl->state = WORKING_WORKING;
778 sprintf(bcl->name, bc_link_name); 778 sprintf(bcl->name, tipc_bclink_name);
779 779
780 if (BCLINK_LOG_BUF_SIZE) { 780 if (BCLINK_LOG_BUF_SIZE) {
781 char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC); 781 char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
782 782
783 if (!pb) 783 if (!pb)
784 goto nomem; 784 goto nomem;
785 printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE); 785 tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
786 } 786 }
787 787
788 return TIPC_OK; 788 return TIPC_OK;
789} 789}
790 790
791void bclink_stop(void) 791void tipc_bclink_stop(void)
792{ 792{
793 spin_lock_bh(&bc_lock); 793 spin_lock_bh(&bc_lock);
794 if (bcbearer) { 794 if (bcbearer) {
795 link_stop(bcl); 795 tipc_link_stop(bcl);
796 if (BCLINK_LOG_BUF_SIZE) 796 if (BCLINK_LOG_BUF_SIZE)
797 kfree(bcl->print_buf.buf); 797 kfree(bcl->print_buf.buf);
798 bcl = NULL; 798 bcl = NULL;
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5430e524b4f..0e3be2ab330 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -70,14 +70,14 @@ struct port_list {
70 70
71struct node; 71struct node;
72 72
73extern char bc_link_name[]; 73extern char tipc_bclink_name[];
74 74
75 75
76/** 76/**
77 * nmap_get - determine if node exists in a node map 77 * nmap_get - determine if node exists in a node map
78 */ 78 */
79 79
80static inline int nmap_get(struct node_map *nm_ptr, u32 node) 80static inline int tipc_nmap_get(struct node_map *nm_ptr, u32 node)
81{ 81{
82 int n = tipc_node(node); 82 int n = tipc_node(node);
83 int w = n / WSIZE; 83 int w = n / WSIZE;
@@ -90,7 +90,7 @@ static inline int nmap_get(struct node_map *nm_ptr, u32 node)
90 * nmap_add - add a node to a node map 90 * nmap_add - add a node to a node map
91 */ 91 */
92 92
93static inline void nmap_add(struct node_map *nm_ptr, u32 node) 93static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node)
94{ 94{
95 int n = tipc_node(node); 95 int n = tipc_node(node);
96 int w = n / WSIZE; 96 int w = n / WSIZE;
@@ -106,7 +106,7 @@ static inline void nmap_add(struct node_map *nm_ptr, u32 node)
106 * nmap_remove - remove a node from a node map 106 * nmap_remove - remove a node from a node map
107 */ 107 */
108 108
109static inline void nmap_remove(struct node_map *nm_ptr, u32 node) 109static inline void tipc_nmap_remove(struct node_map *nm_ptr, u32 node)
110{ 110{
111 int n = tipc_node(node); 111 int n = tipc_node(node);
112 int w = n / WSIZE; 112 int w = n / WSIZE;
@@ -122,7 +122,7 @@ static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
122 * nmap_equal - test for equality of node maps 122 * nmap_equal - test for equality of node maps
123 */ 123 */
124 124
125static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b) 125static inline int tipc_nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
126{ 126{
127 return !memcmp(nm_a, nm_b, sizeof(*nm_a)); 127 return !memcmp(nm_a, nm_b, sizeof(*nm_a));
128} 128}
@@ -134,8 +134,8 @@ static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
134 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) 134 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
135 */ 135 */
136 136
137static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b, 137static inline void tipc_nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
138 struct node_map *nm_diff) 138 struct node_map *nm_diff)
139{ 139{
140 int stop = sizeof(nm_a->map) / sizeof(u32); 140 int stop = sizeof(nm_a->map) / sizeof(u32);
141 int w; 141 int w;
@@ -159,7 +159,7 @@ static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
159 * port_list_add - add a port to a port list, ensuring no duplicates 159 * port_list_add - add a port to a port list, ensuring no duplicates
160 */ 160 */
161 161
162static inline void port_list_add(struct port_list *pl_ptr, u32 port) 162static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
163{ 163{
164 struct port_list *item = pl_ptr; 164 struct port_list *item = pl_ptr;
165 int i; 165 int i;
@@ -194,7 +194,7 @@ static inline void port_list_add(struct port_list *pl_ptr, u32 port)
194 * Note: First item is on stack, so it doesn't need to be released 194 * Note: First item is on stack, so it doesn't need to be released
195 */ 195 */
196 196
197static inline void port_list_free(struct port_list *pl_ptr) 197static inline void tipc_port_list_free(struct port_list *pl_ptr)
198{ 198{
199 struct port_list *item; 199 struct port_list *item;
200 struct port_list *next; 200 struct port_list *next;
@@ -206,18 +206,18 @@ static inline void port_list_free(struct port_list *pl_ptr)
206} 206}
207 207
208 208
209int bclink_init(void); 209int tipc_bclink_init(void);
210void bclink_stop(void); 210void tipc_bclink_stop(void);
211void bclink_acknowledge(struct node *n_ptr, u32 acked); 211void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked);
212int bclink_send_msg(struct sk_buff *buf); 212int tipc_bclink_send_msg(struct sk_buff *buf);
213void bclink_recv_pkt(struct sk_buff *buf); 213void tipc_bclink_recv_pkt(struct sk_buff *buf);
214u32 bclink_get_last_sent(void); 214u32 tipc_bclink_get_last_sent(void);
215u32 bclink_acks_missing(struct node *n_ptr); 215u32 tipc_bclink_acks_missing(struct node *n_ptr);
216void bclink_check_gap(struct node *n_ptr, u32 seqno); 216void tipc_bclink_check_gap(struct node *n_ptr, u32 seqno);
217int bclink_stats(char *stats_buf, const u32 buf_size); 217int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
218int bclink_reset_stats(void); 218int tipc_bclink_reset_stats(void);
219int bclink_set_queue_limits(u32 limit); 219int tipc_bclink_set_queue_limits(u32 limit);
220void bcbearer_sort(void); 220void tipc_bcbearer_sort(void);
221void bcbearer_push(void); 221void tipc_bcbearer_push(void);
222 222
223#endif 223#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 3dd19fdc5a2..64dcb0f3a8b 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -48,7 +48,7 @@
48static struct media *media_list = 0; 48static struct media *media_list = 0;
49static u32 media_count = 0; 49static u32 media_count = 0;
50 50
51struct bearer *bearers = 0; 51struct bearer *tipc_bearers = 0;
52 52
53/** 53/**
54 * media_name_valid - validate media name 54 * media_name_valid - validate media name
@@ -107,7 +107,7 @@ int tipc_register_media(u32 media_type,
107 u32 i; 107 u32 i;
108 int res = -EINVAL; 108 int res = -EINVAL;
109 109
110 write_lock_bh(&net_lock); 110 write_lock_bh(&tipc_net_lock);
111 if (!media_list) 111 if (!media_list)
112 goto exit; 112 goto exit;
113 113
@@ -119,7 +119,8 @@ int tipc_register_media(u32 media_type,
119 warn("Media registration error: no broadcast address supplied\n"); 119 warn("Media registration error: no broadcast address supplied\n");
120 goto exit; 120 goto exit;
121 } 121 }
122 if (bearer_priority >= TIPC_NUM_LINK_PRI) { 122 if ((bearer_priority < TIPC_MIN_LINK_PRI) &&
123 (bearer_priority > TIPC_MAX_LINK_PRI)) {
123 warn("Media registration error: priority %u\n", bearer_priority); 124 warn("Media registration error: priority %u\n", bearer_priority);
124 goto exit; 125 goto exit;
125 } 126 }
@@ -164,15 +165,15 @@ int tipc_register_media(u32 media_type,
164 dbg("Media <%s> registered\n", name); 165 dbg("Media <%s> registered\n", name);
165 res = 0; 166 res = 0;
166exit: 167exit:
167 write_unlock_bh(&net_lock); 168 write_unlock_bh(&tipc_net_lock);
168 return res; 169 return res;
169} 170}
170 171
171/** 172/**
172 * media_addr_printf - record media address in print buffer 173 * tipc_media_addr_printf - record media address in print buffer
173 */ 174 */
174 175
175void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) 176void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
176{ 177{
177 struct media *m_ptr; 178 struct media *m_ptr;
178 u32 media_type; 179 u32 media_type;
@@ -200,25 +201,25 @@ void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
200} 201}
201 202
202/** 203/**
203 * media_get_names - record names of registered media in buffer 204 * tipc_media_get_names - record names of registered media in buffer
204 */ 205 */
205 206
206struct sk_buff *media_get_names(void) 207struct sk_buff *tipc_media_get_names(void)
207{ 208{
208 struct sk_buff *buf; 209 struct sk_buff *buf;
209 struct media *m_ptr; 210 struct media *m_ptr;
210 int i; 211 int i;
211 212
212 buf = cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME)); 213 buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
213 if (!buf) 214 if (!buf)
214 return NULL; 215 return NULL;
215 216
216 read_lock_bh(&net_lock); 217 read_lock_bh(&tipc_net_lock);
217 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) { 218 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
218 cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name, 219 tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
219 strlen(m_ptr->name) + 1); 220 strlen(m_ptr->name) + 1);
220 } 221 }
221 read_unlock_bh(&net_lock); 222 read_unlock_bh(&tipc_net_lock);
222 return buf; 223 return buf;
223} 224}
224 225
@@ -282,7 +283,7 @@ static struct bearer *bearer_find(const char *name)
282 struct bearer *b_ptr; 283 struct bearer *b_ptr;
283 u32 i; 284 u32 i;
284 285
285 for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) { 286 for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
286 if (b_ptr->active && (!strcmp(b_ptr->publ.name, name))) 287 if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
287 return b_ptr; 288 return b_ptr;
288 } 289 }
@@ -290,16 +291,16 @@ static struct bearer *bearer_find(const char *name)
290} 291}
291 292
292/** 293/**
293 * bearer_find - locates bearer object with matching interface name 294 * tipc_bearer_find_interface - locates bearer object with matching interface name
294 */ 295 */
295 296
296struct bearer *bearer_find_interface(const char *if_name) 297struct bearer *tipc_bearer_find_interface(const char *if_name)
297{ 298{
298 struct bearer *b_ptr; 299 struct bearer *b_ptr;
299 char *b_if_name; 300 char *b_if_name;
300 u32 i; 301 u32 i;
301 302
302 for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) { 303 for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
303 if (!b_ptr->active) 304 if (!b_ptr->active)
304 continue; 305 continue;
305 b_if_name = strchr(b_ptr->publ.name, ':') + 1; 306 b_if_name = strchr(b_ptr->publ.name, ':') + 1;
@@ -310,54 +311,54 @@ struct bearer *bearer_find_interface(const char *if_name)
310} 311}
311 312
312/** 313/**
313 * bearer_get_names - record names of bearers in buffer 314 * tipc_bearer_get_names - record names of bearers in buffer
314 */ 315 */
315 316
316struct sk_buff *bearer_get_names(void) 317struct sk_buff *tipc_bearer_get_names(void)
317{ 318{
318 struct sk_buff *buf; 319 struct sk_buff *buf;
319 struct media *m_ptr; 320 struct media *m_ptr;
320 struct bearer *b_ptr; 321 struct bearer *b_ptr;
321 int i, j; 322 int i, j;
322 323
323 buf = cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME)); 324 buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
324 if (!buf) 325 if (!buf)
325 return NULL; 326 return NULL;
326 327
327 read_lock_bh(&net_lock); 328 read_lock_bh(&tipc_net_lock);
328 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) { 329 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
329 for (j = 0; j < MAX_BEARERS; j++) { 330 for (j = 0; j < MAX_BEARERS; j++) {
330 b_ptr = &bearers[j]; 331 b_ptr = &tipc_bearers[j];
331 if (b_ptr->active && (b_ptr->media == m_ptr)) { 332 if (b_ptr->active && (b_ptr->media == m_ptr)) {
332 cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, 333 tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
333 b_ptr->publ.name, 334 b_ptr->publ.name,
334 strlen(b_ptr->publ.name) + 1); 335 strlen(b_ptr->publ.name) + 1);
335 } 336 }
336 } 337 }
337 } 338 }
338 read_unlock_bh(&net_lock); 339 read_unlock_bh(&tipc_net_lock);
339 return buf; 340 return buf;
340} 341}
341 342
342void bearer_add_dest(struct bearer *b_ptr, u32 dest) 343void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest)
343{ 344{
344 nmap_add(&b_ptr->nodes, dest); 345 tipc_nmap_add(&b_ptr->nodes, dest);
345 disc_update_link_req(b_ptr->link_req); 346 tipc_disc_update_link_req(b_ptr->link_req);
346 bcbearer_sort(); 347 tipc_bcbearer_sort();
347} 348}
348 349
349void bearer_remove_dest(struct bearer *b_ptr, u32 dest) 350void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
350{ 351{
351 nmap_remove(&b_ptr->nodes, dest); 352 tipc_nmap_remove(&b_ptr->nodes, dest);
352 disc_update_link_req(b_ptr->link_req); 353 tipc_disc_update_link_req(b_ptr->link_req);
353 bcbearer_sort(); 354 tipc_bcbearer_sort();
354} 355}
355 356
356/* 357/*
357 * bearer_push(): Resolve bearer congestion. Force the waiting 358 * bearer_push(): Resolve bearer congestion. Force the waiting
358 * links to push out their unsent packets, one packet per link 359 * links to push out their unsent packets, one packet per link
359 * per iteration, until all packets are gone or congestion reoccurs. 360 * per iteration, until all packets are gone or congestion reoccurs.
360 * 'net_lock' is read_locked when this function is called 361 * 'tipc_net_lock' is read_locked when this function is called
361 * bearer.lock must be taken before calling 362 * bearer.lock must be taken before calling
362 * Returns binary true(1) ore false(0) 363 * Returns binary true(1) ore false(0)
363 */ 364 */
@@ -371,7 +372,7 @@ static int bearer_push(struct bearer *b_ptr)
371 372
372 while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) { 373 while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
373 list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) { 374 list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
374 res = link_push_packet(ln); 375 res = tipc_link_push_packet(ln);
375 if (res == PUSH_FAILED) 376 if (res == PUSH_FAILED)
376 break; 377 break;
377 if (res == PUSH_FINISHED) 378 if (res == PUSH_FINISHED)
@@ -381,7 +382,7 @@ static int bearer_push(struct bearer *b_ptr)
381 return list_empty(&b_ptr->cong_links); 382 return list_empty(&b_ptr->cong_links);
382} 383}
383 384
384void bearer_lock_push(struct bearer *b_ptr) 385void tipc_bearer_lock_push(struct bearer *b_ptr)
385{ 386{
386 int res; 387 int res;
387 388
@@ -389,7 +390,7 @@ void bearer_lock_push(struct bearer *b_ptr)
389 res = bearer_push(b_ptr); 390 res = bearer_push(b_ptr);
390 spin_unlock_bh(&b_ptr->publ.lock); 391 spin_unlock_bh(&b_ptr->publ.lock);
391 if (res) 392 if (res)
392 bcbearer_push(); 393 tipc_bcbearer_push();
393} 394}
394 395
395 396
@@ -404,7 +405,7 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
404 spin_lock_bh(&b_ptr->publ.lock); 405 spin_lock_bh(&b_ptr->publ.lock);
405 b_ptr->continue_count++; 406 b_ptr->continue_count++;
406 if (!list_empty(&b_ptr->cong_links)) 407 if (!list_empty(&b_ptr->cong_links))
407 k_signal((Handler)bearer_lock_push, (unsigned long)b_ptr); 408 tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
408 b_ptr->publ.blocked = 0; 409 b_ptr->publ.blocked = 0;
409 spin_unlock_bh(&b_ptr->publ.lock); 410 spin_unlock_bh(&b_ptr->publ.lock);
410} 411}
@@ -413,11 +414,11 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
413 * Schedule link for sending of messages after the bearer 414 * Schedule link for sending of messages after the bearer
414 * has been deblocked by 'continue()'. This method is called 415 * has been deblocked by 'continue()'. This method is called
415 * when somebody tries to send a message via this link while 416 * when somebody tries to send a message via this link while
416 * the bearer is congested. 'net_lock' is in read_lock here 417 * the bearer is congested. 'tipc_net_lock' is in read_lock here
417 * bearer.lock is busy 418 * bearer.lock is busy
418 */ 419 */
419 420
420static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr) 421static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
421{ 422{
422 list_move_tail(&l_ptr->link_list, &b_ptr->cong_links); 423 list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
423} 424}
@@ -426,24 +427,24 @@ static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
426 * Schedule link for sending of messages after the bearer 427 * Schedule link for sending of messages after the bearer
427 * has been deblocked by 'continue()'. This method is called 428 * has been deblocked by 'continue()'. This method is called
428 * when somebody tries to send a message via this link while 429 * when somebody tries to send a message via this link while
429 * the bearer is congested. 'net_lock' is in read_lock here, 430 * the bearer is congested. 'tipc_net_lock' is in read_lock here,
430 * bearer.lock is free 431 * bearer.lock is free
431 */ 432 */
432 433
433void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr) 434void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
434{ 435{
435 spin_lock_bh(&b_ptr->publ.lock); 436 spin_lock_bh(&b_ptr->publ.lock);
436 bearer_schedule_unlocked(b_ptr, l_ptr); 437 tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
437 spin_unlock_bh(&b_ptr->publ.lock); 438 spin_unlock_bh(&b_ptr->publ.lock);
438} 439}
439 440
440 441
441/* 442/*
442 * bearer_resolve_congestion(): Check if there is bearer congestion, 443 * tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
443 * and if there is, try to resolve it before returning. 444 * and if there is, try to resolve it before returning.
444 * 'net_lock' is read_locked when this function is called 445 * 'tipc_net_lock' is read_locked when this function is called
445 */ 446 */
446int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr) 447int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
447{ 448{
448 int res = 1; 449 int res = 1;
449 450
@@ -451,7 +452,7 @@ int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
451 return 1; 452 return 1;
452 spin_lock_bh(&b_ptr->publ.lock); 453 spin_lock_bh(&b_ptr->publ.lock);
453 if (!bearer_push(b_ptr)) { 454 if (!bearer_push(b_ptr)) {
454 bearer_schedule_unlocked(b_ptr, l_ptr); 455 tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
455 res = 0; 456 res = 0;
456 } 457 }
457 spin_unlock_bh(&b_ptr->publ.lock); 458 spin_unlock_bh(&b_ptr->publ.lock);
@@ -476,14 +477,19 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
476 477
477 if (tipc_mode != TIPC_NET_MODE) 478 if (tipc_mode != TIPC_NET_MODE)
478 return -ENOPROTOOPT; 479 return -ENOPROTOOPT;
480
479 if (!bearer_name_validate(name, &b_name) || 481 if (!bearer_name_validate(name, &b_name) ||
480 !addr_domain_valid(bcast_scope) || 482 !tipc_addr_domain_valid(bcast_scope) ||
481 !in_scope(bcast_scope, tipc_own_addr) || 483 !in_scope(bcast_scope, tipc_own_addr))
482 (priority > TIPC_NUM_LINK_PRI)) 484 return -EINVAL;
485
486 if ((priority < TIPC_MIN_LINK_PRI ||
487 priority > TIPC_MAX_LINK_PRI) &&
488 (priority != TIPC_MEDIA_LINK_PRI))
483 return -EINVAL; 489 return -EINVAL;
484 490
485 write_lock_bh(&net_lock); 491 write_lock_bh(&tipc_net_lock);
486 if (!bearers) 492 if (!tipc_bearers)
487 goto failed; 493 goto failed;
488 494
489 m_ptr = media_find(b_name.media_name); 495 m_ptr = media_find(b_name.media_name);
@@ -491,22 +497,23 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
491 warn("No media <%s>\n", b_name.media_name); 497 warn("No media <%s>\n", b_name.media_name);
492 goto failed; 498 goto failed;
493 } 499 }
494 if (priority == TIPC_NUM_LINK_PRI) 500
501 if (priority == TIPC_MEDIA_LINK_PRI)
495 priority = m_ptr->priority; 502 priority = m_ptr->priority;
496 503
497restart: 504restart:
498 bearer_id = MAX_BEARERS; 505 bearer_id = MAX_BEARERS;
499 with_this_prio = 1; 506 with_this_prio = 1;
500 for (i = MAX_BEARERS; i-- != 0; ) { 507 for (i = MAX_BEARERS; i-- != 0; ) {
501 if (!bearers[i].active) { 508 if (!tipc_bearers[i].active) {
502 bearer_id = i; 509 bearer_id = i;
503 continue; 510 continue;
504 } 511 }
505 if (!strcmp(name, bearers[i].publ.name)) { 512 if (!strcmp(name, tipc_bearers[i].publ.name)) {
506 warn("Bearer <%s> already enabled\n", name); 513 warn("Bearer <%s> already enabled\n", name);
507 goto failed; 514 goto failed;
508 } 515 }
509 if ((bearers[i].priority == priority) && 516 if ((tipc_bearers[i].priority == priority) &&
510 (++with_this_prio > 2)) { 517 (++with_this_prio > 2)) {
511 if (priority-- == 0) { 518 if (priority-- == 0) {
512 warn("Third bearer <%s> with priority %u, unable to lower to %u\n", 519 warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
@@ -523,7 +530,7 @@ restart:
523 goto failed; 530 goto failed;
524 } 531 }
525 532
526 b_ptr = &bearers[bearer_id]; 533 b_ptr = &tipc_bearers[bearer_id];
527 memset(b_ptr, 0, sizeof(struct bearer)); 534 memset(b_ptr, 0, sizeof(struct bearer));
528 535
529 strcpy(b_ptr->publ.name, name); 536 strcpy(b_ptr->publ.name, name);
@@ -542,16 +549,16 @@ restart:
542 INIT_LIST_HEAD(&b_ptr->cong_links); 549 INIT_LIST_HEAD(&b_ptr->cong_links);
543 INIT_LIST_HEAD(&b_ptr->links); 550 INIT_LIST_HEAD(&b_ptr->links);
544 if (m_ptr->bcast) { 551 if (m_ptr->bcast) {
545 b_ptr->link_req = disc_init_link_req(b_ptr, &m_ptr->bcast_addr, 552 b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
546 bcast_scope, 2); 553 bcast_scope, 2);
547 } 554 }
548 b_ptr->publ.lock = SPIN_LOCK_UNLOCKED; 555 b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
549 write_unlock_bh(&net_lock); 556 write_unlock_bh(&tipc_net_lock);
550 info("Enabled bearer <%s>, discovery domain %s\n", 557 info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
551 name, addr_string_fill(addr_string, bcast_scope)); 558 name, addr_string_fill(addr_string, bcast_scope), priority);
552 return 0; 559 return 0;
553failed: 560failed:
554 write_unlock_bh(&net_lock); 561 write_unlock_bh(&tipc_net_lock);
555 return res; 562 return res;
556} 563}
557 564
@@ -569,11 +576,11 @@ int tipc_block_bearer(const char *name)
569 if (tipc_mode != TIPC_NET_MODE) 576 if (tipc_mode != TIPC_NET_MODE)
570 return -ENOPROTOOPT; 577 return -ENOPROTOOPT;
571 578
572 read_lock_bh(&net_lock); 579 read_lock_bh(&tipc_net_lock);
573 b_ptr = bearer_find(name); 580 b_ptr = bearer_find(name);
574 if (!b_ptr) { 581 if (!b_ptr) {
575 warn("Attempt to block unknown bearer <%s>\n", name); 582 warn("Attempt to block unknown bearer <%s>\n", name);
576 read_unlock_bh(&net_lock); 583 read_unlock_bh(&tipc_net_lock);
577 return -EINVAL; 584 return -EINVAL;
578 } 585 }
579 586
@@ -583,11 +590,11 @@ int tipc_block_bearer(const char *name)
583 struct node *n_ptr = l_ptr->owner; 590 struct node *n_ptr = l_ptr->owner;
584 591
585 spin_lock_bh(&n_ptr->lock); 592 spin_lock_bh(&n_ptr->lock);
586 link_reset(l_ptr); 593 tipc_link_reset(l_ptr);
587 spin_unlock_bh(&n_ptr->lock); 594 spin_unlock_bh(&n_ptr->lock);
588 } 595 }
589 spin_unlock_bh(&b_ptr->publ.lock); 596 spin_unlock_bh(&b_ptr->publ.lock);
590 read_unlock_bh(&net_lock); 597 read_unlock_bh(&tipc_net_lock);
591 info("Blocked bearer <%s>\n", name); 598 info("Blocked bearer <%s>\n", name);
592 return TIPC_OK; 599 return TIPC_OK;
593} 600}
@@ -595,7 +602,7 @@ int tipc_block_bearer(const char *name)
595/** 602/**
596 * bearer_disable - 603 * bearer_disable -
597 * 604 *
598 * Note: This routine assumes caller holds net_lock. 605 * Note: This routine assumes caller holds tipc_net_lock.
599 */ 606 */
600 607
601static int bearer_disable(const char *name) 608static int bearer_disable(const char *name)
@@ -613,19 +620,19 @@ static int bearer_disable(const char *name)
613 return -EINVAL; 620 return -EINVAL;
614 } 621 }
615 622
616 disc_stop_link_req(b_ptr->link_req); 623 tipc_disc_stop_link_req(b_ptr->link_req);
617 spin_lock_bh(&b_ptr->publ.lock); 624 spin_lock_bh(&b_ptr->publ.lock);
618 b_ptr->link_req = NULL; 625 b_ptr->link_req = NULL;
619 b_ptr->publ.blocked = 1; 626 b_ptr->publ.blocked = 1;
620 if (b_ptr->media->disable_bearer) { 627 if (b_ptr->media->disable_bearer) {
621 spin_unlock_bh(&b_ptr->publ.lock); 628 spin_unlock_bh(&b_ptr->publ.lock);
622 write_unlock_bh(&net_lock); 629 write_unlock_bh(&tipc_net_lock);
623 b_ptr->media->disable_bearer(&b_ptr->publ); 630 b_ptr->media->disable_bearer(&b_ptr->publ);
624 write_lock_bh(&net_lock); 631 write_lock_bh(&tipc_net_lock);
625 spin_lock_bh(&b_ptr->publ.lock); 632 spin_lock_bh(&b_ptr->publ.lock);
626 } 633 }
627 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 634 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
628 link_delete(l_ptr); 635 tipc_link_delete(l_ptr);
629 } 636 }
630 spin_unlock_bh(&b_ptr->publ.lock); 637 spin_unlock_bh(&b_ptr->publ.lock);
631 info("Disabled bearer <%s>\n", name); 638 info("Disabled bearer <%s>\n", name);
@@ -637,54 +644,54 @@ int tipc_disable_bearer(const char *name)
637{ 644{
638 int res; 645 int res;
639 646
640 write_lock_bh(&net_lock); 647 write_lock_bh(&tipc_net_lock);
641 res = bearer_disable(name); 648 res = bearer_disable(name);
642 write_unlock_bh(&net_lock); 649 write_unlock_bh(&tipc_net_lock);
643 return res; 650 return res;
644} 651}
645 652
646 653
647 654
648int bearer_init(void) 655int tipc_bearer_init(void)
649{ 656{
650 int res; 657 int res;
651 658
652 write_lock_bh(&net_lock); 659 write_lock_bh(&tipc_net_lock);
653 bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC); 660 tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
654 media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC); 661 media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
655 if (bearers && media_list) { 662 if (tipc_bearers && media_list) {
656 memset(bearers, 0, MAX_BEARERS * sizeof(struct bearer)); 663 memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer));
657 memset(media_list, 0, MAX_MEDIA * sizeof(struct media)); 664 memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
658 res = TIPC_OK; 665 res = TIPC_OK;
659 } else { 666 } else {
660 kfree(bearers); 667 kfree(tipc_bearers);
661 kfree(media_list); 668 kfree(media_list);
662 bearers = 0; 669 tipc_bearers = 0;
663 media_list = 0; 670 media_list = 0;
664 res = -ENOMEM; 671 res = -ENOMEM;
665 } 672 }
666 write_unlock_bh(&net_lock); 673 write_unlock_bh(&tipc_net_lock);
667 return res; 674 return res;
668} 675}
669 676
670void bearer_stop(void) 677void tipc_bearer_stop(void)
671{ 678{
672 u32 i; 679 u32 i;
673 680
674 if (!bearers) 681 if (!tipc_bearers)
675 return; 682 return;
676 683
677 for (i = 0; i < MAX_BEARERS; i++) { 684 for (i = 0; i < MAX_BEARERS; i++) {
678 if (bearers[i].active) 685 if (tipc_bearers[i].active)
679 bearers[i].publ.blocked = 1; 686 tipc_bearers[i].publ.blocked = 1;
680 } 687 }
681 for (i = 0; i < MAX_BEARERS; i++) { 688 for (i = 0; i < MAX_BEARERS; i++) {
682 if (bearers[i].active) 689 if (tipc_bearers[i].active)
683 bearer_disable(bearers[i].publ.name); 690 bearer_disable(tipc_bearers[i].publ.name);
684 } 691 }
685 kfree(bearers); 692 kfree(tipc_bearers);
686 kfree(media_list); 693 kfree(media_list);
687 bearers = 0; 694 tipc_bearers = 0;
688 media_list = 0; 695 media_list = 0;
689 media_count = 0; 696 media_count = 0;
690} 697}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 21e63d3f018..c4e7c1c3655 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -37,7 +37,7 @@
37#ifndef _TIPC_BEARER_H 37#ifndef _TIPC_BEARER_H
38#define _TIPC_BEARER_H 38#define _TIPC_BEARER_H
39 39
40#include <net/tipc/tipc_bearer.h> 40#include "core.h"
41#include "bcast.h" 41#include "bcast.h"
42 42
43#define MAX_BEARERS 8 43#define MAX_BEARERS 8
@@ -114,26 +114,24 @@ struct bearer_name {
114 114
115struct link; 115struct link;
116 116
117extern struct bearer *bearers; 117extern struct bearer *tipc_bearers;
118 118
119void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); 119void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
120struct sk_buff *media_get_names(void); 120struct sk_buff *tipc_media_get_names(void);
121 121
122struct sk_buff *bearer_get_names(void); 122struct sk_buff *tipc_bearer_get_names(void);
123void bearer_add_dest(struct bearer *b_ptr, u32 dest); 123void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest);
124void bearer_remove_dest(struct bearer *b_ptr, u32 dest); 124void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
125void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr); 125void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
126struct bearer *bearer_find_interface(const char *if_name); 126struct bearer *tipc_bearer_find_interface(const char *if_name);
127int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr); 127int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
128int bearer_init(void); 128int tipc_bearer_init(void);
129void bearer_stop(void); 129void tipc_bearer_stop(void);
130int bearer_broadcast(struct sk_buff *buf, struct tipc_bearer *b_ptr, 130void tipc_bearer_lock_push(struct bearer *b_ptr);
131 struct tipc_media_addr *dest);
132void bearer_lock_push(struct bearer *b_ptr);
133 131
134 132
135/** 133/**
136 * bearer_send- sends buffer to destination over bearer 134 * tipc_bearer_send- sends buffer to destination over bearer
137 * 135 *
138 * Returns true (1) if successful, or false (0) if unable to send 136 * Returns true (1) if successful, or false (0) if unable to send
139 * 137 *
@@ -150,23 +148,23 @@ void bearer_lock_push(struct bearer *b_ptr);
150 * and let TIPC's link code deal with the undelivered message. 148 * and let TIPC's link code deal with the undelivered message.
151 */ 149 */
152 150
153static inline int bearer_send(struct bearer *b_ptr, struct sk_buff *buf, 151static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
154 struct tipc_media_addr *dest) 152 struct tipc_media_addr *dest)
155{ 153{
156 return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest); 154 return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
157} 155}
158 156
159/** 157/**
160 * bearer_congested - determines if bearer is currently congested 158 * tipc_bearer_congested - determines if bearer is currently congested
161 */ 159 */
162 160
163static inline int bearer_congested(struct bearer *b_ptr, struct link *l_ptr) 161static inline int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
164{ 162{
165 if (unlikely(b_ptr->publ.blocked)) 163 if (unlikely(b_ptr->publ.blocked))
166 return 1; 164 return 1;
167 if (likely(list_empty(&b_ptr->cong_links))) 165 if (likely(list_empty(&b_ptr->cong_links)))
168 return 0; 166 return 0;
169 return !bearer_resolve_congestion(b_ptr, l_ptr); 167 return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
170} 168}
171 169
172#endif 170#endif
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index f0f7bac51d4..ab974ca1937 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -44,15 +44,15 @@
44#include "msg.h" 44#include "msg.h"
45#include "bearer.h" 45#include "bearer.h"
46 46
47void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 47void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
48 u32 lower, u32 upper); 48 u32 lower, u32 upper);
49struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest); 49struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
50 50
51struct node **local_nodes = 0; 51struct node **tipc_local_nodes = 0;
52struct node_map cluster_bcast_nodes = {0,{0,}}; 52struct node_map tipc_cltr_bcast_nodes = {0,{0,}};
53u32 highest_allowed_slave = 0; 53u32 tipc_highest_allowed_slave = 0;
54 54
55struct cluster *cluster_create(u32 addr) 55struct cluster *tipc_cltr_create(u32 addr)
56{ 56{
57 struct _zone *z_ptr; 57 struct _zone *z_ptr;
58 struct cluster *c_ptr; 58 struct cluster *c_ptr;
@@ -77,16 +77,16 @@ struct cluster *cluster_create(u32 addr)
77 } 77 }
78 memset(c_ptr->nodes, 0, alloc); 78 memset(c_ptr->nodes, 0, alloc);
79 if (in_own_cluster(addr)) 79 if (in_own_cluster(addr))
80 local_nodes = c_ptr->nodes; 80 tipc_local_nodes = c_ptr->nodes;
81 c_ptr->highest_slave = LOWEST_SLAVE - 1; 81 c_ptr->highest_slave = LOWEST_SLAVE - 1;
82 c_ptr->highest_node = 0; 82 c_ptr->highest_node = 0;
83 83
84 z_ptr = zone_find(tipc_zone(addr)); 84 z_ptr = tipc_zone_find(tipc_zone(addr));
85 if (z_ptr == NULL) { 85 if (z_ptr == NULL) {
86 z_ptr = zone_create(addr); 86 z_ptr = tipc_zone_create(addr);
87 } 87 }
88 if (z_ptr != NULL) { 88 if (z_ptr != NULL) {
89 zone_attach_cluster(z_ptr, c_ptr); 89 tipc_zone_attach_cluster(z_ptr, c_ptr);
90 c_ptr->owner = z_ptr; 90 c_ptr->owner = z_ptr;
91 } 91 }
92 else { 92 else {
@@ -97,23 +97,23 @@ struct cluster *cluster_create(u32 addr)
97 return c_ptr; 97 return c_ptr;
98} 98}
99 99
100void cluster_delete(struct cluster *c_ptr) 100void tipc_cltr_delete(struct cluster *c_ptr)
101{ 101{
102 u32 n_num; 102 u32 n_num;
103 103
104 if (!c_ptr) 104 if (!c_ptr)
105 return; 105 return;
106 for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) { 106 for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
107 node_delete(c_ptr->nodes[n_num]); 107 tipc_node_delete(c_ptr->nodes[n_num]);
108 } 108 }
109 for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) { 109 for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
110 node_delete(c_ptr->nodes[n_num]); 110 tipc_node_delete(c_ptr->nodes[n_num]);
111 } 111 }
112 kfree(c_ptr->nodes); 112 kfree(c_ptr->nodes);
113 kfree(c_ptr); 113 kfree(c_ptr);
114} 114}
115 115
116u32 cluster_next_node(struct cluster *c_ptr, u32 addr) 116u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr)
117{ 117{
118 struct node *n_ptr; 118 struct node *n_ptr;
119 u32 n_num = tipc_node(addr) + 1; 119 u32 n_num = tipc_node(addr) + 1;
@@ -122,24 +122,24 @@ u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
122 return addr; 122 return addr;
123 for (; n_num <= c_ptr->highest_node; n_num++) { 123 for (; n_num <= c_ptr->highest_node; n_num++) {
124 n_ptr = c_ptr->nodes[n_num]; 124 n_ptr = c_ptr->nodes[n_num];
125 if (n_ptr && node_has_active_links(n_ptr)) 125 if (n_ptr && tipc_node_has_active_links(n_ptr))
126 return n_ptr->addr; 126 return n_ptr->addr;
127 } 127 }
128 for (n_num = 1; n_num < tipc_node(addr); n_num++) { 128 for (n_num = 1; n_num < tipc_node(addr); n_num++) {
129 n_ptr = c_ptr->nodes[n_num]; 129 n_ptr = c_ptr->nodes[n_num];
130 if (n_ptr && node_has_active_links(n_ptr)) 130 if (n_ptr && tipc_node_has_active_links(n_ptr))
131 return n_ptr->addr; 131 return n_ptr->addr;
132 } 132 }
133 return 0; 133 return 0;
134} 134}
135 135
136void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr) 136void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr)
137{ 137{
138 u32 n_num = tipc_node(n_ptr->addr); 138 u32 n_num = tipc_node(n_ptr->addr);
139 u32 max_n_num = tipc_max_nodes; 139 u32 max_n_num = tipc_max_nodes;
140 140
141 if (in_own_cluster(n_ptr->addr)) 141 if (in_own_cluster(n_ptr->addr))
142 max_n_num = highest_allowed_slave; 142 max_n_num = tipc_highest_allowed_slave;
143 assert(n_num > 0); 143 assert(n_num > 0);
144 assert(n_num <= max_n_num); 144 assert(n_num <= max_n_num);
145 assert(c_ptr->nodes[n_num] == 0); 145 assert(c_ptr->nodes[n_num] == 0);
@@ -149,12 +149,12 @@ void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
149} 149}
150 150
151/** 151/**
152 * cluster_select_router - select router to a cluster 152 * tipc_cltr_select_router - select router to a cluster
153 * 153 *
154 * Uses deterministic and fair algorithm. 154 * Uses deterministic and fair algorithm.
155 */ 155 */
156 156
157u32 cluster_select_router(struct cluster *c_ptr, u32 ref) 157u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
158{ 158{
159 u32 n_num; 159 u32 n_num;
160 u32 ulim = c_ptr->highest_node; 160 u32 ulim = c_ptr->highest_node;
@@ -174,29 +174,29 @@ u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
174 174
175 /* Lookup upwards with wrap-around */ 175 /* Lookup upwards with wrap-around */
176 do { 176 do {
177 if (node_is_up(c_ptr->nodes[n_num])) 177 if (tipc_node_is_up(c_ptr->nodes[n_num]))
178 break; 178 break;
179 } while (++n_num <= ulim); 179 } while (++n_num <= ulim);
180 if (n_num > ulim) { 180 if (n_num > ulim) {
181 n_num = 1; 181 n_num = 1;
182 do { 182 do {
183 if (node_is_up(c_ptr->nodes[n_num])) 183 if (tipc_node_is_up(c_ptr->nodes[n_num]))
184 break; 184 break;
185 } while (++n_num < tstart); 185 } while (++n_num < tstart);
186 if (n_num == tstart) 186 if (n_num == tstart)
187 return 0; 187 return 0;
188 } 188 }
189 assert(n_num <= ulim); 189 assert(n_num <= ulim);
190 return node_select_router(c_ptr->nodes[n_num], ref); 190 return tipc_node_select_router(c_ptr->nodes[n_num], ref);
191} 191}
192 192
193/** 193/**
194 * cluster_select_node - select destination node within a remote cluster 194 * tipc_cltr_select_node - select destination node within a remote cluster
195 * 195 *
196 * Uses deterministic and fair algorithm. 196 * Uses deterministic and fair algorithm.
197 */ 197 */
198 198
199struct node *cluster_select_node(struct cluster *c_ptr, u32 selector) 199struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
200{ 200{
201 u32 n_num; 201 u32 n_num;
202 u32 mask = tipc_max_nodes; 202 u32 mask = tipc_max_nodes;
@@ -215,11 +215,11 @@ struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
215 215
216 /* Lookup upwards with wrap-around */ 216 /* Lookup upwards with wrap-around */
217 for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) { 217 for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
218 if (node_has_active_links(c_ptr->nodes[n_num])) 218 if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
219 return c_ptr->nodes[n_num]; 219 return c_ptr->nodes[n_num];
220 } 220 }
221 for (n_num = 1; n_num < start_entry; n_num++) { 221 for (n_num = 1; n_num < start_entry; n_num++) {
222 if (node_has_active_links(c_ptr->nodes[n_num])) 222 if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
223 return c_ptr->nodes[n_num]; 223 return c_ptr->nodes[n_num];
224 } 224 }
225 return 0; 225 return 0;
@@ -229,7 +229,7 @@ struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
229 * Routing table management: See description in node.c 229 * Routing table management: See description in node.c
230 */ 230 */
231 231
232struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest) 232struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
233{ 233{
234 u32 size = INT_H_SIZE + data_size; 234 u32 size = INT_H_SIZE + data_size;
235 struct sk_buff *buf = buf_acquire(size); 235 struct sk_buff *buf = buf_acquire(size);
@@ -243,39 +243,39 @@ struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
243 return buf; 243 return buf;
244} 244}
245 245
246void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, 246void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest,
247 u32 lower, u32 upper) 247 u32 lower, u32 upper)
248{ 248{
249 struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr); 249 struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
250 struct tipc_msg *msg; 250 struct tipc_msg *msg;
251 251
252 if (buf) { 252 if (buf) {
253 msg = buf_msg(buf); 253 msg = buf_msg(buf);
254 msg_set_remote_node(msg, dest); 254 msg_set_remote_node(msg, dest);
255 msg_set_type(msg, ROUTE_ADDITION); 255 msg_set_type(msg, ROUTE_ADDITION);
256 cluster_multicast(c_ptr, buf, lower, upper); 256 tipc_cltr_multicast(c_ptr, buf, lower, upper);
257 } else { 257 } else {
258 warn("Memory squeeze: broadcast of new route failed\n"); 258 warn("Memory squeeze: broadcast of new route failed\n");
259 } 259 }
260} 260}
261 261
262void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, 262void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest,
263 u32 lower, u32 upper) 263 u32 lower, u32 upper)
264{ 264{
265 struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr); 265 struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
266 struct tipc_msg *msg; 266 struct tipc_msg *msg;
267 267
268 if (buf) { 268 if (buf) {
269 msg = buf_msg(buf); 269 msg = buf_msg(buf);
270 msg_set_remote_node(msg, dest); 270 msg_set_remote_node(msg, dest);
271 msg_set_type(msg, ROUTE_REMOVAL); 271 msg_set_type(msg, ROUTE_REMOVAL);
272 cluster_multicast(c_ptr, buf, lower, upper); 272 tipc_cltr_multicast(c_ptr, buf, lower, upper);
273 } else { 273 } else {
274 warn("Memory squeeze: broadcast of lost route failed\n"); 274 warn("Memory squeeze: broadcast of lost route failed\n");
275 } 275 }
276} 276}
277 277
278void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest) 278void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
279{ 279{
280 struct sk_buff *buf; 280 struct sk_buff *buf;
281 struct tipc_msg *msg; 281 struct tipc_msg *msg;
@@ -288,21 +288,21 @@ void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
288 assert(in_own_cluster(c_ptr->addr)); 288 assert(in_own_cluster(c_ptr->addr));
289 if (highest <= LOWEST_SLAVE) 289 if (highest <= LOWEST_SLAVE)
290 return; 290 return;
291 buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1, 291 buf = tipc_cltr_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
292 c_ptr->addr); 292 c_ptr->addr);
293 if (buf) { 293 if (buf) {
294 msg = buf_msg(buf); 294 msg = buf_msg(buf);
295 msg_set_remote_node(msg, c_ptr->addr); 295 msg_set_remote_node(msg, c_ptr->addr);
296 msg_set_type(msg, SLAVE_ROUTING_TABLE); 296 msg_set_type(msg, SLAVE_ROUTING_TABLE);
297 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) { 297 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
298 if (c_ptr->nodes[n_num] && 298 if (c_ptr->nodes[n_num] &&
299 node_has_active_links(c_ptr->nodes[n_num])) { 299 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
300 send = 1; 300 send = 1;
301 msg_set_dataoctet(msg, n_num); 301 msg_set_dataoctet(msg, n_num);
302 } 302 }
303 } 303 }
304 if (send) 304 if (send)
305 link_send(buf, dest, dest); 305 tipc_link_send(buf, dest, dest);
306 else 306 else
307 buf_discard(buf); 307 buf_discard(buf);
308 } else { 308 } else {
@@ -310,7 +310,7 @@ void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
310 } 310 }
311} 311}
312 312
313void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest) 313void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
314{ 314{
315 struct sk_buff *buf; 315 struct sk_buff *buf;
316 struct tipc_msg *msg; 316 struct tipc_msg *msg;
@@ -323,20 +323,20 @@ void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
323 assert(!is_slave(dest)); 323 assert(!is_slave(dest));
324 assert(in_own_cluster(dest)); 324 assert(in_own_cluster(dest));
325 highest = c_ptr->highest_node; 325 highest = c_ptr->highest_node;
326 buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr); 326 buf = tipc_cltr_prepare_routing_msg(highest + 1, c_ptr->addr);
327 if (buf) { 327 if (buf) {
328 msg = buf_msg(buf); 328 msg = buf_msg(buf);
329 msg_set_remote_node(msg, c_ptr->addr); 329 msg_set_remote_node(msg, c_ptr->addr);
330 msg_set_type(msg, EXT_ROUTING_TABLE); 330 msg_set_type(msg, EXT_ROUTING_TABLE);
331 for (n_num = 1; n_num <= highest; n_num++) { 331 for (n_num = 1; n_num <= highest; n_num++) {
332 if (c_ptr->nodes[n_num] && 332 if (c_ptr->nodes[n_num] &&
333 node_has_active_links(c_ptr->nodes[n_num])) { 333 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
334 send = 1; 334 send = 1;
335 msg_set_dataoctet(msg, n_num); 335 msg_set_dataoctet(msg, n_num);
336 } 336 }
337 } 337 }
338 if (send) 338 if (send)
339 link_send(buf, dest, dest); 339 tipc_link_send(buf, dest, dest);
340 else 340 else
341 buf_discard(buf); 341 buf_discard(buf);
342 } else { 342 } else {
@@ -344,7 +344,7 @@ void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
344 } 344 }
345} 345}
346 346
347void cluster_send_local_routes(struct cluster *c_ptr, u32 dest) 347void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
348{ 348{
349 struct sk_buff *buf; 349 struct sk_buff *buf;
350 struct tipc_msg *msg; 350 struct tipc_msg *msg;
@@ -354,20 +354,20 @@ void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
354 354
355 assert(is_slave(dest)); 355 assert(is_slave(dest));
356 assert(in_own_cluster(c_ptr->addr)); 356 assert(in_own_cluster(c_ptr->addr));
357 buf = cluster_prepare_routing_msg(highest, c_ptr->addr); 357 buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
358 if (buf) { 358 if (buf) {
359 msg = buf_msg(buf); 359 msg = buf_msg(buf);
360 msg_set_remote_node(msg, c_ptr->addr); 360 msg_set_remote_node(msg, c_ptr->addr);
361 msg_set_type(msg, LOCAL_ROUTING_TABLE); 361 msg_set_type(msg, LOCAL_ROUTING_TABLE);
362 for (n_num = 1; n_num <= highest; n_num++) { 362 for (n_num = 1; n_num <= highest; n_num++) {
363 if (c_ptr->nodes[n_num] && 363 if (c_ptr->nodes[n_num] &&
364 node_has_active_links(c_ptr->nodes[n_num])) { 364 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
365 send = 1; 365 send = 1;
366 msg_set_dataoctet(msg, n_num); 366 msg_set_dataoctet(msg, n_num);
367 } 367 }
368 } 368 }
369 if (send) 369 if (send)
370 link_send(buf, dest, dest); 370 tipc_link_send(buf, dest, dest);
371 else 371 else
372 buf_discard(buf); 372 buf_discard(buf);
373 } else { 373 } else {
@@ -375,7 +375,7 @@ void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
375 } 375 }
376} 376}
377 377
378void cluster_recv_routing_table(struct sk_buff *buf) 378void tipc_cltr_recv_routing_table(struct sk_buff *buf)
379{ 379{
380 struct tipc_msg *msg = buf_msg(buf); 380 struct tipc_msg *msg = buf_msg(buf);
381 struct cluster *c_ptr; 381 struct cluster *c_ptr;
@@ -388,9 +388,9 @@ void cluster_recv_routing_table(struct sk_buff *buf)
388 u32 c_num; 388 u32 c_num;
389 u32 n_num; 389 u32 n_num;
390 390
391 c_ptr = cluster_find(rem_node); 391 c_ptr = tipc_cltr_find(rem_node);
392 if (!c_ptr) { 392 if (!c_ptr) {
393 c_ptr = cluster_create(rem_node); 393 c_ptr = tipc_cltr_create(rem_node);
394 if (!c_ptr) { 394 if (!c_ptr) {
395 buf_discard(buf); 395 buf_discard(buf);
396 return; 396 return;
@@ -412,10 +412,10 @@ void cluster_recv_routing_table(struct sk_buff *buf)
412 u32 addr = tipc_addr(z_num, c_num, n_num); 412 u32 addr = tipc_addr(z_num, c_num, n_num);
413 n_ptr = c_ptr->nodes[n_num]; 413 n_ptr = c_ptr->nodes[n_num];
414 if (!n_ptr) { 414 if (!n_ptr) {
415 n_ptr = node_create(addr); 415 n_ptr = tipc_node_create(addr);
416 } 416 }
417 if (n_ptr) 417 if (n_ptr)
418 node_add_router(n_ptr, router); 418 tipc_node_add_router(n_ptr, router);
419 } 419 }
420 } 420 }
421 break; 421 break;
@@ -428,10 +428,10 @@ void cluster_recv_routing_table(struct sk_buff *buf)
428 u32 addr = tipc_addr(z_num, c_num, slave_num); 428 u32 addr = tipc_addr(z_num, c_num, slave_num);
429 n_ptr = c_ptr->nodes[slave_num]; 429 n_ptr = c_ptr->nodes[slave_num];
430 if (!n_ptr) { 430 if (!n_ptr) {
431 n_ptr = node_create(addr); 431 n_ptr = tipc_node_create(addr);
432 } 432 }
433 if (n_ptr) 433 if (n_ptr)
434 node_add_router(n_ptr, router); 434 tipc_node_add_router(n_ptr, router);
435 } 435 }
436 } 436 }
437 break; 437 break;
@@ -445,9 +445,9 @@ void cluster_recv_routing_table(struct sk_buff *buf)
445 } 445 }
446 n_ptr = c_ptr->nodes[tipc_node(rem_node)]; 446 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
447 if (!n_ptr) 447 if (!n_ptr)
448 n_ptr = node_create(rem_node); 448 n_ptr = tipc_node_create(rem_node);
449 if (n_ptr) 449 if (n_ptr)
450 node_add_router(n_ptr, router); 450 tipc_node_add_router(n_ptr, router);
451 break; 451 break;
452 case ROUTE_REMOVAL: 452 case ROUTE_REMOVAL:
453 if (!is_slave(tipc_own_addr)) { 453 if (!is_slave(tipc_own_addr)) {
@@ -459,7 +459,7 @@ void cluster_recv_routing_table(struct sk_buff *buf)
459 } 459 }
460 n_ptr = c_ptr->nodes[tipc_node(rem_node)]; 460 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
461 if (n_ptr) 461 if (n_ptr)
462 node_remove_router(n_ptr, router); 462 tipc_node_remove_router(n_ptr, router);
463 break; 463 break;
464 default: 464 default:
465 assert(!"Illegal routing manager message received\n"); 465 assert(!"Illegal routing manager message received\n");
@@ -467,7 +467,7 @@ void cluster_recv_routing_table(struct sk_buff *buf)
467 buf_discard(buf); 467 buf_discard(buf);
468} 468}
469 469
470void cluster_remove_as_router(struct cluster *c_ptr, u32 router) 470void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
471{ 471{
472 u32 start_entry; 472 u32 start_entry;
473 u32 tstop; 473 u32 tstop;
@@ -486,17 +486,17 @@ void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
486 486
487 for (n_num = start_entry; n_num <= tstop; n_num++) { 487 for (n_num = start_entry; n_num <= tstop; n_num++) {
488 if (c_ptr->nodes[n_num]) { 488 if (c_ptr->nodes[n_num]) {
489 node_remove_router(c_ptr->nodes[n_num], router); 489 tipc_node_remove_router(c_ptr->nodes[n_num], router);
490 } 490 }
491 } 491 }
492} 492}
493 493
494/** 494/**
495 * cluster_multicast - multicast message to local nodes 495 * tipc_cltr_multicast - multicast message to local nodes
496 */ 496 */
497 497
498void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 498void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
499 u32 lower, u32 upper) 499 u32 lower, u32 upper)
500{ 500{
501 struct sk_buff *buf_copy; 501 struct sk_buff *buf_copy;
502 struct node *n_ptr; 502 struct node *n_ptr;
@@ -505,9 +505,9 @@ void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
505 505
506 assert(lower <= upper); 506 assert(lower <= upper);
507 assert(((lower >= 1) && (lower <= tipc_max_nodes)) || 507 assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
508 ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave))); 508 ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
509 assert(((upper >= 1) && (upper <= tipc_max_nodes)) || 509 assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
510 ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave))); 510 ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
511 assert(in_own_cluster(c_ptr->addr)); 511 assert(in_own_cluster(c_ptr->addr));
512 512
513 tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node; 513 tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
@@ -515,22 +515,22 @@ void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
515 tstop = upper; 515 tstop = upper;
516 for (n_num = lower; n_num <= tstop; n_num++) { 516 for (n_num = lower; n_num <= tstop; n_num++) {
517 n_ptr = c_ptr->nodes[n_num]; 517 n_ptr = c_ptr->nodes[n_num];
518 if (n_ptr && node_has_active_links(n_ptr)) { 518 if (n_ptr && tipc_node_has_active_links(n_ptr)) {
519 buf_copy = skb_copy(buf, GFP_ATOMIC); 519 buf_copy = skb_copy(buf, GFP_ATOMIC);
520 if (buf_copy == NULL) 520 if (buf_copy == NULL)
521 break; 521 break;
522 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr); 522 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
523 link_send(buf_copy, n_ptr->addr, n_ptr->addr); 523 tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
524 } 524 }
525 } 525 }
526 buf_discard(buf); 526 buf_discard(buf);
527} 527}
528 528
529/** 529/**
530 * cluster_broadcast - broadcast message to all nodes within cluster 530 * tipc_cltr_broadcast - broadcast message to all nodes within cluster
531 */ 531 */
532 532
533void cluster_broadcast(struct sk_buff *buf) 533void tipc_cltr_broadcast(struct sk_buff *buf)
534{ 534{
535 struct sk_buff *buf_copy; 535 struct sk_buff *buf_copy;
536 struct cluster *c_ptr; 536 struct cluster *c_ptr;
@@ -541,7 +541,7 @@ void cluster_broadcast(struct sk_buff *buf)
541 u32 node_type; 541 u32 node_type;
542 542
543 if (tipc_mode == TIPC_NET_MODE) { 543 if (tipc_mode == TIPC_NET_MODE) {
544 c_ptr = cluster_find(tipc_own_addr); 544 c_ptr = tipc_cltr_find(tipc_own_addr);
545 assert(in_own_cluster(c_ptr->addr)); /* For now */ 545 assert(in_own_cluster(c_ptr->addr)); /* For now */
546 546
547 /* Send to standard nodes, then repeat loop sending to slaves */ 547 /* Send to standard nodes, then repeat loop sending to slaves */
@@ -550,14 +550,14 @@ void cluster_broadcast(struct sk_buff *buf)
550 for (node_type = 1; node_type <= 2; node_type++) { 550 for (node_type = 1; node_type <= 2; node_type++) {
551 for (n_num = tstart; n_num <= tstop; n_num++) { 551 for (n_num = tstart; n_num <= tstop; n_num++) {
552 n_ptr = c_ptr->nodes[n_num]; 552 n_ptr = c_ptr->nodes[n_num];
553 if (n_ptr && node_has_active_links(n_ptr)) { 553 if (n_ptr && tipc_node_has_active_links(n_ptr)) {
554 buf_copy = skb_copy(buf, GFP_ATOMIC); 554 buf_copy = skb_copy(buf, GFP_ATOMIC);
555 if (buf_copy == NULL) 555 if (buf_copy == NULL)
556 goto exit; 556 goto exit;
557 msg_set_destnode(buf_msg(buf_copy), 557 msg_set_destnode(buf_msg(buf_copy),
558 n_ptr->addr); 558 n_ptr->addr);
559 link_send(buf_copy, n_ptr->addr, 559 tipc_link_send(buf_copy, n_ptr->addr,
560 n_ptr->addr); 560 n_ptr->addr);
561 } 561 }
562 } 562 }
563 tstart = LOWEST_SLAVE; 563 tstart = LOWEST_SLAVE;
@@ -568,9 +568,9 @@ exit:
568 buf_discard(buf); 568 buf_discard(buf);
569} 569}
570 570
571int cluster_init(void) 571int tipc_cltr_init(void)
572{ 572{
573 highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves; 573 tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
574 return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM; 574 return tipc_cltr_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
575} 575}
576 576
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
index 1ffb095991d..9963642e105 100644
--- a/net/tipc/cluster.h
+++ b/net/tipc/cluster.h
@@ -60,29 +60,29 @@ struct cluster {
60}; 60};
61 61
62 62
63extern struct node **local_nodes; 63extern struct node **tipc_local_nodes;
64extern u32 highest_allowed_slave; 64extern u32 tipc_highest_allowed_slave;
65extern struct node_map cluster_bcast_nodes; 65extern struct node_map tipc_cltr_bcast_nodes;
66 66
67void cluster_remove_as_router(struct cluster *c_ptr, u32 router); 67void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router);
68void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest); 68void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest);
69struct node *cluster_select_node(struct cluster *c_ptr, u32 selector); 69struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector);
70u32 cluster_select_router(struct cluster *c_ptr, u32 ref); 70u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref);
71void cluster_recv_routing_table(struct sk_buff *buf); 71void tipc_cltr_recv_routing_table(struct sk_buff *buf);
72struct cluster *cluster_create(u32 addr); 72struct cluster *tipc_cltr_create(u32 addr);
73void cluster_delete(struct cluster *c_ptr); 73void tipc_cltr_delete(struct cluster *c_ptr);
74void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr); 74void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr);
75void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest); 75void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest);
76void cluster_broadcast(struct sk_buff *buf); 76void tipc_cltr_broadcast(struct sk_buff *buf);
77int cluster_init(void); 77int tipc_cltr_init(void);
78u32 cluster_next_node(struct cluster *c_ptr, u32 addr); 78u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr);
79void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi); 79void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
80void cluster_send_local_routes(struct cluster *c_ptr, u32 dest); 80void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest);
81void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi); 81void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
82 82
83static inline struct cluster *cluster_find(u32 addr) 83static inline struct cluster *tipc_cltr_find(u32 addr)
84{ 84{
85 struct _zone *z_ptr = zone_find(addr); 85 struct _zone *z_ptr = tipc_zone_find(addr);
86 86
87 if (z_ptr) 87 if (z_ptr)
88 return z_ptr->clusters[1]; 88 return z_ptr->clusters[1];
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 8ddef4fce2c..3c8e6740e5a 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -70,13 +70,13 @@ static int req_tlv_space; /* request message TLV area size */
70static int rep_headroom; /* reply message headroom to use */ 70static int rep_headroom; /* reply message headroom to use */
71 71
72 72
73void cfg_link_event(u32 addr, char *name, int up) 73void tipc_cfg_link_event(u32 addr, char *name, int up)
74{ 74{
75 /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */ 75 /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
76} 76}
77 77
78 78
79struct sk_buff *cfg_reply_alloc(int payload_size) 79struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
80{ 80{
81 struct sk_buff *buf; 81 struct sk_buff *buf;
82 82
@@ -86,14 +86,14 @@ struct sk_buff *cfg_reply_alloc(int payload_size)
86 return buf; 86 return buf;
87} 87}
88 88
89int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 89int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
90 void *tlv_data, int tlv_data_size) 90 void *tlv_data, int tlv_data_size)
91{ 91{
92 struct tlv_desc *tlv = (struct tlv_desc *)buf->tail; 92 struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
93 int new_tlv_space = TLV_SPACE(tlv_data_size); 93 int new_tlv_space = TLV_SPACE(tlv_data_size);
94 94
95 if (skb_tailroom(buf) < new_tlv_space) { 95 if (skb_tailroom(buf) < new_tlv_space) {
96 dbg("cfg_append_tlv unable to append TLV\n"); 96 dbg("tipc_cfg_append_tlv unable to append TLV\n");
97 return 0; 97 return 0;
98 } 98 }
99 skb_put(buf, new_tlv_space); 99 skb_put(buf, new_tlv_space);
@@ -104,28 +104,28 @@ int cfg_append_tlv(struct sk_buff *buf, int tlv_type,
104 return 1; 104 return 1;
105} 105}
106 106
107struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value) 107struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
108{ 108{
109 struct sk_buff *buf; 109 struct sk_buff *buf;
110 u32 value_net; 110 u32 value_net;
111 111
112 buf = cfg_reply_alloc(TLV_SPACE(sizeof(value))); 112 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
113 if (buf) { 113 if (buf) {
114 value_net = htonl(value); 114 value_net = htonl(value);
115 cfg_append_tlv(buf, tlv_type, &value_net, 115 tipc_cfg_append_tlv(buf, tlv_type, &value_net,
116 sizeof(value_net)); 116 sizeof(value_net));
117 } 117 }
118 return buf; 118 return buf;
119} 119}
120 120
121struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string) 121struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
122{ 122{
123 struct sk_buff *buf; 123 struct sk_buff *buf;
124 int string_len = strlen(string) + 1; 124 int string_len = strlen(string) + 1;
125 125
126 buf = cfg_reply_alloc(TLV_SPACE(string_len)); 126 buf = tipc_cfg_reply_alloc(TLV_SPACE(string_len));
127 if (buf) 127 if (buf)
128 cfg_append_tlv(buf, tlv_type, string, string_len); 128 tipc_cfg_append_tlv(buf, tlv_type, string, string_len);
129 return buf; 129 return buf;
130} 130}
131 131
@@ -246,7 +246,7 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg,
246 exit: 246 exit:
247 rmsg.result_len = htonl(msg_sect[1].iov_len); 247 rmsg.result_len = htonl(msg_sect[1].iov_len);
248 rmsg.retval = htonl(rv); 248 rmsg.retval = htonl(rv);
249 cfg_respond(msg_sect, 2u, orig); 249 tipc_cfg_respond(msg_sect, 2u, orig);
250} 250}
251#endif 251#endif
252 252
@@ -255,26 +255,26 @@ static struct sk_buff *cfg_enable_bearer(void)
255 struct tipc_bearer_config *args; 255 struct tipc_bearer_config *args;
256 256
257 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG)) 257 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
258 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 258 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
259 259
260 args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area); 260 args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
261 if (tipc_enable_bearer(args->name, 261 if (tipc_enable_bearer(args->name,
262 ntohl(args->detect_scope), 262 ntohl(args->detect_scope),
263 ntohl(args->priority))) 263 ntohl(args->priority)))
264 return cfg_reply_error_string("unable to enable bearer"); 264 return tipc_cfg_reply_error_string("unable to enable bearer");
265 265
266 return cfg_reply_none(); 266 return tipc_cfg_reply_none();
267} 267}
268 268
269static struct sk_buff *cfg_disable_bearer(void) 269static struct sk_buff *cfg_disable_bearer(void)
270{ 270{
271 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME)) 271 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
272 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 272 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
273 273
274 if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area))) 274 if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
275 return cfg_reply_error_string("unable to disable bearer"); 275 return tipc_cfg_reply_error_string("unable to disable bearer");
276 276
277 return cfg_reply_none(); 277 return tipc_cfg_reply_none();
278} 278}
279 279
280static struct sk_buff *cfg_set_own_addr(void) 280static struct sk_buff *cfg_set_own_addr(void)
@@ -282,25 +282,25 @@ static struct sk_buff *cfg_set_own_addr(void)
282 u32 addr; 282 u32 addr;
283 283
284 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 284 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
285 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 285 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
286 286
287 addr = *(u32 *)TLV_DATA(req_tlv_area); 287 addr = *(u32 *)TLV_DATA(req_tlv_area);
288 addr = ntohl(addr); 288 addr = ntohl(addr);
289 if (addr == tipc_own_addr) 289 if (addr == tipc_own_addr)
290 return cfg_reply_none(); 290 return tipc_cfg_reply_none();
291 if (!addr_node_valid(addr)) 291 if (!tipc_addr_node_valid(addr))
292 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 292 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
293 " (node address)"); 293 " (node address)");
294 if (tipc_own_addr) 294 if (tipc_own_addr)
295 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 295 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
296 " (cannot change node address once assigned)"); 296 " (cannot change node address once assigned)");
297 297
298 spin_unlock_bh(&config_lock); 298 spin_unlock_bh(&config_lock);
299 stop_net(); 299 tipc_core_stop_net();
300 tipc_own_addr = addr; 300 tipc_own_addr = addr;
301 start_net(); 301 tipc_core_start_net();
302 spin_lock_bh(&config_lock); 302 spin_lock_bh(&config_lock);
303 return cfg_reply_none(); 303 return tipc_cfg_reply_none();
304} 304}
305 305
306static struct sk_buff *cfg_set_remote_mng(void) 306static struct sk_buff *cfg_set_remote_mng(void)
@@ -308,12 +308,12 @@ static struct sk_buff *cfg_set_remote_mng(void)
308 u32 value; 308 u32 value;
309 309
310 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 310 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
311 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 311 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
312 312
313 value = *(u32 *)TLV_DATA(req_tlv_area); 313 value = *(u32 *)TLV_DATA(req_tlv_area);
314 value = ntohl(value); 314 value = ntohl(value);
315 tipc_remote_management = (value != 0); 315 tipc_remote_management = (value != 0);
316 return cfg_reply_none(); 316 return tipc_cfg_reply_none();
317} 317}
318 318
319static struct sk_buff *cfg_set_max_publications(void) 319static struct sk_buff *cfg_set_max_publications(void)
@@ -321,15 +321,15 @@ static struct sk_buff *cfg_set_max_publications(void)
321 u32 value; 321 u32 value;
322 322
323 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 323 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
324 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 324 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
325 325
326 value = *(u32 *)TLV_DATA(req_tlv_area); 326 value = *(u32 *)TLV_DATA(req_tlv_area);
327 value = ntohl(value); 327 value = ntohl(value);
328 if (value != delimit(value, 1, 65535)) 328 if (value != delimit(value, 1, 65535))
329 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 329 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
330 " (max publications must be 1-65535)"); 330 " (max publications must be 1-65535)");
331 tipc_max_publications = value; 331 tipc_max_publications = value;
332 return cfg_reply_none(); 332 return tipc_cfg_reply_none();
333} 333}
334 334
335static struct sk_buff *cfg_set_max_subscriptions(void) 335static struct sk_buff *cfg_set_max_subscriptions(void)
@@ -337,15 +337,15 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
337 u32 value; 337 u32 value;
338 338
339 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 339 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
340 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 340 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
341 341
342 value = *(u32 *)TLV_DATA(req_tlv_area); 342 value = *(u32 *)TLV_DATA(req_tlv_area);
343 value = ntohl(value); 343 value = ntohl(value);
344 if (value != delimit(value, 1, 65535)) 344 if (value != delimit(value, 1, 65535))
345 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 345 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
346 " (max subscriptions must be 1-65535"); 346 " (max subscriptions must be 1-65535");
347 tipc_max_subscriptions = value; 347 tipc_max_subscriptions = value;
348 return cfg_reply_none(); 348 return tipc_cfg_reply_none();
349} 349}
350 350
351static struct sk_buff *cfg_set_max_ports(void) 351static struct sk_buff *cfg_set_max_ports(void)
@@ -354,31 +354,31 @@ static struct sk_buff *cfg_set_max_ports(void)
354 u32 value; 354 u32 value;
355 355
356 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 356 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
357 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 357 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
358 value = *(u32 *)TLV_DATA(req_tlv_area); 358 value = *(u32 *)TLV_DATA(req_tlv_area);
359 value = ntohl(value); 359 value = ntohl(value);
360 if (value != delimit(value, 127, 65535)) 360 if (value != delimit(value, 127, 65535))
361 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 361 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
362 " (max ports must be 127-65535)"); 362 " (max ports must be 127-65535)");
363 363
364 if (value == tipc_max_ports) 364 if (value == tipc_max_ports)
365 return cfg_reply_none(); 365 return tipc_cfg_reply_none();
366 366
367 if (atomic_read(&tipc_user_count) > 2) 367 if (atomic_read(&tipc_user_count) > 2)
368 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 368 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
369 " (cannot change max ports while TIPC users exist)"); 369 " (cannot change max ports while TIPC users exist)");
370 370
371 spin_unlock_bh(&config_lock); 371 spin_unlock_bh(&config_lock);
372 orig_mode = tipc_get_mode(); 372 orig_mode = tipc_get_mode();
373 if (orig_mode == TIPC_NET_MODE) 373 if (orig_mode == TIPC_NET_MODE)
374 stop_net(); 374 tipc_core_stop_net();
375 stop_core(); 375 tipc_core_stop();
376 tipc_max_ports = value; 376 tipc_max_ports = value;
377 start_core(); 377 tipc_core_start();
378 if (orig_mode == TIPC_NET_MODE) 378 if (orig_mode == TIPC_NET_MODE)
379 start_net(); 379 tipc_core_start_net();
380 spin_lock_bh(&config_lock); 380 spin_lock_bh(&config_lock);
381 return cfg_reply_none(); 381 return tipc_cfg_reply_none();
382} 382}
383 383
384static struct sk_buff *set_net_max(int value, int *parameter) 384static struct sk_buff *set_net_max(int value, int *parameter)
@@ -388,13 +388,13 @@ static struct sk_buff *set_net_max(int value, int *parameter)
388 if (value != *parameter) { 388 if (value != *parameter) {
389 orig_mode = tipc_get_mode(); 389 orig_mode = tipc_get_mode();
390 if (orig_mode == TIPC_NET_MODE) 390 if (orig_mode == TIPC_NET_MODE)
391 stop_net(); 391 tipc_core_stop_net();
392 *parameter = value; 392 *parameter = value;
393 if (orig_mode == TIPC_NET_MODE) 393 if (orig_mode == TIPC_NET_MODE)
394 start_net(); 394 tipc_core_start_net();
395 } 395 }
396 396
397 return cfg_reply_none(); 397 return tipc_cfg_reply_none();
398} 398}
399 399
400static struct sk_buff *cfg_set_max_zones(void) 400static struct sk_buff *cfg_set_max_zones(void)
@@ -402,12 +402,12 @@ static struct sk_buff *cfg_set_max_zones(void)
402 u32 value; 402 u32 value;
403 403
404 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 404 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
405 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 405 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
406 value = *(u32 *)TLV_DATA(req_tlv_area); 406 value = *(u32 *)TLV_DATA(req_tlv_area);
407 value = ntohl(value); 407 value = ntohl(value);
408 if (value != delimit(value, 1, 255)) 408 if (value != delimit(value, 1, 255))
409 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 409 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
410 " (max zones must be 1-255)"); 410 " (max zones must be 1-255)");
411 return set_net_max(value, &tipc_max_zones); 411 return set_net_max(value, &tipc_max_zones);
412} 412}
413 413
@@ -416,13 +416,13 @@ static struct sk_buff *cfg_set_max_clusters(void)
416 u32 value; 416 u32 value;
417 417
418 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 418 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
419 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 419 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
420 value = *(u32 *)TLV_DATA(req_tlv_area); 420 value = *(u32 *)TLV_DATA(req_tlv_area);
421 value = ntohl(value); 421 value = ntohl(value);
422 if (value != 1) 422 if (value != 1)
423 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 423 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
424 " (max clusters fixed at 1)"); 424 " (max clusters fixed at 1)");
425 return cfg_reply_none(); 425 return tipc_cfg_reply_none();
426} 426}
427 427
428static struct sk_buff *cfg_set_max_nodes(void) 428static struct sk_buff *cfg_set_max_nodes(void)
@@ -430,12 +430,12 @@ static struct sk_buff *cfg_set_max_nodes(void)
430 u32 value; 430 u32 value;
431 431
432 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 432 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
433 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 433 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
434 value = *(u32 *)TLV_DATA(req_tlv_area); 434 value = *(u32 *)TLV_DATA(req_tlv_area);
435 value = ntohl(value); 435 value = ntohl(value);
436 if (value != delimit(value, 8, 2047)) 436 if (value != delimit(value, 8, 2047))
437 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 437 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
438 " (max nodes must be 8-2047)"); 438 " (max nodes must be 8-2047)");
439 return set_net_max(value, &tipc_max_nodes); 439 return set_net_max(value, &tipc_max_nodes);
440} 440}
441 441
@@ -444,13 +444,13 @@ static struct sk_buff *cfg_set_max_slaves(void)
444 u32 value; 444 u32 value;
445 445
446 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 446 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
447 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 447 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
448 value = *(u32 *)TLV_DATA(req_tlv_area); 448 value = *(u32 *)TLV_DATA(req_tlv_area);
449 value = ntohl(value); 449 value = ntohl(value);
450 if (value != 0) 450 if (value != 0)
451 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 451 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
452 " (max secondary nodes fixed at 0)"); 452 " (max secondary nodes fixed at 0)");
453 return cfg_reply_none(); 453 return tipc_cfg_reply_none();
454} 454}
455 455
456static struct sk_buff *cfg_set_netid(void) 456static struct sk_buff *cfg_set_netid(void)
@@ -458,22 +458,22 @@ static struct sk_buff *cfg_set_netid(void)
458 u32 value; 458 u32 value;
459 459
460 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 460 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
461 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 461 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
462 value = *(u32 *)TLV_DATA(req_tlv_area); 462 value = *(u32 *)TLV_DATA(req_tlv_area);
463 value = ntohl(value); 463 value = ntohl(value);
464 if (value != delimit(value, 1, 9999)) 464 if (value != delimit(value, 1, 9999))
465 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 465 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
466 " (network id must be 1-9999)"); 466 " (network id must be 1-9999)");
467 467
468 if (tipc_own_addr) 468 if (tipc_own_addr)
469 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 469 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
470 " (cannot change network id once part of network)"); 470 " (cannot change network id once part of network)");
471 471
472 return set_net_max(value, &tipc_net_id); 472 return set_net_max(value, &tipc_net_id);
473} 473}
474 474
475struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area, 475struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
476 int request_space, int reply_headroom) 476 int request_space, int reply_headroom)
477{ 477{
478 struct sk_buff *rep_tlv_buf; 478 struct sk_buff *rep_tlv_buf;
479 479
@@ -490,19 +490,19 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
490 if (likely(orig_node == tipc_own_addr)) { 490 if (likely(orig_node == tipc_own_addr)) {
491 /* command is permitted */ 491 /* command is permitted */
492 } else if (cmd >= 0x8000) { 492 } else if (cmd >= 0x8000) {
493 rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 493 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
494 " (cannot be done remotely)"); 494 " (cannot be done remotely)");
495 goto exit; 495 goto exit;
496 } else if (!tipc_remote_management) { 496 } else if (!tipc_remote_management) {
497 rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NO_REMOTE); 497 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
498 goto exit; 498 goto exit;
499 } 499 }
500 else if (cmd >= 0x4000) { 500 else if (cmd >= 0x4000) {
501 u32 domain = 0; 501 u32 domain = 0;
502 502
503 if ((nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) || 503 if ((tipc_nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
504 (domain != orig_node)) { 504 (domain != orig_node)) {
505 rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR); 505 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
506 goto exit; 506 goto exit;
507 } 507 }
508 } 508 }
@@ -511,50 +511,50 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
511 511
512 switch (cmd) { 512 switch (cmd) {
513 case TIPC_CMD_NOOP: 513 case TIPC_CMD_NOOP:
514 rep_tlv_buf = cfg_reply_none(); 514 rep_tlv_buf = tipc_cfg_reply_none();
515 break; 515 break;
516 case TIPC_CMD_GET_NODES: 516 case TIPC_CMD_GET_NODES:
517 rep_tlv_buf = node_get_nodes(req_tlv_area, req_tlv_space); 517 rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space);
518 break; 518 break;
519 case TIPC_CMD_GET_LINKS: 519 case TIPC_CMD_GET_LINKS:
520 rep_tlv_buf = node_get_links(req_tlv_area, req_tlv_space); 520 rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space);
521 break; 521 break;
522 case TIPC_CMD_SHOW_LINK_STATS: 522 case TIPC_CMD_SHOW_LINK_STATS:
523 rep_tlv_buf = link_cmd_show_stats(req_tlv_area, req_tlv_space); 523 rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space);
524 break; 524 break;
525 case TIPC_CMD_RESET_LINK_STATS: 525 case TIPC_CMD_RESET_LINK_STATS:
526 rep_tlv_buf = link_cmd_reset_stats(req_tlv_area, req_tlv_space); 526 rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space);
527 break; 527 break;
528 case TIPC_CMD_SHOW_NAME_TABLE: 528 case TIPC_CMD_SHOW_NAME_TABLE:
529 rep_tlv_buf = nametbl_get(req_tlv_area, req_tlv_space); 529 rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
530 break; 530 break;
531 case TIPC_CMD_GET_BEARER_NAMES: 531 case TIPC_CMD_GET_BEARER_NAMES:
532 rep_tlv_buf = bearer_get_names(); 532 rep_tlv_buf = tipc_bearer_get_names();
533 break; 533 break;
534 case TIPC_CMD_GET_MEDIA_NAMES: 534 case TIPC_CMD_GET_MEDIA_NAMES:
535 rep_tlv_buf = media_get_names(); 535 rep_tlv_buf = tipc_media_get_names();
536 break; 536 break;
537 case TIPC_CMD_SHOW_PORTS: 537 case TIPC_CMD_SHOW_PORTS:
538 rep_tlv_buf = port_get_ports(); 538 rep_tlv_buf = tipc_port_get_ports();
539 break; 539 break;
540#if 0 540#if 0
541 case TIPC_CMD_SHOW_PORT_STATS: 541 case TIPC_CMD_SHOW_PORT_STATS:
542 rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space); 542 rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
543 break; 543 break;
544 case TIPC_CMD_RESET_PORT_STATS: 544 case TIPC_CMD_RESET_PORT_STATS:
545 rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED); 545 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
546 break; 546 break;
547#endif 547#endif
548 case TIPC_CMD_SET_LOG_SIZE: 548 case TIPC_CMD_SET_LOG_SIZE:
549 rep_tlv_buf = log_resize(req_tlv_area, req_tlv_space); 549 rep_tlv_buf = tipc_log_resize(req_tlv_area, req_tlv_space);
550 break; 550 break;
551 case TIPC_CMD_DUMP_LOG: 551 case TIPC_CMD_DUMP_LOG:
552 rep_tlv_buf = log_dump(); 552 rep_tlv_buf = tipc_log_dump();
553 break; 553 break;
554 case TIPC_CMD_SET_LINK_TOL: 554 case TIPC_CMD_SET_LINK_TOL:
555 case TIPC_CMD_SET_LINK_PRI: 555 case TIPC_CMD_SET_LINK_PRI:
556 case TIPC_CMD_SET_LINK_WINDOW: 556 case TIPC_CMD_SET_LINK_WINDOW:
557 rep_tlv_buf = link_cmd_config(req_tlv_area, req_tlv_space, cmd); 557 rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd);
558 break; 558 break;
559 case TIPC_CMD_ENABLE_BEARER: 559 case TIPC_CMD_ENABLE_BEARER:
560 rep_tlv_buf = cfg_enable_bearer(); 560 rep_tlv_buf = cfg_enable_bearer();
@@ -593,31 +593,31 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
593 rep_tlv_buf = cfg_set_netid(); 593 rep_tlv_buf = cfg_set_netid();
594 break; 594 break;
595 case TIPC_CMD_GET_REMOTE_MNG: 595 case TIPC_CMD_GET_REMOTE_MNG:
596 rep_tlv_buf = cfg_reply_unsigned(tipc_remote_management); 596 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_remote_management);
597 break; 597 break;
598 case TIPC_CMD_GET_MAX_PORTS: 598 case TIPC_CMD_GET_MAX_PORTS:
599 rep_tlv_buf = cfg_reply_unsigned(tipc_max_ports); 599 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
600 break; 600 break;
601 case TIPC_CMD_GET_MAX_PUBL: 601 case TIPC_CMD_GET_MAX_PUBL:
602 rep_tlv_buf = cfg_reply_unsigned(tipc_max_publications); 602 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
603 break; 603 break;
604 case TIPC_CMD_GET_MAX_SUBSCR: 604 case TIPC_CMD_GET_MAX_SUBSCR:
605 rep_tlv_buf = cfg_reply_unsigned(tipc_max_subscriptions); 605 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
606 break; 606 break;
607 case TIPC_CMD_GET_MAX_ZONES: 607 case TIPC_CMD_GET_MAX_ZONES:
608 rep_tlv_buf = cfg_reply_unsigned(tipc_max_zones); 608 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_zones);
609 break; 609 break;
610 case TIPC_CMD_GET_MAX_CLUSTERS: 610 case TIPC_CMD_GET_MAX_CLUSTERS:
611 rep_tlv_buf = cfg_reply_unsigned(tipc_max_clusters); 611 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_clusters);
612 break; 612 break;
613 case TIPC_CMD_GET_MAX_NODES: 613 case TIPC_CMD_GET_MAX_NODES:
614 rep_tlv_buf = cfg_reply_unsigned(tipc_max_nodes); 614 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_nodes);
615 break; 615 break;
616 case TIPC_CMD_GET_MAX_SLAVES: 616 case TIPC_CMD_GET_MAX_SLAVES:
617 rep_tlv_buf = cfg_reply_unsigned(tipc_max_slaves); 617 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_slaves);
618 break; 618 break;
619 case TIPC_CMD_GET_NETID: 619 case TIPC_CMD_GET_NETID:
620 rep_tlv_buf = cfg_reply_unsigned(tipc_net_id); 620 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
621 break; 621 break;
622 default: 622 default:
623 rep_tlv_buf = NULL; 623 rep_tlv_buf = NULL;
@@ -655,11 +655,11 @@ static void cfg_named_msg_event(void *userdata,
655 655
656 /* Generate reply for request (if can't, return request) */ 656 /* Generate reply for request (if can't, return request) */
657 657
658 rep_buf = cfg_do_cmd(orig->node, 658 rep_buf = tipc_cfg_do_cmd(orig->node,
659 ntohs(req_hdr->tcm_type), 659 ntohs(req_hdr->tcm_type),
660 msg + sizeof(*req_hdr), 660 msg + sizeof(*req_hdr),
661 size - sizeof(*req_hdr), 661 size - sizeof(*req_hdr),
662 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr)); 662 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
663 if (rep_buf) { 663 if (rep_buf) {
664 skb_push(rep_buf, sizeof(*rep_hdr)); 664 skb_push(rep_buf, sizeof(*rep_hdr));
665 rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data; 665 rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
@@ -675,7 +675,7 @@ static void cfg_named_msg_event(void *userdata,
675 tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len); 675 tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
676} 676}
677 677
678int cfg_init(void) 678int tipc_cfg_init(void)
679{ 679{
680 struct tipc_name_seq seq; 680 struct tipc_name_seq seq;
681 int res; 681 int res;
@@ -696,7 +696,7 @@ int cfg_init(void)
696 696
697 seq.type = TIPC_CFG_SRV; 697 seq.type = TIPC_CFG_SRV;
698 seq.lower = seq.upper = tipc_own_addr; 698 seq.lower = seq.upper = tipc_own_addr;
699 res = nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq); 699 res = tipc_nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
700 if (res) 700 if (res)
701 goto failed; 701 goto failed;
702 702
@@ -709,7 +709,7 @@ failed:
709 return res; 709 return res;
710} 710}
711 711
712void cfg_stop(void) 712void tipc_cfg_stop(void)
713{ 713{
714 if (mng.user_ref) { 714 if (mng.user_ref) {
715 tipc_detach(mng.user_ref); 715 tipc_detach(mng.user_ref);
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 646377d4045..7a728f954d8 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -39,42 +39,41 @@
39 39
40/* ---------------------------------------------------------------------- */ 40/* ---------------------------------------------------------------------- */
41 41
42#include <linux/tipc.h> 42#include "core.h"
43#include <linux/tipc_config.h>
44#include "link.h" 43#include "link.h"
45 44
46struct sk_buff *cfg_reply_alloc(int payload_size); 45struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
47int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 46int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
48 void *tlv_data, int tlv_data_size); 47 void *tlv_data, int tlv_data_size);
49struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value); 48struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value);
50struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string); 49struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
51 50
52static inline struct sk_buff *cfg_reply_none(void) 51static inline struct sk_buff *tipc_cfg_reply_none(void)
53{ 52{
54 return cfg_reply_alloc(0); 53 return tipc_cfg_reply_alloc(0);
55} 54}
56 55
57static inline struct sk_buff *cfg_reply_unsigned(u32 value) 56static inline struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
58{ 57{
59 return cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value); 58 return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
60} 59}
61 60
62static inline struct sk_buff *cfg_reply_error_string(char *string) 61static inline struct sk_buff *tipc_cfg_reply_error_string(char *string)
63{ 62{
64 return cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string); 63 return tipc_cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
65} 64}
66 65
67static inline struct sk_buff *cfg_reply_ultra_string(char *string) 66static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
68{ 67{
69 return cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string); 68 return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
70} 69}
71 70
72struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, 71struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
73 const void *req_tlv_area, int req_tlv_space, 72 const void *req_tlv_area, int req_tlv_space,
74 int headroom); 73 int headroom);
75 74
76void cfg_link_event(u32 addr, char *name, int up); 75void tipc_cfg_link_event(u32 addr, char *name, int up);
77int cfg_init(void); 76int tipc_cfg_init(void);
78void cfg_stop(void); 77void tipc_cfg_stop(void);
79 78
80#endif 79#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
index e83ac06e31b..3d0a8ee4e1d 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -37,7 +37,6 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/version.h>
41#include <linux/random.h> 40#include <linux/random.h>
42 41
43#include "core.h" 42#include "core.h"
@@ -49,14 +48,14 @@
49#include "subscr.h" 48#include "subscr.h"
50#include "config.h" 49#include "config.h"
51 50
52int eth_media_start(void); 51int tipc_eth_media_start(void);
53void eth_media_stop(void); 52void tipc_eth_media_stop(void);
54int handler_start(void); 53int tipc_handler_start(void);
55void handler_stop(void); 54void tipc_handler_stop(void);
56int socket_init(void); 55int tipc_socket_init(void);
57void socket_stop(void); 56void tipc_socket_stop(void);
58int netlink_start(void); 57int tipc_netlink_start(void);
59void netlink_stop(void); 58void tipc_netlink_stop(void);
60 59
61#define MOD_NAME "tipc_start: " 60#define MOD_NAME "tipc_start: "
62 61
@@ -113,56 +112,56 @@ int tipc_get_mode(void)
113} 112}
114 113
115/** 114/**
116 * stop_net - shut down TIPC networking sub-systems 115 * tipc_core_stop_net - shut down TIPC networking sub-systems
117 */ 116 */
118 117
119void stop_net(void) 118void tipc_core_stop_net(void)
120{ 119{
121 eth_media_stop(); 120 tipc_eth_media_stop();
122 tipc_stop_net(); 121 tipc_net_stop();
123} 122}
124 123
125/** 124/**
126 * start_net - start TIPC networking sub-systems 125 * start_net - start TIPC networking sub-systems
127 */ 126 */
128 127
129int start_net(void) 128int tipc_core_start_net(void)
130{ 129{
131 int res; 130 int res;
132 131
133 if ((res = tipc_start_net()) || 132 if ((res = tipc_net_start()) ||
134 (res = eth_media_start())) { 133 (res = tipc_eth_media_start())) {
135 stop_net(); 134 tipc_core_stop_net();
136 } 135 }
137 return res; 136 return res;
138} 137}
139 138
140/** 139/**
141 * stop_core - switch TIPC from SINGLE NODE to NOT RUNNING mode 140 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
142 */ 141 */
143 142
144void stop_core(void) 143void tipc_core_stop(void)
145{ 144{
146 if (tipc_mode != TIPC_NODE_MODE) 145 if (tipc_mode != TIPC_NODE_MODE)
147 return; 146 return;
148 147
149 tipc_mode = TIPC_NOT_RUNNING; 148 tipc_mode = TIPC_NOT_RUNNING;
150 149
151 netlink_stop(); 150 tipc_netlink_stop();
152 handler_stop(); 151 tipc_handler_stop();
153 cfg_stop(); 152 tipc_cfg_stop();
154 subscr_stop(); 153 tipc_subscr_stop();
155 reg_stop(); 154 tipc_reg_stop();
156 nametbl_stop(); 155 tipc_nametbl_stop();
157 ref_table_stop(); 156 tipc_ref_table_stop();
158 socket_stop(); 157 tipc_socket_stop();
159} 158}
160 159
161/** 160/**
162 * start_core - switch TIPC from NOT RUNNING to SINGLE NODE mode 161 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
163 */ 162 */
164 163
165int start_core(void) 164int tipc_core_start(void)
166{ 165{
167 int res; 166 int res;
168 167
@@ -172,16 +171,16 @@ int start_core(void)
172 get_random_bytes(&tipc_random, sizeof(tipc_random)); 171 get_random_bytes(&tipc_random, sizeof(tipc_random));
173 tipc_mode = TIPC_NODE_MODE; 172 tipc_mode = TIPC_NODE_MODE;
174 173
175 if ((res = handler_start()) || 174 if ((res = tipc_handler_start()) ||
176 (res = ref_table_init(tipc_max_ports + tipc_max_subscriptions, 175 (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions,
177 tipc_random)) || 176 tipc_random)) ||
178 (res = reg_start()) || 177 (res = tipc_reg_start()) ||
179 (res = nametbl_init()) || 178 (res = tipc_nametbl_init()) ||
180 (res = k_signal((Handler)subscr_start, 0)) || 179 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
181 (res = k_signal((Handler)cfg_init, 0)) || 180 (res = tipc_k_signal((Handler)tipc_cfg_init, 0)) ||
182 (res = netlink_start()) || 181 (res = tipc_netlink_start()) ||
183 (res = socket_init())) { 182 (res = tipc_socket_init())) {
184 stop_core(); 183 tipc_core_stop();
185 } 184 }
186 return res; 185 return res;
187} 186}
@@ -191,7 +190,7 @@ static int __init tipc_init(void)
191{ 190{
192 int res; 191 int res;
193 192
194 log_reinit(CONFIG_TIPC_LOG); 193 tipc_log_reinit(CONFIG_TIPC_LOG);
195 info("Activated (compiled " __DATE__ " " __TIME__ ")\n"); 194 info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
196 195
197 tipc_own_addr = 0; 196 tipc_own_addr = 0;
@@ -205,7 +204,7 @@ static int __init tipc_init(void)
205 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047); 204 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
206 tipc_net_id = 4711; 205 tipc_net_id = 4711;
207 206
208 if ((res = start_core())) 207 if ((res = tipc_core_start()))
209 err("Unable to start in single node mode\n"); 208 err("Unable to start in single node mode\n");
210 else 209 else
211 info("Started in single node mode\n"); 210 info("Started in single node mode\n");
@@ -214,10 +213,10 @@ static int __init tipc_init(void)
214 213
215static void __exit tipc_exit(void) 214static void __exit tipc_exit(void)
216{ 215{
217 stop_net(); 216 tipc_core_stop_net();
218 stop_core(); 217 tipc_core_stop();
219 info("Deactivated\n"); 218 info("Deactivated\n");
220 log_stop(); 219 tipc_log_stop();
221} 220}
222 221
223module_init(tipc_init); 222module_init(tipc_init);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index b69b60b2cc8..1f2e8b27a13 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -37,6 +37,11 @@
37#ifndef _TIPC_CORE_H 37#ifndef _TIPC_CORE_H
38#define _TIPC_CORE_H 38#define _TIPC_CORE_H
39 39
40#include <linux/tipc.h>
41#include <linux/tipc_config.h>
42#include <net/tipc/tipc_msg.h>
43#include <net/tipc/tipc_port.h>
44#include <net/tipc/tipc_bearer.h>
40#include <net/tipc/tipc.h> 45#include <net/tipc/tipc.h>
41#include <linux/types.h> 46#include <linux/types.h>
42#include <linux/kernel.h> 47#include <linux/kernel.h>
@@ -60,9 +65,9 @@
60#define assert(i) BUG_ON(!(i)) 65#define assert(i) BUG_ON(!(i))
61 66
62struct tipc_msg; 67struct tipc_msg;
63extern struct print_buf *CONS, *LOG; 68extern struct print_buf *TIPC_CONS, *TIPC_LOG;
64extern struct print_buf *TEE(struct print_buf *, struct print_buf *); 69extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *);
65void msg_print(struct print_buf*,struct tipc_msg *,const char*); 70void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*);
66void tipc_printf(struct print_buf *, const char *fmt, ...); 71void tipc_printf(struct print_buf *, const char *fmt, ...);
67void tipc_dump(struct print_buf*,const char *fmt, ...); 72void tipc_dump(struct print_buf*,const char *fmt, ...);
68 73
@@ -79,7 +84,7 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
79#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg) 84#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
80 85
81#define dbg(fmt, arg...) do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0) 86#define dbg(fmt, arg...) do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
82#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) msg_print(DBG_OUTPUT, msg, txt);} while(0) 87#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0)
83#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0) 88#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
84 89
85 90
@@ -89,15 +94,15 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
89 * here, or on a per .c file basis, by redefining these symbols. The following 94 * here, or on a per .c file basis, by redefining these symbols. The following
90 * print buffer options are available: 95 * print buffer options are available:
91 * 96 *
92 * NULL : Output to null print buffer (i.e. print nowhere) 97 * NULL : Output to null print buffer (i.e. print nowhere)
93 * CONS : Output to system console 98 * TIPC_CONS : Output to system console
94 * LOG : Output to TIPC log buffer 99 * TIPC_LOG : Output to TIPC log buffer
95 * &buf : Output to user-defined buffer (struct print_buf *) 100 * &buf : Output to user-defined buffer (struct print_buf *)
96 * TEE(&buf_a,&buf_b) : Output to two print buffers (eg. TEE(CONS,LOG) ) 101 * TIPC_TEE(&buf_a,&buf_b) : Output to two print buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG) )
97 */ 102 */
98 103
99#ifndef TIPC_OUTPUT 104#ifndef TIPC_OUTPUT
100#define TIPC_OUTPUT TEE(CONS,LOG) 105#define TIPC_OUTPUT TIPC_TEE(TIPC_CONS,TIPC_LOG)
101#endif 106#endif
102 107
103#ifndef DBG_OUTPUT 108#ifndef DBG_OUTPUT
@@ -162,10 +167,10 @@ extern atomic_t tipc_user_count;
162 * Routines available to privileged subsystems 167 * Routines available to privileged subsystems
163 */ 168 */
164 169
165extern int start_core(void); 170extern int tipc_core_start(void);
166extern void stop_core(void); 171extern void tipc_core_stop(void);
167extern int start_net(void); 172extern int tipc_core_start_net(void);
168extern void stop_net(void); 173extern void tipc_core_stop_net(void);
169 174
170static inline int delimit(int val, int min, int max) 175static inline int delimit(int val, int min, int max)
171{ 176{
@@ -183,7 +188,7 @@ static inline int delimit(int val, int min, int max)
183 188
184typedef void (*Handler) (unsigned long); 189typedef void (*Handler) (unsigned long);
185 190
186u32 k_signal(Handler routine, unsigned long argument); 191u32 tipc_k_signal(Handler routine, unsigned long argument);
187 192
188/** 193/**
189 * k_init_timer - initialize a timer 194 * k_init_timer - initialize a timer
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 7ed60a1cfbb..4f4beefa783 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -44,10 +44,10 @@ static char print_string[MAX_STRING];
44static spinlock_t print_lock = SPIN_LOCK_UNLOCKED; 44static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
45 45
46static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; 46static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
47struct print_buf *CONS = &cons_buf; 47struct print_buf *TIPC_CONS = &cons_buf;
48 48
49static struct print_buf log_buf = { NULL, 0, NULL, NULL }; 49static struct print_buf log_buf = { NULL, 0, NULL, NULL };
50struct print_buf *LOG = &log_buf; 50struct print_buf *TIPC_LOG = &log_buf;
51 51
52 52
53#define FORMAT(PTR,LEN,FMT) \ 53#define FORMAT(PTR,LEN,FMT) \
@@ -66,15 +66,15 @@ struct print_buf *LOG = &log_buf;
66 * simultaneous use of the print buffer(s) being manipulated. 66 * simultaneous use of the print buffer(s) being manipulated.
67 * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of 67 * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
68 * 'print_string' and to protect its print buffer(s). 68 * 'print_string' and to protect its print buffer(s).
69 * 3) TEE() uses 'print_lock' to protect its print buffer(s). 69 * 3) TIPC_TEE() uses 'print_lock' to protect its print buffer(s).
70 * 4) Routines of the form log_XXX() uses 'print_lock' to protect LOG. 70 * 4) Routines of the form log_XXX() uses 'print_lock' to protect TIPC_LOG.
71 */ 71 */
72 72
73/** 73/**
74 * printbuf_init - initialize print buffer to empty 74 * tipc_printbuf_init - initialize print buffer to empty
75 */ 75 */
76 76
77void printbuf_init(struct print_buf *pb, char *raw, u32 sz) 77void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 sz)
78{ 78{
79 if (!pb || !raw || (sz < (MAX_STRING + 1))) 79 if (!pb || !raw || (sz < (MAX_STRING + 1)))
80 return; 80 return;
@@ -87,26 +87,26 @@ void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
87} 87}
88 88
89/** 89/**
90 * printbuf_reset - reinitialize print buffer to empty state 90 * tipc_printbuf_reset - reinitialize print buffer to empty state
91 */ 91 */
92 92
93void printbuf_reset(struct print_buf *pb) 93void tipc_printbuf_reset(struct print_buf *pb)
94{ 94{
95 if (pb && pb->buf) 95 if (pb && pb->buf)
96 printbuf_init(pb, pb->buf, pb->size); 96 tipc_printbuf_init(pb, pb->buf, pb->size);
97} 97}
98 98
99/** 99/**
100 * printbuf_empty - test if print buffer is in empty state 100 * tipc_printbuf_empty - test if print buffer is in empty state
101 */ 101 */
102 102
103int printbuf_empty(struct print_buf *pb) 103int tipc_printbuf_empty(struct print_buf *pb)
104{ 104{
105 return (!pb || !pb->buf || (pb->crs == pb->buf)); 105 return (!pb || !pb->buf || (pb->crs == pb->buf));
106} 106}
107 107
108/** 108/**
109 * printbuf_validate - check for print buffer overflow 109 * tipc_printbuf_validate - check for print buffer overflow
110 * 110 *
111 * Verifies that a print buffer has captured all data written to it. 111 * Verifies that a print buffer has captured all data written to it.
112 * If data has been lost, linearize buffer and prepend an error message 112 * If data has been lost, linearize buffer and prepend an error message
@@ -114,7 +114,7 @@ int printbuf_empty(struct print_buf *pb)
114 * Returns length of print buffer data string (including trailing NULL) 114 * Returns length of print buffer data string (including trailing NULL)
115 */ 115 */
116 116
117int printbuf_validate(struct print_buf *pb) 117int tipc_printbuf_validate(struct print_buf *pb)
118{ 118{
119 char *err = " *** PRINT BUFFER WRAPPED AROUND ***\n"; 119 char *err = " *** PRINT BUFFER WRAPPED AROUND ***\n";
120 char *cp_buf; 120 char *cp_buf;
@@ -126,13 +126,13 @@ int printbuf_validate(struct print_buf *pb)
126 if (pb->buf[pb->size - 1] == '\0') { 126 if (pb->buf[pb->size - 1] == '\0') {
127 cp_buf = kmalloc(pb->size, GFP_ATOMIC); 127 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
128 if (cp_buf != NULL){ 128 if (cp_buf != NULL){
129 printbuf_init(&cb, cp_buf, pb->size); 129 tipc_printbuf_init(&cb, cp_buf, pb->size);
130 printbuf_move(&cb, pb); 130 tipc_printbuf_move(&cb, pb);
131 printbuf_move(pb, &cb); 131 tipc_printbuf_move(pb, &cb);
132 kfree(cp_buf); 132 kfree(cp_buf);
133 memcpy(pb->buf, err, strlen(err)); 133 memcpy(pb->buf, err, strlen(err));
134 } else { 134 } else {
135 printbuf_reset(pb); 135 tipc_printbuf_reset(pb);
136 tipc_printf(pb, err); 136 tipc_printf(pb, err);
137 } 137 }
138 } 138 }
@@ -140,13 +140,13 @@ int printbuf_validate(struct print_buf *pb)
140} 140}
141 141
142/** 142/**
143 * printbuf_move - move print buffer contents to another print buffer 143 * tipc_printbuf_move - move print buffer contents to another print buffer
144 * 144 *
145 * Current contents of destination print buffer (if any) are discarded. 145 * Current contents of destination print buffer (if any) are discarded.
146 * Source print buffer becomes empty if a successful move occurs. 146 * Source print buffer becomes empty if a successful move occurs.
147 */ 147 */
148 148
149void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from) 149void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
150{ 150{
151 int len; 151 int len;
152 152
@@ -156,12 +156,12 @@ void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
156 return; 156 return;
157 157
158 if (!pb_from || !pb_from->buf) { 158 if (!pb_from || !pb_from->buf) {
159 printbuf_reset(pb_to); 159 tipc_printbuf_reset(pb_to);
160 return; 160 return;
161 } 161 }
162 162
163 if (pb_to->size < pb_from->size) { 163 if (pb_to->size < pb_from->size) {
164 printbuf_reset(pb_to); 164 tipc_printbuf_reset(pb_to);
165 tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***"); 165 tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
166 return; 166 return;
167 } 167 }
@@ -179,7 +179,7 @@ void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
179 strcpy(pb_to->crs, pb_from->buf); 179 strcpy(pb_to->crs, pb_from->buf);
180 pb_to->crs += len; 180 pb_to->crs += len;
181 181
182 printbuf_reset(pb_from); 182 tipc_printbuf_reset(pb_from);
183} 183}
184 184
185/** 185/**
@@ -199,7 +199,7 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
199 strcpy(print_string, "*** STRING TOO LONG ***"); 199 strcpy(print_string, "*** STRING TOO LONG ***");
200 200
201 while (pb) { 201 while (pb) {
202 if (pb == CONS) 202 if (pb == TIPC_CONS)
203 printk(print_string); 203 printk(print_string);
204 else if (pb->buf) { 204 else if (pb->buf) {
205 chars_left = pb->buf + pb->size - pb->crs - 1; 205 chars_left = pb->buf + pb->size - pb->crs - 1;
@@ -223,10 +223,10 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
223} 223}
224 224
225/** 225/**
226 * TEE - perform next output operation on both print buffers 226 * TIPC_TEE - perform next output operation on both print buffers
227 */ 227 */
228 228
229struct print_buf *TEE(struct print_buf *b0, struct print_buf *b1) 229struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
230{ 230{
231 struct print_buf *pb = b0; 231 struct print_buf *pb = b0;
232 232
@@ -294,96 +294,96 @@ void tipc_dump(struct print_buf *pb, const char *fmt, ...)
294 int len; 294 int len;
295 295
296 spin_lock_bh(&print_lock); 296 spin_lock_bh(&print_lock);
297 FORMAT(CONS->buf, len, fmt); 297 FORMAT(TIPC_CONS->buf, len, fmt);
298 printk(CONS->buf); 298 printk(TIPC_CONS->buf);
299 299
300 for (; pb; pb = pb->next) { 300 for (; pb; pb = pb->next) {
301 if (pb == CONS) 301 if (pb == TIPC_CONS)
302 continue; 302 continue;
303 printk("\n---- Start of dump,%s log ----\n\n", 303 printk("\n---- Start of dump,%s log ----\n\n",
304 (pb == LOG) ? "global" : "local"); 304 (pb == TIPC_LOG) ? "global" : "local");
305 printbuf_dump(pb); 305 printbuf_dump(pb);
306 printbuf_reset(pb); 306 tipc_printbuf_reset(pb);
307 printk("\n-------- End of dump --------\n"); 307 printk("\n-------- End of dump --------\n");
308 } 308 }
309 spin_unlock_bh(&print_lock); 309 spin_unlock_bh(&print_lock);
310} 310}
311 311
312/** 312/**
313 * log_stop - free up TIPC log print buffer 313 * tipc_log_stop - free up TIPC log print buffer
314 */ 314 */
315 315
316void log_stop(void) 316void tipc_log_stop(void)
317{ 317{
318 spin_lock_bh(&print_lock); 318 spin_lock_bh(&print_lock);
319 if (LOG->buf) { 319 if (TIPC_LOG->buf) {
320 kfree(LOG->buf); 320 kfree(TIPC_LOG->buf);
321 LOG->buf = NULL; 321 TIPC_LOG->buf = NULL;
322 } 322 }
323 spin_unlock_bh(&print_lock); 323 spin_unlock_bh(&print_lock);
324} 324}
325 325
326/** 326/**
327 * log_reinit - set TIPC log print buffer to specified size 327 * tipc_log_reinit - set TIPC log print buffer to specified size
328 */ 328 */
329 329
330void log_reinit(int log_size) 330void tipc_log_reinit(int log_size)
331{ 331{
332 log_stop(); 332 tipc_log_stop();
333 333
334 if (log_size) { 334 if (log_size) {
335 if (log_size <= MAX_STRING) 335 if (log_size <= MAX_STRING)
336 log_size = MAX_STRING + 1; 336 log_size = MAX_STRING + 1;
337 spin_lock_bh(&print_lock); 337 spin_lock_bh(&print_lock);
338 printbuf_init(LOG, kmalloc(log_size, GFP_ATOMIC), log_size); 338 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
339 spin_unlock_bh(&print_lock); 339 spin_unlock_bh(&print_lock);
340 } 340 }
341} 341}
342 342
343/** 343/**
344 * log_resize - reconfigure size of TIPC log buffer 344 * tipc_log_resize - reconfigure size of TIPC log buffer
345 */ 345 */
346 346
347struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space) 347struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space)
348{ 348{
349 u32 value; 349 u32 value;
350 350
351 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 351 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
352 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 352 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
353 353
354 value = *(u32 *)TLV_DATA(req_tlv_area); 354 value = *(u32 *)TLV_DATA(req_tlv_area);
355 value = ntohl(value); 355 value = ntohl(value);
356 if (value != delimit(value, 0, 32768)) 356 if (value != delimit(value, 0, 32768))
357 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 357 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
358 " (log size must be 0-32768)"); 358 " (log size must be 0-32768)");
359 log_reinit(value); 359 tipc_log_reinit(value);
360 return cfg_reply_none(); 360 return tipc_cfg_reply_none();
361} 361}
362 362
363/** 363/**
364 * log_dump - capture TIPC log buffer contents in configuration message 364 * tipc_log_dump - capture TIPC log buffer contents in configuration message
365 */ 365 */
366 366
367struct sk_buff *log_dump(void) 367struct sk_buff *tipc_log_dump(void)
368{ 368{
369 struct sk_buff *reply; 369 struct sk_buff *reply;
370 370
371 spin_lock_bh(&print_lock); 371 spin_lock_bh(&print_lock);
372 if (!LOG->buf) 372 if (!TIPC_LOG->buf)
373 reply = cfg_reply_ultra_string("log not activated\n"); 373 reply = tipc_cfg_reply_ultra_string("log not activated\n");
374 else if (printbuf_empty(LOG)) 374 else if (tipc_printbuf_empty(TIPC_LOG))
375 reply = cfg_reply_ultra_string("log is empty\n"); 375 reply = tipc_cfg_reply_ultra_string("log is empty\n");
376 else { 376 else {
377 struct tlv_desc *rep_tlv; 377 struct tlv_desc *rep_tlv;
378 struct print_buf pb; 378 struct print_buf pb;
379 int str_len; 379 int str_len;
380 380
381 str_len = min(LOG->size, 32768u); 381 str_len = min(TIPC_LOG->size, 32768u);
382 reply = cfg_reply_alloc(TLV_SPACE(str_len)); 382 reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
383 if (reply) { 383 if (reply) {
384 rep_tlv = (struct tlv_desc *)reply->data; 384 rep_tlv = (struct tlv_desc *)reply->data;
385 printbuf_init(&pb, TLV_DATA(rep_tlv), str_len); 385 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
386 printbuf_move(&pb, LOG); 386 tipc_printbuf_move(&pb, TIPC_LOG);
387 str_len = strlen(TLV_DATA(rep_tlv)) + 1; 387 str_len = strlen(TLV_DATA(rep_tlv)) + 1;
388 skb_put(reply, TLV_SPACE(str_len)); 388 skb_put(reply, TLV_SPACE(str_len));
389 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 389 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
index c6b2a64c224..227f050d2a5 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/dbg.h
@@ -44,16 +44,16 @@ struct print_buf {
44 struct print_buf *next; 44 struct print_buf *next;
45}; 45};
46 46
47void printbuf_init(struct print_buf *pb, char *buf, u32 sz); 47void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 sz);
48void printbuf_reset(struct print_buf *pb); 48void tipc_printbuf_reset(struct print_buf *pb);
49int printbuf_empty(struct print_buf *pb); 49int tipc_printbuf_empty(struct print_buf *pb);
50int printbuf_validate(struct print_buf *pb); 50int tipc_printbuf_validate(struct print_buf *pb);
51void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from); 51void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
52 52
53void log_reinit(int log_size); 53void tipc_log_reinit(int log_size);
54void log_stop(void); 54void tipc_log_stop(void);
55 55
56struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space); 56struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space);
57struct sk_buff *log_dump(void); 57struct sk_buff *tipc_log_dump(void);
58 58
59#endif 59#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index b106ef1621c..53ba4630c10 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -93,7 +93,7 @@ int disc_create_link(const struct tipc_link_create *argv)
93 * disc_lost_link(): A link has lost contact 93 * disc_lost_link(): A link has lost contact
94 */ 94 */
95 95
96void disc_link_event(u32 addr, char *name, int up) 96void tipc_disc_link_event(u32 addr, char *name, int up)
97{ 97{
98 if (in_own_cluster(addr)) 98 if (in_own_cluster(addr))
99 return; 99 return;
@@ -103,17 +103,17 @@ void disc_link_event(u32 addr, char *name, int up)
103} 103}
104 104
105/** 105/**
106 * disc_init_msg - initialize a link setup message 106 * tipc_disc_init_msg - initialize a link setup message
107 * @type: message type (request or response) 107 * @type: message type (request or response)
108 * @req_links: number of links associated with message 108 * @req_links: number of links associated with message
109 * @dest_domain: network domain of node(s) which should respond to message 109 * @dest_domain: network domain of node(s) which should respond to message
110 * @b_ptr: ptr to bearer issuing message 110 * @b_ptr: ptr to bearer issuing message
111 */ 111 */
112 112
113struct sk_buff *disc_init_msg(u32 type, 113struct sk_buff *tipc_disc_init_msg(u32 type,
114 u32 req_links, 114 u32 req_links,
115 u32 dest_domain, 115 u32 dest_domain,
116 struct bearer *b_ptr) 116 struct bearer *b_ptr)
117{ 117{
118 struct sk_buff *buf = buf_acquire(DSC_H_SIZE); 118 struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
119 struct tipc_msg *msg; 119 struct tipc_msg *msg;
@@ -132,11 +132,11 @@ struct sk_buff *disc_init_msg(u32 type,
132} 132}
133 133
134/** 134/**
135 * disc_recv_msg - handle incoming link setup message (request or response) 135 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
136 * @buf: buffer containing message 136 * @buf: buffer containing message
137 */ 137 */
138 138
139void disc_recv_msg(struct sk_buff *buf) 139void tipc_disc_recv_msg(struct sk_buff *buf)
140{ 140{
141 struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle; 141 struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
142 struct link *link; 142 struct link *link;
@@ -153,9 +153,9 @@ void disc_recv_msg(struct sk_buff *buf)
153 153
154 if (net_id != tipc_net_id) 154 if (net_id != tipc_net_id)
155 return; 155 return;
156 if (!addr_domain_valid(dest)) 156 if (!tipc_addr_domain_valid(dest))
157 return; 157 return;
158 if (!addr_node_valid(orig)) 158 if (!tipc_addr_node_valid(orig))
159 return; 159 return;
160 if (orig == tipc_own_addr) 160 if (orig == tipc_own_addr)
161 return; 161 return;
@@ -169,11 +169,11 @@ void disc_recv_msg(struct sk_buff *buf)
169 /* Always accept link here */ 169 /* Always accept link here */
170 struct sk_buff *rbuf; 170 struct sk_buff *rbuf;
171 struct tipc_media_addr *addr; 171 struct tipc_media_addr *addr;
172 struct node *n_ptr = node_find(orig); 172 struct node *n_ptr = tipc_node_find(orig);
173 int link_up; 173 int link_up;
174 dbg(" in own cluster\n"); 174 dbg(" in own cluster\n");
175 if (n_ptr == NULL) { 175 if (n_ptr == NULL) {
176 n_ptr = node_create(orig); 176 n_ptr = tipc_node_create(orig);
177 } 177 }
178 if (n_ptr == NULL) { 178 if (n_ptr == NULL) {
179 warn("Memory squeeze; Failed to create node\n"); 179 warn("Memory squeeze; Failed to create node\n");
@@ -183,7 +183,7 @@ void disc_recv_msg(struct sk_buff *buf)
183 link = n_ptr->links[b_ptr->identity]; 183 link = n_ptr->links[b_ptr->identity];
184 if (!link) { 184 if (!link) {
185 dbg("creating link\n"); 185 dbg("creating link\n");
186 link = link_create(b_ptr, orig, &media_addr); 186 link = tipc_link_create(b_ptr, orig, &media_addr);
187 if (!link) { 187 if (!link) {
188 spin_unlock_bh(&n_ptr->lock); 188 spin_unlock_bh(&n_ptr->lock);
189 return; 189 return;
@@ -196,13 +196,13 @@ void disc_recv_msg(struct sk_buff *buf)
196 warn("New bearer address for %s\n", 196 warn("New bearer address for %s\n",
197 addr_string_fill(addr_string, orig)); 197 addr_string_fill(addr_string, orig));
198 memcpy(addr, &media_addr, sizeof(*addr)); 198 memcpy(addr, &media_addr, sizeof(*addr));
199 link_reset(link); 199 tipc_link_reset(link);
200 } 200 }
201 link_up = link_is_up(link); 201 link_up = tipc_link_is_up(link);
202 spin_unlock_bh(&n_ptr->lock); 202 spin_unlock_bh(&n_ptr->lock);
203 if ((type == DSC_RESP_MSG) || link_up) 203 if ((type == DSC_RESP_MSG) || link_up)
204 return; 204 return;
205 rbuf = disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr); 205 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
206 if (rbuf != NULL) { 206 if (rbuf != NULL) {
207 msg_dbg(buf_msg(rbuf),"SEND:"); 207 msg_dbg(buf_msg(rbuf),"SEND:");
208 b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr); 208 b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
@@ -212,11 +212,11 @@ void disc_recv_msg(struct sk_buff *buf)
212} 212}
213 213
214/** 214/**
215 * disc_stop_link_req - stop sending periodic link setup requests 215 * tipc_disc_stop_link_req - stop sending periodic link setup requests
216 * @req: ptr to link request structure 216 * @req: ptr to link request structure
217 */ 217 */
218 218
219void disc_stop_link_req(struct link_req *req) 219void tipc_disc_stop_link_req(struct link_req *req)
220{ 220{
221 if (!req) 221 if (!req)
222 return; 222 return;
@@ -228,11 +228,11 @@ void disc_stop_link_req(struct link_req *req)
228} 228}
229 229
230/** 230/**
231 * disc_update_link_req - update frequency of periodic link setup requests 231 * tipc_disc_update_link_req - update frequency of periodic link setup requests
232 * @req: ptr to link request structure 232 * @req: ptr to link request structure
233 */ 233 */
234 234
235void disc_update_link_req(struct link_req *req) 235void tipc_disc_update_link_req(struct link_req *req)
236{ 236{
237 if (!req) 237 if (!req)
238 return; 238 return;
@@ -282,7 +282,7 @@ static void disc_timeout(struct link_req *req)
282} 282}
283 283
284/** 284/**
285 * disc_init_link_req - start sending periodic link setup requests 285 * tipc_disc_init_link_req - start sending periodic link setup requests
286 * @b_ptr: ptr to bearer issuing requests 286 * @b_ptr: ptr to bearer issuing requests
287 * @dest: destination address for request messages 287 * @dest: destination address for request messages
288 * @dest_domain: network domain of node(s) which should respond to message 288 * @dest_domain: network domain of node(s) which should respond to message
@@ -291,10 +291,10 @@ static void disc_timeout(struct link_req *req)
291 * Returns pointer to link request structure, or NULL if unable to create. 291 * Returns pointer to link request structure, or NULL if unable to create.
292 */ 292 */
293 293
294struct link_req *disc_init_link_req(struct bearer *b_ptr, 294struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
295 const struct tipc_media_addr *dest, 295 const struct tipc_media_addr *dest,
296 u32 dest_domain, 296 u32 dest_domain,
297 u32 req_links) 297 u32 req_links)
298{ 298{
299 struct link_req *req; 299 struct link_req *req;
300 300
@@ -302,7 +302,7 @@ struct link_req *disc_init_link_req(struct bearer *b_ptr,
302 if (!req) 302 if (!req)
303 return NULL; 303 return NULL;
304 304
305 req->buf = disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr); 305 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
306 if (!req->buf) { 306 if (!req->buf) {
307 kfree(req); 307 kfree(req);
308 return NULL; 308 return NULL;
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 2a6114d9162..0454fd1ae7f 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -37,20 +37,20 @@
37#ifndef _TIPC_DISCOVER_H 37#ifndef _TIPC_DISCOVER_H
38#define _TIPC_DISCOVER_H 38#define _TIPC_DISCOVER_H
39 39
40#include <linux/tipc.h> 40#include "core.h"
41 41
42struct link_req; 42struct link_req;
43 43
44struct link_req *disc_init_link_req(struct bearer *b_ptr, 44struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
45 const struct tipc_media_addr *dest, 45 const struct tipc_media_addr *dest,
46 u32 dest_domain, 46 u32 dest_domain,
47 u32 req_links); 47 u32 req_links);
48void disc_update_link_req(struct link_req *req); 48void tipc_disc_update_link_req(struct link_req *req);
49void disc_stop_link_req(struct link_req *req); 49void tipc_disc_stop_link_req(struct link_req *req);
50 50
51void disc_recv_msg(struct sk_buff *buf); 51void tipc_disc_recv_msg(struct sk_buff *buf);
52 52
53void disc_link_event(u32 addr, char *name, int up); 53void tipc_disc_link_event(u32 addr, char *name, int up);
54#if 0 54#if 0
55int disc_create_link(const struct tipc_link_create *argv); 55int disc_create_link(const struct tipc_link_create *argv);
56#endif 56#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 34d0462db3a..1f8d83b9c8b 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -38,13 +38,11 @@
38#include <net/tipc/tipc_bearer.h> 38#include <net/tipc/tipc_bearer.h>
39#include <net/tipc/tipc_msg.h> 39#include <net/tipc/tipc_msg.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/version.h>
42 41
43#define MAX_ETH_BEARERS 2 42#define MAX_ETH_BEARERS 2
44#define TIPC_PROTOCOL 0x88ca 43#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
45#define ETH_LINK_PRIORITY 10
46#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL 44#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
47 45#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN
48 46
49/** 47/**
50 * struct eth_bearer - Ethernet bearer data structure 48 * struct eth_bearer - Ethernet bearer data structure
@@ -78,7 +76,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
78 clone->nh.raw = clone->data; 76 clone->nh.raw = clone->data;
79 dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev; 77 dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
80 clone->dev = dev; 78 clone->dev = dev;
81 dev->hard_header(clone, dev, TIPC_PROTOCOL, 79 dev->hard_header(clone, dev, ETH_P_TIPC,
82 &dest->dev_addr.eth_addr, 80 &dest->dev_addr.eth_addr,
83 dev->dev_addr, clone->len); 81 dev->dev_addr, clone->len);
84 dev_queue_xmit(clone); 82 dev_queue_xmit(clone);
@@ -141,7 +139,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
141 return -EDQUOT; 139 return -EDQUOT;
142 if (!eb_ptr->dev) { 140 if (!eb_ptr->dev) {
143 eb_ptr->dev = dev; 141 eb_ptr->dev = dev;
144 eb_ptr->tipc_packet_type.type = __constant_htons(TIPC_PROTOCOL); 142 eb_ptr->tipc_packet_type.type = __constant_htons(ETH_P_TIPC);
145 eb_ptr->tipc_packet_type.dev = dev; 143 eb_ptr->tipc_packet_type.dev = dev;
146 eb_ptr->tipc_packet_type.func = recv_msg; 144 eb_ptr->tipc_packet_type.func = recv_msg;
147 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr; 145 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
@@ -240,13 +238,13 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
240} 238}
241 239
242/** 240/**
243 * eth_media_start - activate Ethernet bearer support 241 * tipc_eth_media_start - activate Ethernet bearer support
244 * 242 *
245 * Register Ethernet media type with TIPC bearer code. Also register 243 * Register Ethernet media type with TIPC bearer code. Also register
246 * with OS for notifications about device state changes. 244 * with OS for notifications about device state changes.
247 */ 245 */
248 246
249int eth_media_start(void) 247int tipc_eth_media_start(void)
250{ 248{
251 struct tipc_media_addr bcast_addr; 249 struct tipc_media_addr bcast_addr;
252 int res; 250 int res;
@@ -260,7 +258,7 @@ int eth_media_start(void)
260 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth", 258 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
261 enable_bearer, disable_bearer, send_msg, 259 enable_bearer, disable_bearer, send_msg,
262 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY, 260 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
263 ETH_LINK_TOLERANCE, TIPC_DEF_LINK_WIN); 261 ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
264 if (res) 262 if (res)
265 return res; 263 return res;
266 264
@@ -273,10 +271,10 @@ int eth_media_start(void)
273} 271}
274 272
275/** 273/**
276 * eth_media_stop - deactivate Ethernet bearer support 274 * tipc_eth_media_stop - deactivate Ethernet bearer support
277 */ 275 */
278 276
279void eth_media_stop(void) 277void tipc_eth_media_stop(void)
280{ 278{
281 int i; 279 int i;
282 280
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index f320010f8a6..966f70a1b60 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -52,7 +52,7 @@ static void process_signal_queue(unsigned long dummy);
52static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0); 52static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
53 53
54 54
55unsigned int k_signal(Handler routine, unsigned long argument) 55unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{ 56{
57 struct queue_item *item; 57 struct queue_item *item;
58 58
@@ -93,7 +93,7 @@ static void process_signal_queue(unsigned long dummy)
93 spin_unlock_bh(&qitem_lock); 93 spin_unlock_bh(&qitem_lock);
94} 94}
95 95
96int handler_start(void) 96int tipc_handler_start(void)
97{ 97{
98 tipc_queue_item_cache = 98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item), 99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
@@ -107,7 +107,7 @@ int handler_start(void)
107 return 0; 107 return 0;
108} 108}
109 109
110void handler_stop(void) 110void tipc_handler_stop(void)
111{ 111{
112 struct list_head *l, *n; 112 struct list_head *l, *n;
113 struct queue_item *item; 113 struct queue_item *item;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7265f4be476..511872afa45 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -148,12 +148,12 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
148#define LINK_LOG_BUF_SIZE 0 148#define LINK_LOG_BUF_SIZE 0
149 149
150#define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0) 150#define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
151#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while(0) 151#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0)
152#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0) 152#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
153#define dbg_link_dump() do { \ 153#define dbg_link_dump() do { \
154 if (LINK_LOG_BUF_SIZE) { \ 154 if (LINK_LOG_BUF_SIZE) { \
155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \ 155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
156 printbuf_move(LOG, &l_ptr->print_buf); \ 156 tipc_printbuf_move(LOG, &l_ptr->print_buf); \
157 } \ 157 } \
158} while (0) 158} while (0)
159 159
@@ -252,14 +252,14 @@ static inline u32 link_last_sent(struct link *l_ptr)
252 * Simple non-inlined link routines (i.e. referenced outside this file) 252 * Simple non-inlined link routines (i.e. referenced outside this file)
253 */ 253 */
254 254
255int link_is_up(struct link *l_ptr) 255int tipc_link_is_up(struct link *l_ptr)
256{ 256{
257 if (!l_ptr) 257 if (!l_ptr)
258 return 0; 258 return 0;
259 return (link_working_working(l_ptr) || link_working_unknown(l_ptr)); 259 return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
260} 260}
261 261
262int link_is_active(struct link *l_ptr) 262int tipc_link_is_active(struct link *l_ptr)
263{ 263{
264 return ((l_ptr->owner->active_links[0] == l_ptr) || 264 return ((l_ptr->owner->active_links[0] == l_ptr) ||
265 (l_ptr->owner->active_links[1] == l_ptr)); 265 (l_ptr->owner->active_links[1] == l_ptr));
@@ -338,15 +338,15 @@ static int link_name_validate(const char *name, struct link_name *name_parts)
338 * link_timeout - handle expiration of link timer 338 * link_timeout - handle expiration of link timer
339 * @l_ptr: pointer to link 339 * @l_ptr: pointer to link
340 * 340 *
341 * This routine must not grab "net_lock" to avoid a potential deadlock conflict 341 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
342 * with link_delete(). (There is no risk that the node will be deleted by 342 * with tipc_link_delete(). (There is no risk that the node will be deleted by
343 * another thread because link_delete() always cancels the link timer before 343 * another thread because tipc_link_delete() always cancels the link timer before
344 * node_delete() is called.) 344 * tipc_node_delete() is called.)
345 */ 345 */
346 346
347static void link_timeout(struct link *l_ptr) 347static void link_timeout(struct link *l_ptr)
348{ 348{
349 node_lock(l_ptr->owner); 349 tipc_node_lock(l_ptr->owner);
350 350
351 /* update counters used in statistical profiling of send traffic */ 351 /* update counters used in statistical profiling of send traffic */
352 352
@@ -391,9 +391,9 @@ static void link_timeout(struct link *l_ptr)
391 link_state_event(l_ptr, TIMEOUT_EVT); 391 link_state_event(l_ptr, TIMEOUT_EVT);
392 392
393 if (l_ptr->next_out) 393 if (l_ptr->next_out)
394 link_push_queue(l_ptr); 394 tipc_link_push_queue(l_ptr);
395 395
396 node_unlock(l_ptr->owner); 396 tipc_node_unlock(l_ptr->owner);
397} 397}
398 398
399static inline void link_set_timer(struct link *l_ptr, u32 time) 399static inline void link_set_timer(struct link *l_ptr, u32 time)
@@ -402,7 +402,7 @@ static inline void link_set_timer(struct link *l_ptr, u32 time)
402} 402}
403 403
404/** 404/**
405 * link_create - create a new link 405 * tipc_link_create - create a new link
406 * @b_ptr: pointer to associated bearer 406 * @b_ptr: pointer to associated bearer
407 * @peer: network address of node at other end of link 407 * @peer: network address of node at other end of link
408 * @media_addr: media address to use when sending messages over link 408 * @media_addr: media address to use when sending messages over link
@@ -410,8 +410,8 @@ static inline void link_set_timer(struct link *l_ptr, u32 time)
410 * Returns pointer to link. 410 * Returns pointer to link.
411 */ 411 */
412 412
413struct link *link_create(struct bearer *b_ptr, const u32 peer, 413struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
414 const struct tipc_media_addr *media_addr) 414 const struct tipc_media_addr *media_addr)
415{ 415{
416 struct link *l_ptr; 416 struct link *l_ptr;
417 struct tipc_msg *msg; 417 struct tipc_msg *msg;
@@ -449,7 +449,7 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
449 strcpy((char *)msg_data(msg), if_name); 449 strcpy((char *)msg_data(msg), if_name);
450 450
451 l_ptr->priority = b_ptr->priority; 451 l_ptr->priority = b_ptr->priority;
452 link_set_queue_limits(l_ptr, b_ptr->media->window); 452 tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
453 453
454 link_init_max_pkt(l_ptr); 454 link_init_max_pkt(l_ptr);
455 455
@@ -458,7 +458,7 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
458 458
459 link_reset_statistics(l_ptr); 459 link_reset_statistics(l_ptr);
460 460
461 l_ptr->owner = node_attach_link(l_ptr); 461 l_ptr->owner = tipc_node_attach_link(l_ptr);
462 if (!l_ptr->owner) { 462 if (!l_ptr->owner) {
463 kfree(l_ptr); 463 kfree(l_ptr);
464 return NULL; 464 return NULL;
@@ -472,52 +472,52 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
472 warn("Memory squeeze; Failed to create link\n"); 472 warn("Memory squeeze; Failed to create link\n");
473 return NULL; 473 return NULL;
474 } 474 }
475 printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE); 475 tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
476 } 476 }
477 477
478 k_signal((Handler)link_start, (unsigned long)l_ptr); 478 tipc_k_signal((Handler)tipc_link_start, (unsigned long)l_ptr);
479 479
480 dbg("link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n", 480 dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
481 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit); 481 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
482 482
483 return l_ptr; 483 return l_ptr;
484} 484}
485 485
486/** 486/**
487 * link_delete - delete a link 487 * tipc_link_delete - delete a link
488 * @l_ptr: pointer to link 488 * @l_ptr: pointer to link
489 * 489 *
490 * Note: 'net_lock' is write_locked, bearer is locked. 490 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
491 * This routine must not grab the node lock until after link timer cancellation 491 * This routine must not grab the node lock until after link timer cancellation
492 * to avoid a potential deadlock situation. 492 * to avoid a potential deadlock situation.
493 */ 493 */
494 494
495void link_delete(struct link *l_ptr) 495void tipc_link_delete(struct link *l_ptr)
496{ 496{
497 if (!l_ptr) { 497 if (!l_ptr) {
498 err("Attempt to delete non-existent link\n"); 498 err("Attempt to delete non-existent link\n");
499 return; 499 return;
500 } 500 }
501 501
502 dbg("link_delete()\n"); 502 dbg("tipc_link_delete()\n");
503 503
504 k_cancel_timer(&l_ptr->timer); 504 k_cancel_timer(&l_ptr->timer);
505 505
506 node_lock(l_ptr->owner); 506 tipc_node_lock(l_ptr->owner);
507 link_reset(l_ptr); 507 tipc_link_reset(l_ptr);
508 node_detach_link(l_ptr->owner, l_ptr); 508 tipc_node_detach_link(l_ptr->owner, l_ptr);
509 link_stop(l_ptr); 509 tipc_link_stop(l_ptr);
510 list_del_init(&l_ptr->link_list); 510 list_del_init(&l_ptr->link_list);
511 if (LINK_LOG_BUF_SIZE) 511 if (LINK_LOG_BUF_SIZE)
512 kfree(l_ptr->print_buf.buf); 512 kfree(l_ptr->print_buf.buf);
513 node_unlock(l_ptr->owner); 513 tipc_node_unlock(l_ptr->owner);
514 k_term_timer(&l_ptr->timer); 514 k_term_timer(&l_ptr->timer);
515 kfree(l_ptr); 515 kfree(l_ptr);
516} 516}
517 517
518void link_start(struct link *l_ptr) 518void tipc_link_start(struct link *l_ptr)
519{ 519{
520 dbg("link_start %x\n", l_ptr); 520 dbg("tipc_link_start %x\n", l_ptr);
521 link_state_event(l_ptr, STARTING_EVT); 521 link_state_event(l_ptr, STARTING_EVT);
522} 522}
523 523
@@ -535,8 +535,8 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
535{ 535{
536 struct port *p_ptr; 536 struct port *p_ptr;
537 537
538 spin_lock_bh(&port_list_lock); 538 spin_lock_bh(&tipc_port_list_lock);
539 p_ptr = port_lock(origport); 539 p_ptr = tipc_port_lock(origport);
540 if (p_ptr) { 540 if (p_ptr) {
541 if (!p_ptr->wakeup) 541 if (!p_ptr->wakeup)
542 goto exit; 542 goto exit;
@@ -548,13 +548,13 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
548 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); 548 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
549 l_ptr->stats.link_congs++; 549 l_ptr->stats.link_congs++;
550exit: 550exit:
551 port_unlock(p_ptr); 551 tipc_port_unlock(p_ptr);
552 } 552 }
553 spin_unlock_bh(&port_list_lock); 553 spin_unlock_bh(&tipc_port_list_lock);
554 return -ELINKCONG; 554 return -ELINKCONG;
555} 555}
556 556
557void link_wakeup_ports(struct link *l_ptr, int all) 557void tipc_link_wakeup_ports(struct link *l_ptr, int all)
558{ 558{
559 struct port *p_ptr; 559 struct port *p_ptr;
560 struct port *temp_p_ptr; 560 struct port *temp_p_ptr;
@@ -564,7 +564,7 @@ void link_wakeup_ports(struct link *l_ptr, int all)
564 win = 100000; 564 win = 100000;
565 if (win <= 0) 565 if (win <= 0)
566 return; 566 return;
567 if (!spin_trylock_bh(&port_list_lock)) 567 if (!spin_trylock_bh(&tipc_port_list_lock))
568 return; 568 return;
569 if (link_congested(l_ptr)) 569 if (link_congested(l_ptr))
570 goto exit; 570 goto exit;
@@ -583,7 +583,7 @@ void link_wakeup_ports(struct link *l_ptr, int all)
583 } 583 }
584 584
585exit: 585exit:
586 spin_unlock_bh(&port_list_lock); 586 spin_unlock_bh(&tipc_port_list_lock);
587} 587}
588 588
589/** 589/**
@@ -606,11 +606,11 @@ static void link_release_outqueue(struct link *l_ptr)
606} 606}
607 607
608/** 608/**
609 * link_reset_fragments - purge link's inbound message fragments queue 609 * tipc_link_reset_fragments - purge link's inbound message fragments queue
610 * @l_ptr: pointer to link 610 * @l_ptr: pointer to link
611 */ 611 */
612 612
613void link_reset_fragments(struct link *l_ptr) 613void tipc_link_reset_fragments(struct link *l_ptr)
614{ 614{
615 struct sk_buff *buf = l_ptr->defragm_buf; 615 struct sk_buff *buf = l_ptr->defragm_buf;
616 struct sk_buff *next; 616 struct sk_buff *next;
@@ -624,11 +624,11 @@ void link_reset_fragments(struct link *l_ptr)
624} 624}
625 625
626/** 626/**
627 * link_stop - purge all inbound and outbound messages associated with link 627 * tipc_link_stop - purge all inbound and outbound messages associated with link
628 * @l_ptr: pointer to link 628 * @l_ptr: pointer to link
629 */ 629 */
630 630
631void link_stop(struct link *l_ptr) 631void tipc_link_stop(struct link *l_ptr)
632{ 632{
633 struct sk_buff *buf; 633 struct sk_buff *buf;
634 struct sk_buff *next; 634 struct sk_buff *next;
@@ -647,7 +647,7 @@ void link_stop(struct link *l_ptr)
647 buf = next; 647 buf = next;
648 } 648 }
649 649
650 link_reset_fragments(l_ptr); 650 tipc_link_reset_fragments(l_ptr);
651 651
652 buf_discard(l_ptr->proto_msg_queue); 652 buf_discard(l_ptr->proto_msg_queue);
653 l_ptr->proto_msg_queue = NULL; 653 l_ptr->proto_msg_queue = NULL;
@@ -677,7 +677,7 @@ static void link_send_event(void (*fcn)(u32 a, char *n, int up),
677 ev->up = up; 677 ev->up = up;
678 ev->fcn = fcn; 678 ev->fcn = fcn;
679 memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME); 679 memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
680 k_signal((Handler)link_recv_event, (unsigned long)ev); 680 tipc_k_signal((Handler)link_recv_event, (unsigned long)ev);
681} 681}
682 682
683#else 683#else
@@ -686,7 +686,7 @@ static void link_send_event(void (*fcn)(u32 a, char *n, int up),
686 686
687#endif 687#endif
688 688
689void link_reset(struct link *l_ptr) 689void tipc_link_reset(struct link *l_ptr)
690{ 690{
691 struct sk_buff *buf; 691 struct sk_buff *buf;
692 u32 prev_state = l_ptr->state; 692 u32 prev_state = l_ptr->state;
@@ -706,13 +706,13 @@ void link_reset(struct link *l_ptr)
706 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET)) 706 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
707 return; 707 return;
708 708
709 node_link_down(l_ptr->owner, l_ptr); 709 tipc_node_link_down(l_ptr->owner, l_ptr);
710 bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); 710 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
711#if 0 711#if 0
712 tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name); 712 tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name);
713 dbg_link_dump(); 713 dbg_link_dump();
714#endif 714#endif
715 if (node_has_active_links(l_ptr->owner) && 715 if (tipc_node_has_active_links(l_ptr->owner) &&
716 l_ptr->owner->permit_changeover) { 716 l_ptr->owner->permit_changeover) {
717 l_ptr->reset_checkpoint = checkpoint; 717 l_ptr->reset_checkpoint = checkpoint;
718 l_ptr->exp_msg_count = START_CHANGEOVER; 718 l_ptr->exp_msg_count = START_CHANGEOVER;
@@ -730,7 +730,7 @@ void link_reset(struct link *l_ptr)
730 buf = next; 730 buf = next;
731 } 731 }
732 if (!list_empty(&l_ptr->waiting_ports)) 732 if (!list_empty(&l_ptr->waiting_ports))
733 link_wakeup_ports(l_ptr, 1); 733 tipc_link_wakeup_ports(l_ptr, 1);
734 734
735 l_ptr->retransm_queue_head = 0; 735 l_ptr->retransm_queue_head = 0;
736 l_ptr->retransm_queue_size = 0; 736 l_ptr->retransm_queue_size = 0;
@@ -747,20 +747,20 @@ void link_reset(struct link *l_ptr)
747 l_ptr->stale_count = 0; 747 l_ptr->stale_count = 0;
748 link_reset_statistics(l_ptr); 748 link_reset_statistics(l_ptr);
749 749
750 link_send_event(cfg_link_event, l_ptr, 0); 750 link_send_event(tipc_cfg_link_event, l_ptr, 0);
751 if (!in_own_cluster(l_ptr->addr)) 751 if (!in_own_cluster(l_ptr->addr))
752 link_send_event(disc_link_event, l_ptr, 0); 752 link_send_event(tipc_disc_link_event, l_ptr, 0);
753} 753}
754 754
755 755
756static void link_activate(struct link *l_ptr) 756static void link_activate(struct link *l_ptr)
757{ 757{
758 l_ptr->next_in_no = 1; 758 l_ptr->next_in_no = 1;
759 node_link_up(l_ptr->owner, l_ptr); 759 tipc_node_link_up(l_ptr->owner, l_ptr);
760 bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 760 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
761 link_send_event(cfg_link_event, l_ptr, 1); 761 link_send_event(tipc_cfg_link_event, l_ptr, 1);
762 if (!in_own_cluster(l_ptr->addr)) 762 if (!in_own_cluster(l_ptr->addr))
763 link_send_event(disc_link_event, l_ptr, 1); 763 link_send_event(tipc_disc_link_event, l_ptr, 1);
764} 764}
765 765
766/** 766/**
@@ -799,13 +799,13 @@ static void link_state_event(struct link *l_ptr, unsigned event)
799 dbg_link("TIM "); 799 dbg_link("TIM ");
800 if (l_ptr->next_in_no != l_ptr->checkpoint) { 800 if (l_ptr->next_in_no != l_ptr->checkpoint) {
801 l_ptr->checkpoint = l_ptr->next_in_no; 801 l_ptr->checkpoint = l_ptr->next_in_no;
802 if (bclink_acks_missing(l_ptr->owner)) { 802 if (tipc_bclink_acks_missing(l_ptr->owner)) {
803 link_send_proto_msg(l_ptr, STATE_MSG, 803 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
804 0, 0, 0, 0, 0); 804 0, 0, 0, 0, 0);
805 l_ptr->fsm_msg_cnt++; 805 l_ptr->fsm_msg_cnt++;
806 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) { 806 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
807 link_send_proto_msg(l_ptr, STATE_MSG, 807 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
808 1, 0, 0, 0, 0); 808 1, 0, 0, 0, 0);
809 l_ptr->fsm_msg_cnt++; 809 l_ptr->fsm_msg_cnt++;
810 } 810 }
811 link_set_timer(l_ptr, cont_intv); 811 link_set_timer(l_ptr, cont_intv);
@@ -814,16 +814,16 @@ static void link_state_event(struct link *l_ptr, unsigned event)
814 dbg_link(" -> WU\n"); 814 dbg_link(" -> WU\n");
815 l_ptr->state = WORKING_UNKNOWN; 815 l_ptr->state = WORKING_UNKNOWN;
816 l_ptr->fsm_msg_cnt = 0; 816 l_ptr->fsm_msg_cnt = 0;
817 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 817 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
818 l_ptr->fsm_msg_cnt++; 818 l_ptr->fsm_msg_cnt++;
819 link_set_timer(l_ptr, cont_intv / 4); 819 link_set_timer(l_ptr, cont_intv / 4);
820 break; 820 break;
821 case RESET_MSG: 821 case RESET_MSG:
822 dbg_link("RES -> RR\n"); 822 dbg_link("RES -> RR\n");
823 link_reset(l_ptr); 823 tipc_link_reset(l_ptr);
824 l_ptr->state = RESET_RESET; 824 l_ptr->state = RESET_RESET;
825 l_ptr->fsm_msg_cnt = 0; 825 l_ptr->fsm_msg_cnt = 0;
826 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 826 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
827 l_ptr->fsm_msg_cnt++; 827 l_ptr->fsm_msg_cnt++;
828 link_set_timer(l_ptr, cont_intv); 828 link_set_timer(l_ptr, cont_intv);
829 break; 829 break;
@@ -844,10 +844,10 @@ static void link_state_event(struct link *l_ptr, unsigned event)
844 break; 844 break;
845 case RESET_MSG: 845 case RESET_MSG:
846 dbg_link("RES -> RR\n"); 846 dbg_link("RES -> RR\n");
847 link_reset(l_ptr); 847 tipc_link_reset(l_ptr);
848 l_ptr->state = RESET_RESET; 848 l_ptr->state = RESET_RESET;
849 l_ptr->fsm_msg_cnt = 0; 849 l_ptr->fsm_msg_cnt = 0;
850 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 850 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
851 l_ptr->fsm_msg_cnt++; 851 l_ptr->fsm_msg_cnt++;
852 link_set_timer(l_ptr, cont_intv); 852 link_set_timer(l_ptr, cont_intv);
853 break; 853 break;
@@ -858,9 +858,9 @@ static void link_state_event(struct link *l_ptr, unsigned event)
858 l_ptr->state = WORKING_WORKING; 858 l_ptr->state = WORKING_WORKING;
859 l_ptr->fsm_msg_cnt = 0; 859 l_ptr->fsm_msg_cnt = 0;
860 l_ptr->checkpoint = l_ptr->next_in_no; 860 l_ptr->checkpoint = l_ptr->next_in_no;
861 if (bclink_acks_missing(l_ptr->owner)) { 861 if (tipc_bclink_acks_missing(l_ptr->owner)) {
862 link_send_proto_msg(l_ptr, STATE_MSG, 862 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
863 0, 0, 0, 0, 0); 863 0, 0, 0, 0, 0);
864 l_ptr->fsm_msg_cnt++; 864 l_ptr->fsm_msg_cnt++;
865 } 865 }
866 link_set_timer(l_ptr, cont_intv); 866 link_set_timer(l_ptr, cont_intv);
@@ -868,18 +868,18 @@ static void link_state_event(struct link *l_ptr, unsigned event)
868 dbg_link("Probing %u/%u,timer = %u ms)\n", 868 dbg_link("Probing %u/%u,timer = %u ms)\n",
869 l_ptr->fsm_msg_cnt, l_ptr->abort_limit, 869 l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
870 cont_intv / 4); 870 cont_intv / 4);
871 link_send_proto_msg(l_ptr, STATE_MSG, 871 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
872 1, 0, 0, 0, 0); 872 1, 0, 0, 0, 0);
873 l_ptr->fsm_msg_cnt++; 873 l_ptr->fsm_msg_cnt++;
874 link_set_timer(l_ptr, cont_intv / 4); 874 link_set_timer(l_ptr, cont_intv / 4);
875 } else { /* Link has failed */ 875 } else { /* Link has failed */
876 dbg_link("-> RU (%u probes unanswered)\n", 876 dbg_link("-> RU (%u probes unanswered)\n",
877 l_ptr->fsm_msg_cnt); 877 l_ptr->fsm_msg_cnt);
878 link_reset(l_ptr); 878 tipc_link_reset(l_ptr);
879 l_ptr->state = RESET_UNKNOWN; 879 l_ptr->state = RESET_UNKNOWN;
880 l_ptr->fsm_msg_cnt = 0; 880 l_ptr->fsm_msg_cnt = 0;
881 link_send_proto_msg(l_ptr, RESET_MSG, 881 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
882 0, 0, 0, 0, 0); 882 0, 0, 0, 0, 0);
883 l_ptr->fsm_msg_cnt++; 883 l_ptr->fsm_msg_cnt++;
884 link_set_timer(l_ptr, cont_intv); 884 link_set_timer(l_ptr, cont_intv);
885 } 885 }
@@ -904,7 +904,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
904 l_ptr->state = WORKING_WORKING; 904 l_ptr->state = WORKING_WORKING;
905 l_ptr->fsm_msg_cnt = 0; 905 l_ptr->fsm_msg_cnt = 0;
906 link_activate(l_ptr); 906 link_activate(l_ptr);
907 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 907 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
908 l_ptr->fsm_msg_cnt++; 908 l_ptr->fsm_msg_cnt++;
909 link_set_timer(l_ptr, cont_intv); 909 link_set_timer(l_ptr, cont_intv);
910 break; 910 break;
@@ -913,7 +913,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
913 dbg_link(" -> RR\n"); 913 dbg_link(" -> RR\n");
914 l_ptr->state = RESET_RESET; 914 l_ptr->state = RESET_RESET;
915 l_ptr->fsm_msg_cnt = 0; 915 l_ptr->fsm_msg_cnt = 0;
916 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0); 916 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
917 l_ptr->fsm_msg_cnt++; 917 l_ptr->fsm_msg_cnt++;
918 link_set_timer(l_ptr, cont_intv); 918 link_set_timer(l_ptr, cont_intv);
919 break; 919 break;
@@ -923,7 +923,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
923 /* fall through */ 923 /* fall through */
924 case TIMEOUT_EVT: 924 case TIMEOUT_EVT:
925 dbg_link("TIM \n"); 925 dbg_link("TIM \n");
926 link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); 926 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
927 l_ptr->fsm_msg_cnt++; 927 l_ptr->fsm_msg_cnt++;
928 link_set_timer(l_ptr, cont_intv); 928 link_set_timer(l_ptr, cont_intv);
929 break; 929 break;
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
947 l_ptr->state = WORKING_WORKING; 947 l_ptr->state = WORKING_WORKING;
948 l_ptr->fsm_msg_cnt = 0; 948 l_ptr->fsm_msg_cnt = 0;
949 link_activate(l_ptr); 949 link_activate(l_ptr);
950 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 950 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
951 l_ptr->fsm_msg_cnt++; 951 l_ptr->fsm_msg_cnt++;
952 link_set_timer(l_ptr, cont_intv); 952 link_set_timer(l_ptr, cont_intv);
953 break; 953 break;
@@ -956,7 +956,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
956 break; 956 break;
957 case TIMEOUT_EVT: 957 case TIMEOUT_EVT:
958 dbg_link("TIM\n"); 958 dbg_link("TIM\n");
959 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 959 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
960 l_ptr->fsm_msg_cnt++; 960 l_ptr->fsm_msg_cnt++;
961 link_set_timer(l_ptr, cont_intv); 961 link_set_timer(l_ptr, cont_intv);
962 dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt); 962 dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
@@ -1023,12 +1023,12 @@ static inline void link_add_to_outqueue(struct link *l_ptr,
1023} 1023}
1024 1024
1025/* 1025/*
1026 * link_send_buf() is the 'full path' for messages, called from 1026 * tipc_link_send_buf() is the 'full path' for messages, called from
1027 * inside TIPC when the 'fast path' in tipc_send_buf 1027 * inside TIPC when the 'fast path' in tipc_send_buf
1028 * has failed, and from link_send() 1028 * has failed, and from link_send()
1029 */ 1029 */
1030 1030
1031int link_send_buf(struct link *l_ptr, struct sk_buff *buf) 1031int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1032{ 1032{
1033 struct tipc_msg *msg = buf_msg(buf); 1033 struct tipc_msg *msg = buf_msg(buf);
1034 u32 size = msg_size(msg); 1034 u32 size = msg_size(msg);
@@ -1051,7 +1051,7 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1051 buf_discard(buf); 1051 buf_discard(buf);
1052 if (imp > CONN_MANAGER) { 1052 if (imp > CONN_MANAGER) {
1053 warn("Resetting <%s>, send queue full", l_ptr->name); 1053 warn("Resetting <%s>, send queue full", l_ptr->name);
1054 link_reset(l_ptr); 1054 tipc_link_reset(l_ptr);
1055 } 1055 }
1056 return dsz; 1056 return dsz;
1057 } 1057 }
@@ -1059,21 +1059,21 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1059 /* Fragmentation needed ? */ 1059 /* Fragmentation needed ? */
1060 1060
1061 if (size > max_packet) 1061 if (size > max_packet)
1062 return link_send_long_buf(l_ptr, buf); 1062 return tipc_link_send_long_buf(l_ptr, buf);
1063 1063
1064 /* Packet can be queued or sent: */ 1064 /* Packet can be queued or sent: */
1065 1065
1066 if (queue_size > l_ptr->stats.max_queue_sz) 1066 if (queue_size > l_ptr->stats.max_queue_sz)
1067 l_ptr->stats.max_queue_sz = queue_size; 1067 l_ptr->stats.max_queue_sz = queue_size;
1068 1068
1069 if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) && 1069 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
1070 !link_congested(l_ptr))) { 1070 !link_congested(l_ptr))) {
1071 link_add_to_outqueue(l_ptr, buf, msg); 1071 link_add_to_outqueue(l_ptr, buf, msg);
1072 1072
1073 if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) { 1073 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
1074 l_ptr->unacked_window = 0; 1074 l_ptr->unacked_window = 0;
1075 } else { 1075 } else {
1076 bearer_schedule(l_ptr->b_ptr, l_ptr); 1076 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1077 l_ptr->stats.bearer_congs++; 1077 l_ptr->stats.bearer_congs++;
1078 l_ptr->next_out = buf; 1078 l_ptr->next_out = buf;
1079 } 1079 }
@@ -1088,7 +1088,7 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1088 1088
1089 if (l_ptr->next_out && 1089 if (l_ptr->next_out &&
1090 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { 1090 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
1091 bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); 1091 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1092 return dsz; 1092 return dsz;
1093 } 1093 }
1094 1094
@@ -1114,38 +1114,38 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1114 if (!l_ptr->next_out) 1114 if (!l_ptr->next_out)
1115 l_ptr->next_out = buf; 1115 l_ptr->next_out = buf;
1116 link_add_to_outqueue(l_ptr, buf, msg); 1116 link_add_to_outqueue(l_ptr, buf, msg);
1117 bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); 1117 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1118 return dsz; 1118 return dsz;
1119} 1119}
1120 1120
1121/* 1121/*
1122 * link_send(): same as link_send_buf(), but the link to use has 1122 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
1123 * not been selected yet, and the the owner node is not locked 1123 * not been selected yet, and the the owner node is not locked
1124 * Called by TIPC internal users, e.g. the name distributor 1124 * Called by TIPC internal users, e.g. the name distributor
1125 */ 1125 */
1126 1126
1127int link_send(struct sk_buff *buf, u32 dest, u32 selector) 1127int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
1128{ 1128{
1129 struct link *l_ptr; 1129 struct link *l_ptr;
1130 struct node *n_ptr; 1130 struct node *n_ptr;
1131 int res = -ELINKCONG; 1131 int res = -ELINKCONG;
1132 1132
1133 read_lock_bh(&net_lock); 1133 read_lock_bh(&tipc_net_lock);
1134 n_ptr = node_select(dest, selector); 1134 n_ptr = tipc_node_select(dest, selector);
1135 if (n_ptr) { 1135 if (n_ptr) {
1136 node_lock(n_ptr); 1136 tipc_node_lock(n_ptr);
1137 l_ptr = n_ptr->active_links[selector & 1]; 1137 l_ptr = n_ptr->active_links[selector & 1];
1138 dbg("link_send: found link %x for dest %x\n", l_ptr, dest); 1138 dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
1139 if (l_ptr) { 1139 if (l_ptr) {
1140 res = link_send_buf(l_ptr, buf); 1140 res = tipc_link_send_buf(l_ptr, buf);
1141 } 1141 }
1142 node_unlock(n_ptr); 1142 tipc_node_unlock(n_ptr);
1143 } else { 1143 } else {
1144 dbg("Attempt to send msg to unknown node:\n"); 1144 dbg("Attempt to send msg to unknown node:\n");
1145 msg_dbg(buf_msg(buf),">>>"); 1145 msg_dbg(buf_msg(buf),">>>");
1146 buf_discard(buf); 1146 buf_discard(buf);
1147 } 1147 }
1148 read_unlock_bh(&net_lock); 1148 read_unlock_bh(&tipc_net_lock);
1149 return res; 1149 return res;
1150} 1150}
1151 1151
@@ -1166,14 +1166,14 @@ static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1166 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) { 1166 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
1167 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) { 1167 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1168 link_add_to_outqueue(l_ptr, buf, msg); 1168 link_add_to_outqueue(l_ptr, buf, msg);
1169 if (likely(bearer_send(l_ptr->b_ptr, buf, 1169 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1170 &l_ptr->media_addr))) { 1170 &l_ptr->media_addr))) {
1171 l_ptr->unacked_window = 0; 1171 l_ptr->unacked_window = 0;
1172 msg_dbg(msg,"SENT_FAST:"); 1172 msg_dbg(msg,"SENT_FAST:");
1173 return res; 1173 return res;
1174 } 1174 }
1175 dbg("failed sent fast...\n"); 1175 dbg("failed sent fast...\n");
1176 bearer_schedule(l_ptr->b_ptr, l_ptr); 1176 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1177 l_ptr->stats.bearer_congs++; 1177 l_ptr->stats.bearer_congs++;
1178 l_ptr->next_out = buf; 1178 l_ptr->next_out = buf;
1179 return res; 1179 return res;
@@ -1182,7 +1182,7 @@ static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1182 else 1182 else
1183 *used_max_pkt = link_max_pkt(l_ptr); 1183 *used_max_pkt = link_max_pkt(l_ptr);
1184 } 1184 }
1185 return link_send_buf(l_ptr, buf); /* All other cases */ 1185 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
1186} 1186}
1187 1187
1188/* 1188/*
@@ -1200,24 +1200,24 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1200 u32 dummy; 1200 u32 dummy;
1201 1201
1202 if (destnode == tipc_own_addr) 1202 if (destnode == tipc_own_addr)
1203 return port_recv_msg(buf); 1203 return tipc_port_recv_msg(buf);
1204 1204
1205 read_lock_bh(&net_lock); 1205 read_lock_bh(&tipc_net_lock);
1206 n_ptr = node_select(destnode, selector); 1206 n_ptr = tipc_node_select(destnode, selector);
1207 if (likely(n_ptr)) { 1207 if (likely(n_ptr)) {
1208 node_lock(n_ptr); 1208 tipc_node_lock(n_ptr);
1209 l_ptr = n_ptr->active_links[selector]; 1209 l_ptr = n_ptr->active_links[selector];
1210 dbg("send_fast: buf %x selected %x, destnode = %x\n", 1210 dbg("send_fast: buf %x selected %x, destnode = %x\n",
1211 buf, l_ptr, destnode); 1211 buf, l_ptr, destnode);
1212 if (likely(l_ptr)) { 1212 if (likely(l_ptr)) {
1213 res = link_send_buf_fast(l_ptr, buf, &dummy); 1213 res = link_send_buf_fast(l_ptr, buf, &dummy);
1214 node_unlock(n_ptr); 1214 tipc_node_unlock(n_ptr);
1215 read_unlock_bh(&net_lock); 1215 read_unlock_bh(&tipc_net_lock);
1216 return res; 1216 return res;
1217 } 1217 }
1218 node_unlock(n_ptr); 1218 tipc_node_unlock(n_ptr);
1219 } 1219 }
1220 read_unlock_bh(&net_lock); 1220 read_unlock_bh(&tipc_net_lock);
1221 res = msg_data_sz(buf_msg(buf)); 1221 res = msg_data_sz(buf_msg(buf));
1222 tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1222 tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1223 return res; 1223 return res;
@@ -1225,15 +1225,15 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1225 1225
1226 1226
1227/* 1227/*
1228 * link_send_sections_fast: Entry for messages where the 1228 * tipc_link_send_sections_fast: Entry for messages where the
1229 * destination processor is known and the header is complete, 1229 * destination processor is known and the header is complete,
1230 * except for total message length. 1230 * except for total message length.
1231 * Returns user data length or errno. 1231 * Returns user data length or errno.
1232 */ 1232 */
1233int link_send_sections_fast(struct port *sender, 1233int tipc_link_send_sections_fast(struct port *sender,
1234 struct iovec const *msg_sect, 1234 struct iovec const *msg_sect,
1235 const u32 num_sect, 1235 const u32 num_sect,
1236 u32 destaddr) 1236 u32 destaddr)
1237{ 1237{
1238 struct tipc_msg *hdr = &sender->publ.phdr; 1238 struct tipc_msg *hdr = &sender->publ.phdr;
1239 struct link *l_ptr; 1239 struct link *l_ptr;
@@ -1253,10 +1253,10 @@ again:
1253 res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt, 1253 res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
1254 !sender->user_port, &buf); 1254 !sender->user_port, &buf);
1255 1255
1256 read_lock_bh(&net_lock); 1256 read_lock_bh(&tipc_net_lock);
1257 node = node_select(destaddr, selector); 1257 node = tipc_node_select(destaddr, selector);
1258 if (likely(node)) { 1258 if (likely(node)) {
1259 node_lock(node); 1259 tipc_node_lock(node);
1260 l_ptr = node->active_links[selector]; 1260 l_ptr = node->active_links[selector];
1261 if (likely(l_ptr)) { 1261 if (likely(l_ptr)) {
1262 if (likely(buf)) { 1262 if (likely(buf)) {
@@ -1265,8 +1265,8 @@ again:
1265 if (unlikely(res < 0)) 1265 if (unlikely(res < 0))
1266 buf_discard(buf); 1266 buf_discard(buf);
1267exit: 1267exit:
1268 node_unlock(node); 1268 tipc_node_unlock(node);
1269 read_unlock_bh(&net_lock); 1269 read_unlock_bh(&tipc_net_lock);
1270 return res; 1270 return res;
1271 } 1271 }
1272 1272
@@ -1290,8 +1290,8 @@ exit:
1290 */ 1290 */
1291 1291
1292 sender->max_pkt = link_max_pkt(l_ptr); 1292 sender->max_pkt = link_max_pkt(l_ptr);
1293 node_unlock(node); 1293 tipc_node_unlock(node);
1294 read_unlock_bh(&net_lock); 1294 read_unlock_bh(&tipc_net_lock);
1295 1295
1296 1296
1297 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt) 1297 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
@@ -1300,17 +1300,17 @@ exit:
1300 return link_send_sections_long(sender, msg_sect, 1300 return link_send_sections_long(sender, msg_sect,
1301 num_sect, destaddr); 1301 num_sect, destaddr);
1302 } 1302 }
1303 node_unlock(node); 1303 tipc_node_unlock(node);
1304 } 1304 }
1305 read_unlock_bh(&net_lock); 1305 read_unlock_bh(&tipc_net_lock);
1306 1306
1307 /* Couldn't find a link to the destination node */ 1307 /* Couldn't find a link to the destination node */
1308 1308
1309 if (buf) 1309 if (buf)
1310 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1310 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1311 if (res >= 0) 1311 if (res >= 0)
1312 return port_reject_sections(sender, hdr, msg_sect, num_sect, 1312 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1313 TIPC_ERR_NO_NODE); 1313 TIPC_ERR_NO_NODE);
1314 return res; 1314 return res;
1315} 1315}
1316 1316
@@ -1444,17 +1444,17 @@ error:
1444 * Now we have a buffer chain. Select a link and check 1444 * Now we have a buffer chain. Select a link and check
1445 * that packet size is still OK 1445 * that packet size is still OK
1446 */ 1446 */
1447 node = node_select(destaddr, sender->publ.ref & 1); 1447 node = tipc_node_select(destaddr, sender->publ.ref & 1);
1448 if (likely(node)) { 1448 if (likely(node)) {
1449 node_lock(node); 1449 tipc_node_lock(node);
1450 l_ptr = node->active_links[sender->publ.ref & 1]; 1450 l_ptr = node->active_links[sender->publ.ref & 1];
1451 if (!l_ptr) { 1451 if (!l_ptr) {
1452 node_unlock(node); 1452 tipc_node_unlock(node);
1453 goto reject; 1453 goto reject;
1454 } 1454 }
1455 if (link_max_pkt(l_ptr) < max_pkt) { 1455 if (link_max_pkt(l_ptr) < max_pkt) {
1456 sender->max_pkt = link_max_pkt(l_ptr); 1456 sender->max_pkt = link_max_pkt(l_ptr);
1457 node_unlock(node); 1457 tipc_node_unlock(node);
1458 for (; buf_chain; buf_chain = buf) { 1458 for (; buf_chain; buf_chain = buf) {
1459 buf = buf_chain->next; 1459 buf = buf_chain->next;
1460 buf_discard(buf_chain); 1460 buf_discard(buf_chain);
@@ -1467,8 +1467,8 @@ reject:
1467 buf = buf_chain->next; 1467 buf = buf_chain->next;
1468 buf_discard(buf_chain); 1468 buf_discard(buf_chain);
1469 } 1469 }
1470 return port_reject_sections(sender, hdr, msg_sect, num_sect, 1470 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1471 TIPC_ERR_NO_NODE); 1471 TIPC_ERR_NO_NODE);
1472 } 1472 }
1473 1473
1474 /* Append whole chain to send queue: */ 1474 /* Append whole chain to send queue: */
@@ -1491,15 +1491,15 @@ reject:
1491 1491
1492 /* Send it, if possible: */ 1492 /* Send it, if possible: */
1493 1493
1494 link_push_queue(l_ptr); 1494 tipc_link_push_queue(l_ptr);
1495 node_unlock(node); 1495 tipc_node_unlock(node);
1496 return dsz; 1496 return dsz;
1497} 1497}
1498 1498
1499/* 1499/*
1500 * link_push_packet: Push one unsent packet to the media 1500 * tipc_link_push_packet: Push one unsent packet to the media
1501 */ 1501 */
1502u32 link_push_packet(struct link *l_ptr) 1502u32 tipc_link_push_packet(struct link *l_ptr)
1503{ 1503{
1504 struct sk_buff *buf = l_ptr->first_out; 1504 struct sk_buff *buf = l_ptr->first_out;
1505 u32 r_q_size = l_ptr->retransm_queue_size; 1505 u32 r_q_size = l_ptr->retransm_queue_size;
@@ -1526,7 +1526,7 @@ u32 link_push_packet(struct link *l_ptr)
1526 if (r_q_size && buf && !skb_cloned(buf)) { 1526 if (r_q_size && buf && !skb_cloned(buf)) {
1527 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1527 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1528 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1528 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1529 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1529 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1530 msg_dbg(buf_msg(buf), ">DEF-RETR>"); 1530 msg_dbg(buf_msg(buf), ">DEF-RETR>");
1531 l_ptr->retransm_queue_head = mod(++r_q_head); 1531 l_ptr->retransm_queue_head = mod(++r_q_head);
1532 l_ptr->retransm_queue_size = --r_q_size; 1532 l_ptr->retransm_queue_size = --r_q_size;
@@ -1545,7 +1545,7 @@ u32 link_push_packet(struct link *l_ptr)
1545 if (buf) { 1545 if (buf) {
1546 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1546 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1547 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in); 1547 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
1548 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1548 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1549 msg_dbg(buf_msg(buf), ">DEF-PROT>"); 1549 msg_dbg(buf_msg(buf), ">DEF-PROT>");
1550 l_ptr->unacked_window = 0; 1550 l_ptr->unacked_window = 0;
1551 buf_discard(buf); 1551 buf_discard(buf);
@@ -1569,7 +1569,7 @@ u32 link_push_packet(struct link *l_ptr)
1569 if (mod(next - first) < l_ptr->queue_limit[0]) { 1569 if (mod(next - first) < l_ptr->queue_limit[0]) {
1570 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1570 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1571 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1571 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1572 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1572 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1573 if (msg_user(msg) == MSG_BUNDLER) 1573 if (msg_user(msg) == MSG_BUNDLER)
1574 msg_set_type(msg, CLOSED_MSG); 1574 msg_set_type(msg, CLOSED_MSG);
1575 msg_dbg(msg, ">PUSH-DATA>"); 1575 msg_dbg(msg, ">PUSH-DATA>");
@@ -1589,29 +1589,29 @@ u32 link_push_packet(struct link *l_ptr)
1589 * push_queue(): push out the unsent messages of a link where 1589 * push_queue(): push out the unsent messages of a link where
1590 * congestion has abated. Node is locked 1590 * congestion has abated. Node is locked
1591 */ 1591 */
1592void link_push_queue(struct link *l_ptr) 1592void tipc_link_push_queue(struct link *l_ptr)
1593{ 1593{
1594 u32 res; 1594 u32 res;
1595 1595
1596 if (bearer_congested(l_ptr->b_ptr, l_ptr)) 1596 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1597 return; 1597 return;
1598 1598
1599 do { 1599 do {
1600 res = link_push_packet(l_ptr); 1600 res = tipc_link_push_packet(l_ptr);
1601 } 1601 }
1602 while (res == TIPC_OK); 1602 while (res == TIPC_OK);
1603 if (res == PUSH_FAILED) 1603 if (res == PUSH_FAILED)
1604 bearer_schedule(l_ptr->b_ptr, l_ptr); 1604 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1605} 1605}
1606 1606
1607void link_retransmit(struct link *l_ptr, struct sk_buff *buf, 1607void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1608 u32 retransmits) 1608 u32 retransmits)
1609{ 1609{
1610 struct tipc_msg *msg; 1610 struct tipc_msg *msg;
1611 1611
1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); 1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1613 1613
1614 if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) { 1614 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>"); 1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
1616 dbg_print_link(l_ptr, " "); 1616 dbg_print_link(l_ptr, " ");
1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf)); 1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
@@ -1622,15 +1622,15 @@ void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1622 msg = buf_msg(buf); 1622 msg = buf_msg(buf);
1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1625 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1625 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1626 /* Catch if retransmissions fail repeatedly: */ 1626 /* Catch if retransmissions fail repeatedly: */
1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) { 1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1628 if (++l_ptr->stale_count > 100) { 1628 if (++l_ptr->stale_count > 100) {
1629 msg_print(CONS, buf_msg(buf), ">RETR>"); 1629 tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
1630 info("...Retransmitted %u times\n", 1630 info("...Retransmitted %u times\n",
1631 l_ptr->stale_count); 1631 l_ptr->stale_count);
1632 link_print(l_ptr, CONS, "Resetting Link\n");; 1632 link_print(l_ptr, TIPC_CONS, "Resetting Link\n");;
1633 link_reset(l_ptr); 1633 tipc_link_reset(l_ptr);
1634 break; 1634 break;
1635 } 1635 }
1636 } else { 1636 } else {
@@ -1643,7 +1643,7 @@ void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1643 retransmits--; 1643 retransmits--;
1644 l_ptr->stats.retransmitted++; 1644 l_ptr->stats.retransmitted++;
1645 } else { 1645 } else {
1646 bearer_schedule(l_ptr->b_ptr, l_ptr); 1646 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1647 l_ptr->stats.bearer_congs++; 1647 l_ptr->stats.bearer_congs++;
1648 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf)); 1648 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1649 l_ptr->retransm_queue_size = retransmits; 1649 l_ptr->retransm_queue_size = retransmits;
@@ -1663,9 +1663,9 @@ static void link_recv_non_seq(struct sk_buff *buf)
1663 struct tipc_msg *msg = buf_msg(buf); 1663 struct tipc_msg *msg = buf_msg(buf);
1664 1664
1665 if (msg_user(msg) == LINK_CONFIG) 1665 if (msg_user(msg) == LINK_CONFIG)
1666 disc_recv_msg(buf); 1666 tipc_disc_recv_msg(buf);
1667 else 1667 else
1668 bclink_recv_pkt(buf); 1668 tipc_bclink_recv_pkt(buf);
1669} 1669}
1670 1670
1671/** 1671/**
@@ -1692,7 +1692,7 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1692 1692
1693void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) 1693void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1694{ 1694{
1695 read_lock_bh(&net_lock); 1695 read_lock_bh(&tipc_net_lock);
1696 while (head) { 1696 while (head) {
1697 struct bearer *b_ptr; 1697 struct bearer *b_ptr;
1698 struct node *n_ptr; 1698 struct node *n_ptr;
@@ -1720,22 +1720,22 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1720 link_recv_non_seq(buf); 1720 link_recv_non_seq(buf);
1721 continue; 1721 continue;
1722 } 1722 }
1723 n_ptr = node_find(msg_prevnode(msg)); 1723 n_ptr = tipc_node_find(msg_prevnode(msg));
1724 if (unlikely(!n_ptr)) 1724 if (unlikely(!n_ptr))
1725 goto cont; 1725 goto cont;
1726 1726
1727 node_lock(n_ptr); 1727 tipc_node_lock(n_ptr);
1728 l_ptr = n_ptr->links[b_ptr->identity]; 1728 l_ptr = n_ptr->links[b_ptr->identity];
1729 if (unlikely(!l_ptr)) { 1729 if (unlikely(!l_ptr)) {
1730 node_unlock(n_ptr); 1730 tipc_node_unlock(n_ptr);
1731 goto cont; 1731 goto cont;
1732 } 1732 }
1733 /* 1733 /*
1734 * Release acked messages 1734 * Release acked messages
1735 */ 1735 */
1736 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) { 1736 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1737 if (node_is_up(n_ptr) && n_ptr->bclink.supported) 1737 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1738 bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1738 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1739 } 1739 }
1740 1740
1741 crs = l_ptr->first_out; 1741 crs = l_ptr->first_out;
@@ -1752,12 +1752,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1752 l_ptr->out_queue_size -= released; 1752 l_ptr->out_queue_size -= released;
1753 } 1753 }
1754 if (unlikely(l_ptr->next_out)) 1754 if (unlikely(l_ptr->next_out))
1755 link_push_queue(l_ptr); 1755 tipc_link_push_queue(l_ptr);
1756 if (unlikely(!list_empty(&l_ptr->waiting_ports))) 1756 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1757 link_wakeup_ports(l_ptr, 0); 1757 tipc_link_wakeup_ports(l_ptr, 0);
1758 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { 1758 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1759 l_ptr->stats.sent_acks++; 1759 l_ptr->stats.sent_acks++;
1760 link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1760 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1761 } 1761 }
1762 1762
1763protocol_check: 1763protocol_check:
@@ -1770,8 +1770,8 @@ protocol_check:
1770 if (likely(msg_is_dest(msg, tipc_own_addr))) { 1770 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1771deliver: 1771deliver:
1772 if (likely(msg_isdata(msg))) { 1772 if (likely(msg_isdata(msg))) {
1773 node_unlock(n_ptr); 1773 tipc_node_unlock(n_ptr);
1774 port_recv_msg(buf); 1774 tipc_port_recv_msg(buf);
1775 continue; 1775 continue;
1776 } 1776 }
1777 switch (msg_user(msg)) { 1777 switch (msg_user(msg)) {
@@ -1779,34 +1779,32 @@ deliver:
1779 l_ptr->stats.recv_bundles++; 1779 l_ptr->stats.recv_bundles++;
1780 l_ptr->stats.recv_bundled += 1780 l_ptr->stats.recv_bundled +=
1781 msg_msgcnt(msg); 1781 msg_msgcnt(msg);
1782 node_unlock(n_ptr); 1782 tipc_node_unlock(n_ptr);
1783 link_recv_bundle(buf); 1783 tipc_link_recv_bundle(buf);
1784 continue; 1784 continue;
1785 case ROUTE_DISTRIBUTOR: 1785 case ROUTE_DISTRIBUTOR:
1786 node_unlock(n_ptr); 1786 tipc_node_unlock(n_ptr);
1787 cluster_recv_routing_table(buf); 1787 tipc_cltr_recv_routing_table(buf);
1788 continue; 1788 continue;
1789 case NAME_DISTRIBUTOR: 1789 case NAME_DISTRIBUTOR:
1790 node_unlock(n_ptr); 1790 tipc_node_unlock(n_ptr);
1791 named_recv(buf); 1791 tipc_named_recv(buf);
1792 continue; 1792 continue;
1793 case CONN_MANAGER: 1793 case CONN_MANAGER:
1794 node_unlock(n_ptr); 1794 tipc_node_unlock(n_ptr);
1795 port_recv_proto_msg(buf); 1795 tipc_port_recv_proto_msg(buf);
1796 continue; 1796 continue;
1797 case MSG_FRAGMENTER: 1797 case MSG_FRAGMENTER:
1798 l_ptr->stats.recv_fragments++; 1798 l_ptr->stats.recv_fragments++;
1799 if (link_recv_fragment( 1799 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1800 &l_ptr->defragm_buf, 1800 &buf, &msg)) {
1801 &buf, &msg)) {
1802 l_ptr->stats.recv_fragmented++; 1801 l_ptr->stats.recv_fragmented++;
1803 goto deliver; 1802 goto deliver;
1804 } 1803 }
1805 break; 1804 break;
1806 case CHANGEOVER_PROTOCOL: 1805 case CHANGEOVER_PROTOCOL:
1807 type = msg_type(msg); 1806 type = msg_type(msg);
1808 if (link_recv_changeover_msg( 1807 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1809 &l_ptr, &buf)) {
1810 msg = buf_msg(buf); 1808 msg = buf_msg(buf);
1811 seq_no = msg_seqno(msg); 1809 seq_no = msg_seqno(msg);
1812 TIPC_SKB_CB(buf)->handle 1810 TIPC_SKB_CB(buf)->handle
@@ -1818,20 +1816,20 @@ deliver:
1818 break; 1816 break;
1819 } 1817 }
1820 } 1818 }
1821 node_unlock(n_ptr); 1819 tipc_node_unlock(n_ptr);
1822 net_route_msg(buf); 1820 tipc_net_route_msg(buf);
1823 continue; 1821 continue;
1824 } 1822 }
1825 link_handle_out_of_seq_msg(l_ptr, buf); 1823 link_handle_out_of_seq_msg(l_ptr, buf);
1826 head = link_insert_deferred_queue(l_ptr, head); 1824 head = link_insert_deferred_queue(l_ptr, head);
1827 node_unlock(n_ptr); 1825 tipc_node_unlock(n_ptr);
1828 continue; 1826 continue;
1829 } 1827 }
1830 1828
1831 if (msg_user(msg) == LINK_PROTOCOL) { 1829 if (msg_user(msg) == LINK_PROTOCOL) {
1832 link_recv_proto_msg(l_ptr, buf); 1830 link_recv_proto_msg(l_ptr, buf);
1833 head = link_insert_deferred_queue(l_ptr, head); 1831 head = link_insert_deferred_queue(l_ptr, head);
1834 node_unlock(n_ptr); 1832 tipc_node_unlock(n_ptr);
1835 continue; 1833 continue;
1836 } 1834 }
1837 msg_dbg(msg,"NSEQ<REC<"); 1835 msg_dbg(msg,"NSEQ<REC<");
@@ -1842,14 +1840,14 @@ deliver:
1842 msg_dbg(msg,"RECV-REINS:"); 1840 msg_dbg(msg,"RECV-REINS:");
1843 buf->next = head; 1841 buf->next = head;
1844 head = buf; 1842 head = buf;
1845 node_unlock(n_ptr); 1843 tipc_node_unlock(n_ptr);
1846 continue; 1844 continue;
1847 } 1845 }
1848 node_unlock(n_ptr); 1846 tipc_node_unlock(n_ptr);
1849cont: 1847cont:
1850 buf_discard(buf); 1848 buf_discard(buf);
1851 } 1849 }
1852 read_unlock_bh(&net_lock); 1850 read_unlock_bh(&tipc_net_lock);
1853} 1851}
1854 1852
1855/* 1853/*
@@ -1858,9 +1856,9 @@ cont:
1858 * Returns the increase of the queue length,i.e. 0 or 1 1856 * Returns the increase of the queue length,i.e. 0 or 1
1859 */ 1857 */
1860 1858
1861u32 link_defer_pkt(struct sk_buff **head, 1859u32 tipc_link_defer_pkt(struct sk_buff **head,
1862 struct sk_buff **tail, 1860 struct sk_buff **tail,
1863 struct sk_buff *buf) 1861 struct sk_buff *buf)
1864{ 1862{
1865 struct sk_buff *prev = 0; 1863 struct sk_buff *prev = 0;
1866 struct sk_buff *crs = *head; 1864 struct sk_buff *crs = *head;
@@ -1939,12 +1937,12 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
1939 return; 1937 return;
1940 } 1938 }
1941 1939
1942 if (link_defer_pkt(&l_ptr->oldest_deferred_in, 1940 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1943 &l_ptr->newest_deferred_in, buf)) { 1941 &l_ptr->newest_deferred_in, buf)) {
1944 l_ptr->deferred_inqueue_sz++; 1942 l_ptr->deferred_inqueue_sz++;
1945 l_ptr->stats.deferred_recv++; 1943 l_ptr->stats.deferred_recv++;
1946 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1944 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1947 link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1945 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1948 } else 1946 } else
1949 l_ptr->stats.duplicates++; 1947 l_ptr->stats.duplicates++;
1950} 1948}
@@ -1952,8 +1950,8 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
1952/* 1950/*
1953 * Send protocol message to the other endpoint. 1951 * Send protocol message to the other endpoint.
1954 */ 1952 */
1955void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg, 1953void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1956 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) 1954 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1957{ 1955{
1958 struct sk_buff *buf = 0; 1956 struct sk_buff *buf = 0;
1959 struct tipc_msg *msg = l_ptr->pmsg; 1957 struct tipc_msg *msg = l_ptr->pmsg;
@@ -1964,12 +1962,12 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1964 msg_set_type(msg, msg_typ); 1962 msg_set_type(msg, msg_typ);
1965 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1963 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1966 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 1964 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
1967 msg_set_last_bcast(msg, bclink_get_last_sent()); 1965 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1968 1966
1969 if (msg_typ == STATE_MSG) { 1967 if (msg_typ == STATE_MSG) {
1970 u32 next_sent = mod(l_ptr->next_out_no); 1968 u32 next_sent = mod(l_ptr->next_out_no);
1971 1969
1972 if (!link_is_up(l_ptr)) 1970 if (!tipc_link_is_up(l_ptr))
1973 return; 1971 return;
1974 if (l_ptr->next_out) 1972 if (l_ptr->next_out)
1975 next_sent = msg_seqno(buf_msg(l_ptr->next_out)); 1973 next_sent = msg_seqno(buf_msg(l_ptr->next_out));
@@ -2013,7 +2011,7 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2013 msg_set_max_pkt(msg, l_ptr->max_pkt_target); 2011 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2014 } 2012 }
2015 2013
2016 if (node_has_redundant_links(l_ptr->owner)) { 2014 if (tipc_node_has_redundant_links(l_ptr->owner)) {
2017 msg_set_redundant_link(msg); 2015 msg_set_redundant_link(msg);
2018 } else { 2016 } else {
2019 msg_clear_redundant_link(msg); 2017 msg_clear_redundant_link(msg);
@@ -2026,7 +2024,7 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2026 2024
2027 /* Congestion? */ 2025 /* Congestion? */
2028 2026
2029 if (bearer_congested(l_ptr->b_ptr, l_ptr)) { 2027 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2030 if (!l_ptr->proto_msg_queue) { 2028 if (!l_ptr->proto_msg_queue) {
2031 l_ptr->proto_msg_queue = 2029 l_ptr->proto_msg_queue =
2032 buf_acquire(sizeof(l_ptr->proto_msg)); 2030 buf_acquire(sizeof(l_ptr->proto_msg));
@@ -2050,14 +2048,14 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2050 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg)); 2048 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2051 msg_set_size(buf_msg(buf), msg_size); 2049 msg_set_size(buf_msg(buf), msg_size);
2052 2050
2053 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 2051 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2054 l_ptr->unacked_window = 0; 2052 l_ptr->unacked_window = 0;
2055 buf_discard(buf); 2053 buf_discard(buf);
2056 return; 2054 return;
2057 } 2055 }
2058 2056
2059 /* New congestion */ 2057 /* New congestion */
2060 bearer_schedule(l_ptr->b_ptr, l_ptr); 2058 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2061 l_ptr->proto_msg_queue = buf; 2059 l_ptr->proto_msg_queue = buf;
2062 l_ptr->stats.bearer_congs++; 2060 l_ptr->stats.bearer_congs++;
2063} 2061}
@@ -2131,7 +2129,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2131 l_ptr->peer_bearer_id = msg_bearer_id(msg); 2129 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2132 2130
2133 /* Synchronize broadcast sequence numbers */ 2131 /* Synchronize broadcast sequence numbers */
2134 if (!node_has_redundant_links(l_ptr->owner)) { 2132 if (!tipc_node_has_redundant_links(l_ptr->owner)) {
2135 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg)); 2133 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2136 } 2134 }
2137 break; 2135 break;
@@ -2145,7 +2143,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2145 warn("Changing prio <%s>: %u->%u\n", 2143 warn("Changing prio <%s>: %u->%u\n",
2146 l_ptr->name, l_ptr->priority, msg_linkprio(msg)); 2144 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2147 l_ptr->priority = msg_linkprio(msg); 2145 l_ptr->priority = msg_linkprio(msg);
2148 link_reset(l_ptr); /* Enforce change to take effect */ 2146 tipc_link_reset(l_ptr); /* Enforce change to take effect */
2149 break; 2147 break;
2150 } 2148 }
2151 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 2149 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
@@ -2176,17 +2174,17 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2176 2174
2177 /* Protocol message before retransmits, reduce loss risk */ 2175 /* Protocol message before retransmits, reduce loss risk */
2178 2176
2179 bclink_check_gap(l_ptr->owner, msg_last_bcast(msg)); 2177 tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2180 2178
2181 if (rec_gap || (msg_probe(msg))) { 2179 if (rec_gap || (msg_probe(msg))) {
2182 link_send_proto_msg(l_ptr, STATE_MSG, 2180 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2183 0, rec_gap, 0, 0, max_pkt_ack); 2181 0, rec_gap, 0, 0, max_pkt_ack);
2184 } 2182 }
2185 if (msg_seq_gap(msg)) { 2183 if (msg_seq_gap(msg)) {
2186 msg_dbg(msg, "With Gap:"); 2184 msg_dbg(msg, "With Gap:");
2187 l_ptr->stats.recv_nacks++; 2185 l_ptr->stats.recv_nacks++;
2188 link_retransmit(l_ptr, l_ptr->first_out, 2186 tipc_link_retransmit(l_ptr, l_ptr->first_out,
2189 msg_seq_gap(msg)); 2187 msg_seq_gap(msg));
2190 } 2188 }
2191 break; 2189 break;
2192 default: 2190 default:
@@ -2198,20 +2196,20 @@ exit:
2198 2196
2199 2197
2200/* 2198/*
2201 * link_tunnel(): Send one message via a link belonging to 2199 * tipc_link_tunnel(): Send one message via a link belonging to
2202 * another bearer. Owner node is locked. 2200 * another bearer. Owner node is locked.
2203 */ 2201 */
2204void link_tunnel(struct link *l_ptr, 2202void tipc_link_tunnel(struct link *l_ptr,
2205 struct tipc_msg *tunnel_hdr, 2203 struct tipc_msg *tunnel_hdr,
2206 struct tipc_msg *msg, 2204 struct tipc_msg *msg,
2207 u32 selector) 2205 u32 selector)
2208{ 2206{
2209 struct link *tunnel; 2207 struct link *tunnel;
2210 struct sk_buff *buf; 2208 struct sk_buff *buf;
2211 u32 length = msg_size(msg); 2209 u32 length = msg_size(msg);
2212 2210
2213 tunnel = l_ptr->owner->active_links[selector & 1]; 2211 tunnel = l_ptr->owner->active_links[selector & 1];
2214 if (!link_is_up(tunnel)) 2212 if (!tipc_link_is_up(tunnel))
2215 return; 2213 return;
2216 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 2214 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2217 buf = buf_acquire(length + INT_H_SIZE); 2215 buf = buf_acquire(length + INT_H_SIZE);
@@ -2222,7 +2220,7 @@ void link_tunnel(struct link *l_ptr,
2222 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane); 2220 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2223 msg_dbg(buf_msg(buf), ">SEND>"); 2221 msg_dbg(buf_msg(buf), ">SEND>");
2224 assert(tunnel); 2222 assert(tunnel);
2225 link_send_buf(tunnel, buf); 2223 tipc_link_send_buf(tunnel, buf);
2226} 2224}
2227 2225
2228 2226
@@ -2232,12 +2230,12 @@ void link_tunnel(struct link *l_ptr,
2232 * Owner node is locked. 2230 * Owner node is locked.
2233 */ 2231 */
2234 2232
2235void link_changeover(struct link *l_ptr) 2233void tipc_link_changeover(struct link *l_ptr)
2236{ 2234{
2237 u32 msgcount = l_ptr->out_queue_size; 2235 u32 msgcount = l_ptr->out_queue_size;
2238 struct sk_buff *crs = l_ptr->first_out; 2236 struct sk_buff *crs = l_ptr->first_out;
2239 struct link *tunnel = l_ptr->owner->active_links[0]; 2237 struct link *tunnel = l_ptr->owner->active_links[0];
2240 int split_bundles = node_has_redundant_links(l_ptr->owner); 2238 int split_bundles = tipc_node_has_redundant_links(l_ptr->owner);
2241 struct tipc_msg tunnel_hdr; 2239 struct tipc_msg tunnel_hdr;
2242 2240
2243 if (!tunnel) 2241 if (!tunnel)
@@ -2261,7 +2259,7 @@ void link_changeover(struct link *l_ptr)
2261 dbg("%c->%c:", l_ptr->b_ptr->net_plane, 2259 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2262 tunnel->b_ptr->net_plane); 2260 tunnel->b_ptr->net_plane);
2263 msg_dbg(&tunnel_hdr, "EMPTY>SEND>"); 2261 msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2264 link_send_buf(tunnel, buf); 2262 tipc_link_send_buf(tunnel, buf);
2265 } else { 2263 } else {
2266 warn("Memory squeeze; link changeover failed\n"); 2264 warn("Memory squeeze; link changeover failed\n");
2267 } 2265 }
@@ -2277,20 +2275,20 @@ void link_changeover(struct link *l_ptr)
2277 2275
2278 while (msgcount--) { 2276 while (msgcount--) {
2279 msg_set_seqno(m,msg_seqno(msg)); 2277 msg_set_seqno(m,msg_seqno(msg));
2280 link_tunnel(l_ptr, &tunnel_hdr, m, 2278 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2281 msg_link_selector(m)); 2279 msg_link_selector(m));
2282 pos += align(msg_size(m)); 2280 pos += align(msg_size(m));
2283 m = (struct tipc_msg *)pos; 2281 m = (struct tipc_msg *)pos;
2284 } 2282 }
2285 } else { 2283 } else {
2286 link_tunnel(l_ptr, &tunnel_hdr, msg, 2284 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2287 msg_link_selector(msg)); 2285 msg_link_selector(msg));
2288 } 2286 }
2289 crs = crs->next; 2287 crs = crs->next;
2290 } 2288 }
2291} 2289}
2292 2290
2293void link_send_duplicate(struct link *l_ptr, struct link *tunnel) 2291void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2294{ 2292{
2295 struct sk_buff *iter; 2293 struct sk_buff *iter;
2296 struct tipc_msg tunnel_hdr; 2294 struct tipc_msg tunnel_hdr;
@@ -2320,8 +2318,8 @@ void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2320 dbg("%c->%c:", l_ptr->b_ptr->net_plane, 2318 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2321 tunnel->b_ptr->net_plane); 2319 tunnel->b_ptr->net_plane);
2322 msg_dbg(buf_msg(outbuf), ">SEND>"); 2320 msg_dbg(buf_msg(outbuf), ">SEND>");
2323 link_send_buf(tunnel, outbuf); 2321 tipc_link_send_buf(tunnel, outbuf);
2324 if (!link_is_up(l_ptr)) 2322 if (!tipc_link_is_up(l_ptr))
2325 return; 2323 return;
2326 iter = iter->next; 2324 iter = iter->next;
2327 } 2325 }
@@ -2393,9 +2391,9 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2393 2391
2394 /* First original message ?: */ 2392 /* First original message ?: */
2395 2393
2396 if (link_is_up(dest_link)) { 2394 if (tipc_link_is_up(dest_link)) {
2397 msg_dbg(tunnel_msg, "UP/FIRST/<REC<"); 2395 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2398 link_reset(dest_link); 2396 tipc_link_reset(dest_link);
2399 dest_link->exp_msg_count = msg_count; 2397 dest_link->exp_msg_count = msg_count;
2400 if (!msg_count) 2398 if (!msg_count)
2401 goto exit; 2399 goto exit;
@@ -2436,7 +2434,7 @@ exit:
2436/* 2434/*
2437 * Bundler functionality: 2435 * Bundler functionality:
2438 */ 2436 */
2439void link_recv_bundle(struct sk_buff *buf) 2437void tipc_link_recv_bundle(struct sk_buff *buf)
2440{ 2438{
2441 u32 msgcount = msg_msgcnt(buf_msg(buf)); 2439 u32 msgcount = msg_msgcnt(buf_msg(buf));
2442 u32 pos = INT_H_SIZE; 2440 u32 pos = INT_H_SIZE;
@@ -2456,7 +2454,7 @@ void link_recv_bundle(struct sk_buff *buf)
2456 }; 2454 };
2457 pos += align(msg_size(buf_msg(obuf))); 2455 pos += align(msg_size(buf_msg(obuf)));
2458 msg_dbg(buf_msg(obuf), " /"); 2456 msg_dbg(buf_msg(obuf), " /");
2459 net_route_msg(obuf); 2457 tipc_net_route_msg(obuf);
2460 } 2458 }
2461 buf_discard(buf); 2459 buf_discard(buf);
2462} 2460}
@@ -2467,11 +2465,11 @@ void link_recv_bundle(struct sk_buff *buf)
2467 2465
2468 2466
2469/* 2467/*
2470 * link_send_long_buf: Entry for buffers needing fragmentation. 2468 * tipc_link_send_long_buf: Entry for buffers needing fragmentation.
2471 * The buffer is complete, inclusive total message length. 2469 * The buffer is complete, inclusive total message length.
2472 * Returns user data length. 2470 * Returns user data length.
2473 */ 2471 */
2474int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) 2472int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2475{ 2473{
2476 struct tipc_msg *inmsg = buf_msg(buf); 2474 struct tipc_msg *inmsg = buf_msg(buf);
2477 struct tipc_msg fragm_hdr; 2475 struct tipc_msg fragm_hdr;
@@ -2521,8 +2519,8 @@ int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2521 /* Send queued messages first, if any: */ 2519 /* Send queued messages first, if any: */
2522 2520
2523 l_ptr->stats.sent_fragments++; 2521 l_ptr->stats.sent_fragments++;
2524 link_send_buf(l_ptr, fragm); 2522 tipc_link_send_buf(l_ptr, fragm);
2525 if (!link_is_up(l_ptr)) 2523 if (!tipc_link_is_up(l_ptr))
2526 return dsz; 2524 return dsz;
2527 msg_set_fragm_no(&fragm_hdr, ++fragm_no); 2525 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
2528 rest -= fragm_sz; 2526 rest -= fragm_sz;
@@ -2582,11 +2580,11 @@ static inline void incr_timer_cnt(struct sk_buff *buf)
2582} 2580}
2583 2581
2584/* 2582/*
2585 * link_recv_fragment(): Called with node lock on. Returns 2583 * tipc_link_recv_fragment(): Called with node lock on. Returns
2586 * the reassembled buffer if message is complete. 2584 * the reassembled buffer if message is complete.
2587 */ 2585 */
2588int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 2586int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2589 struct tipc_msg **m) 2587 struct tipc_msg **m)
2590{ 2588{
2591 struct sk_buff *prev = 0; 2589 struct sk_buff *prev = 0;
2592 struct sk_buff *fbuf = *fb; 2590 struct sk_buff *fbuf = *fb;
@@ -2714,7 +2712,7 @@ static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2714} 2712}
2715 2713
2716 2714
2717void link_set_queue_limits(struct link *l_ptr, u32 window) 2715void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2718{ 2716{
2719 /* Data messages from this node, inclusive FIRST_FRAGM */ 2717 /* Data messages from this node, inclusive FIRST_FRAGM */
2720 l_ptr->queue_limit[DATA_LOW] = window; 2718 l_ptr->queue_limit[DATA_LOW] = window;
@@ -2739,7 +2737,7 @@ void link_set_queue_limits(struct link *l_ptr, u32 window)
2739 * @name - ptr to link name string 2737 * @name - ptr to link name string
2740 * @node - ptr to area to be filled with ptr to associated node 2738 * @node - ptr to area to be filled with ptr to associated node
2741 * 2739 *
2742 * Caller must hold 'net_lock' to ensure node and bearer are not deleted; 2740 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2743 * this also prevents link deletion. 2741 * this also prevents link deletion.
2744 * 2742 *
2745 * Returns pointer to link (or 0 if invalid link name). 2743 * Returns pointer to link (or 0 if invalid link name).
@@ -2754,11 +2752,11 @@ static struct link *link_find_link(const char *name, struct node **node)
2754 if (!link_name_validate(name, &link_name_parts)) 2752 if (!link_name_validate(name, &link_name_parts))
2755 return 0; 2753 return 0;
2756 2754
2757 b_ptr = bearer_find_interface(link_name_parts.if_local); 2755 b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2758 if (!b_ptr) 2756 if (!b_ptr)
2759 return 0; 2757 return 0;
2760 2758
2761 *node = node_find(link_name_parts.addr_peer); 2759 *node = tipc_node_find(link_name_parts.addr_peer);
2762 if (!*node) 2760 if (!*node)
2763 return 0; 2761 return 0;
2764 2762
@@ -2769,8 +2767,8 @@ static struct link *link_find_link(const char *name, struct node **node)
2769 return l_ptr; 2767 return l_ptr;
2770} 2768}
2771 2769
2772struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2770struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2773 u16 cmd) 2771 u16 cmd)
2774{ 2772{
2775 struct tipc_link_config *args; 2773 struct tipc_link_config *args;
2776 u32 new_value; 2774 u32 new_value;
@@ -2779,61 +2777,62 @@ struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2779 int res; 2777 int res;
2780 2778
2781 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2779 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2782 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2780 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2783 2781
2784 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2782 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2785 new_value = ntohl(args->value); 2783 new_value = ntohl(args->value);
2786 2784
2787 if (!strcmp(args->name, bc_link_name)) { 2785 if (!strcmp(args->name, tipc_bclink_name)) {
2788 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2786 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2789 (bclink_set_queue_limits(new_value) == 0)) 2787 (tipc_bclink_set_queue_limits(new_value) == 0))
2790 return cfg_reply_none(); 2788 return tipc_cfg_reply_none();
2791 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2789 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2792 " (cannot change setting on broadcast link)"); 2790 " (cannot change setting on broadcast link)");
2793 } 2791 }
2794 2792
2795 read_lock_bh(&net_lock); 2793 read_lock_bh(&tipc_net_lock);
2796 l_ptr = link_find_link(args->name, &node); 2794 l_ptr = link_find_link(args->name, &node);
2797 if (!l_ptr) { 2795 if (!l_ptr) {
2798 read_unlock_bh(&net_lock); 2796 read_unlock_bh(&tipc_net_lock);
2799 return cfg_reply_error_string("link not found"); 2797 return tipc_cfg_reply_error_string("link not found");
2800 } 2798 }
2801 2799
2802 node_lock(node); 2800 tipc_node_lock(node);
2803 res = -EINVAL; 2801 res = -EINVAL;
2804 switch (cmd) { 2802 switch (cmd) {
2805 case TIPC_CMD_SET_LINK_TOL: 2803 case TIPC_CMD_SET_LINK_TOL:
2806 if ((new_value >= TIPC_MIN_LINK_TOL) && 2804 if ((new_value >= TIPC_MIN_LINK_TOL) &&
2807 (new_value <= TIPC_MAX_LINK_TOL)) { 2805 (new_value <= TIPC_MAX_LINK_TOL)) {
2808 link_set_supervision_props(l_ptr, new_value); 2806 link_set_supervision_props(l_ptr, new_value);
2809 link_send_proto_msg(l_ptr, STATE_MSG, 2807 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2810 0, 0, new_value, 0, 0); 2808 0, 0, new_value, 0, 0);
2811 res = TIPC_OK; 2809 res = TIPC_OK;
2812 } 2810 }
2813 break; 2811 break;
2814 case TIPC_CMD_SET_LINK_PRI: 2812 case TIPC_CMD_SET_LINK_PRI:
2815 if (new_value < TIPC_NUM_LINK_PRI) { 2813 if ((new_value >= TIPC_MIN_LINK_PRI) &&
2814 (new_value <= TIPC_MAX_LINK_PRI)) {
2816 l_ptr->priority = new_value; 2815 l_ptr->priority = new_value;
2817 link_send_proto_msg(l_ptr, STATE_MSG, 2816 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2818 0, 0, 0, new_value, 0); 2817 0, 0, 0, new_value, 0);
2819 res = TIPC_OK; 2818 res = TIPC_OK;
2820 } 2819 }
2821 break; 2820 break;
2822 case TIPC_CMD_SET_LINK_WINDOW: 2821 case TIPC_CMD_SET_LINK_WINDOW:
2823 if ((new_value >= TIPC_MIN_LINK_WIN) && 2822 if ((new_value >= TIPC_MIN_LINK_WIN) &&
2824 (new_value <= TIPC_MAX_LINK_WIN)) { 2823 (new_value <= TIPC_MAX_LINK_WIN)) {
2825 link_set_queue_limits(l_ptr, new_value); 2824 tipc_link_set_queue_limits(l_ptr, new_value);
2826 res = TIPC_OK; 2825 res = TIPC_OK;
2827 } 2826 }
2828 break; 2827 break;
2829 } 2828 }
2830 node_unlock(node); 2829 tipc_node_unlock(node);
2831 2830
2832 read_unlock_bh(&net_lock); 2831 read_unlock_bh(&tipc_net_lock);
2833 if (res) 2832 if (res)
2834 return cfg_reply_error_string("cannot change link setting"); 2833 return tipc_cfg_reply_error_string("cannot change link setting");
2835 2834
2836 return cfg_reply_none(); 2835 return tipc_cfg_reply_none();
2837} 2836}
2838 2837
2839/** 2838/**
@@ -2848,34 +2847,34 @@ static void link_reset_statistics(struct link *l_ptr)
2848 l_ptr->stats.recv_info = l_ptr->next_in_no; 2847 l_ptr->stats.recv_info = l_ptr->next_in_no;
2849} 2848}
2850 2849
2851struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2850struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2852{ 2851{
2853 char *link_name; 2852 char *link_name;
2854 struct link *l_ptr; 2853 struct link *l_ptr;
2855 struct node *node; 2854 struct node *node;
2856 2855
2857 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2856 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2858 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2857 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2859 2858
2860 link_name = (char *)TLV_DATA(req_tlv_area); 2859 link_name = (char *)TLV_DATA(req_tlv_area);
2861 if (!strcmp(link_name, bc_link_name)) { 2860 if (!strcmp(link_name, tipc_bclink_name)) {
2862 if (bclink_reset_stats()) 2861 if (tipc_bclink_reset_stats())
2863 return cfg_reply_error_string("link not found"); 2862 return tipc_cfg_reply_error_string("link not found");
2864 return cfg_reply_none(); 2863 return tipc_cfg_reply_none();
2865 } 2864 }
2866 2865
2867 read_lock_bh(&net_lock); 2866 read_lock_bh(&tipc_net_lock);
2868 l_ptr = link_find_link(link_name, &node); 2867 l_ptr = link_find_link(link_name, &node);
2869 if (!l_ptr) { 2868 if (!l_ptr) {
2870 read_unlock_bh(&net_lock); 2869 read_unlock_bh(&tipc_net_lock);
2871 return cfg_reply_error_string("link not found"); 2870 return tipc_cfg_reply_error_string("link not found");
2872 } 2871 }
2873 2872
2874 node_lock(node); 2873 tipc_node_lock(node);
2875 link_reset_statistics(l_ptr); 2874 link_reset_statistics(l_ptr);
2876 node_unlock(node); 2875 tipc_node_unlock(node);
2877 read_unlock_bh(&net_lock); 2876 read_unlock_bh(&tipc_net_lock);
2878 return cfg_reply_none(); 2877 return tipc_cfg_reply_none();
2879} 2878}
2880 2879
2881/** 2880/**
@@ -2888,7 +2887,7 @@ static u32 percent(u32 count, u32 total)
2888} 2887}
2889 2888
2890/** 2889/**
2891 * link_stats - print link statistics 2890 * tipc_link_stats - print link statistics
2892 * @name: link name 2891 * @name: link name
2893 * @buf: print buffer area 2892 * @buf: print buffer area
2894 * @buf_size: size of print buffer area 2893 * @buf_size: size of print buffer area
@@ -2896,7 +2895,7 @@ static u32 percent(u32 count, u32 total)
2896 * Returns length of print buffer data string (or 0 if error) 2895 * Returns length of print buffer data string (or 0 if error)
2897 */ 2896 */
2898 2897
2899static int link_stats(const char *name, char *buf, const u32 buf_size) 2898static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2900{ 2899{
2901 struct print_buf pb; 2900 struct print_buf pb;
2902 struct link *l_ptr; 2901 struct link *l_ptr;
@@ -2904,22 +2903,22 @@ static int link_stats(const char *name, char *buf, const u32 buf_size)
2904 char *status; 2903 char *status;
2905 u32 profile_total = 0; 2904 u32 profile_total = 0;
2906 2905
2907 if (!strcmp(name, bc_link_name)) 2906 if (!strcmp(name, tipc_bclink_name))
2908 return bclink_stats(buf, buf_size); 2907 return tipc_bclink_stats(buf, buf_size);
2909 2908
2910 printbuf_init(&pb, buf, buf_size); 2909 tipc_printbuf_init(&pb, buf, buf_size);
2911 2910
2912 read_lock_bh(&net_lock); 2911 read_lock_bh(&tipc_net_lock);
2913 l_ptr = link_find_link(name, &node); 2912 l_ptr = link_find_link(name, &node);
2914 if (!l_ptr) { 2913 if (!l_ptr) {
2915 read_unlock_bh(&net_lock); 2914 read_unlock_bh(&tipc_net_lock);
2916 return 0; 2915 return 0;
2917 } 2916 }
2918 node_lock(node); 2917 tipc_node_lock(node);
2919 2918
2920 if (link_is_active(l_ptr)) 2919 if (tipc_link_is_active(l_ptr))
2921 status = "ACTIVE"; 2920 status = "ACTIVE";
2922 else if (link_is_up(l_ptr)) 2921 else if (tipc_link_is_up(l_ptr))
2923 status = "STANDBY"; 2922 status = "STANDBY";
2924 else 2923 else
2925 status = "DEFUNCT"; 2924 status = "DEFUNCT";
@@ -2975,33 +2974,33 @@ static int link_stats(const char *name, char *buf, const u32 buf_size)
2975 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts) 2974 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2976 : 0); 2975 : 0);
2977 2976
2978 node_unlock(node); 2977 tipc_node_unlock(node);
2979 read_unlock_bh(&net_lock); 2978 read_unlock_bh(&tipc_net_lock);
2980 return printbuf_validate(&pb); 2979 return tipc_printbuf_validate(&pb);
2981} 2980}
2982 2981
2983#define MAX_LINK_STATS_INFO 2000 2982#define MAX_LINK_STATS_INFO 2000
2984 2983
2985struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2984struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2986{ 2985{
2987 struct sk_buff *buf; 2986 struct sk_buff *buf;
2988 struct tlv_desc *rep_tlv; 2987 struct tlv_desc *rep_tlv;
2989 int str_len; 2988 int str_len;
2990 2989
2991 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2990 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2992 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2991 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2993 2992
2994 buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO)); 2993 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2995 if (!buf) 2994 if (!buf)
2996 return NULL; 2995 return NULL;
2997 2996
2998 rep_tlv = (struct tlv_desc *)buf->data; 2997 rep_tlv = (struct tlv_desc *)buf->data;
2999 2998
3000 str_len = link_stats((char *)TLV_DATA(req_tlv_area), 2999 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
3001 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO); 3000 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3002 if (!str_len) { 3001 if (!str_len) {
3003 buf_discard(buf); 3002 buf_discard(buf);
3004 return cfg_reply_error_string("link not found"); 3003 return tipc_cfg_reply_error_string("link not found");
3005 } 3004 }
3006 3005
3007 skb_put(buf, TLV_SPACE(str_len)); 3006 skb_put(buf, TLV_SPACE(str_len));
@@ -3020,20 +3019,20 @@ int link_control(const char *name, u32 op, u32 val)
3020 u32 a; 3019 u32 a;
3021 3020
3022 a = link_name2addr(name, &bearer_id); 3021 a = link_name2addr(name, &bearer_id);
3023 read_lock_bh(&net_lock); 3022 read_lock_bh(&tipc_net_lock);
3024 node = node_find(a); 3023 node = tipc_node_find(a);
3025 if (node) { 3024 if (node) {
3026 node_lock(node); 3025 tipc_node_lock(node);
3027 l_ptr = node->links[bearer_id]; 3026 l_ptr = node->links[bearer_id];
3028 if (l_ptr) { 3027 if (l_ptr) {
3029 if (op == TIPC_REMOVE_LINK) { 3028 if (op == TIPC_REMOVE_LINK) {
3030 struct bearer *b_ptr = l_ptr->b_ptr; 3029 struct bearer *b_ptr = l_ptr->b_ptr;
3031 spin_lock_bh(&b_ptr->publ.lock); 3030 spin_lock_bh(&b_ptr->publ.lock);
3032 link_delete(l_ptr); 3031 tipc_link_delete(l_ptr);
3033 spin_unlock_bh(&b_ptr->publ.lock); 3032 spin_unlock_bh(&b_ptr->publ.lock);
3034 } 3033 }
3035 if (op == TIPC_CMD_BLOCK_LINK) { 3034 if (op == TIPC_CMD_BLOCK_LINK) {
3036 link_reset(l_ptr); 3035 tipc_link_reset(l_ptr);
3037 l_ptr->blocked = 1; 3036 l_ptr->blocked = 1;
3038 } 3037 }
3039 if (op == TIPC_CMD_UNBLOCK_LINK) { 3038 if (op == TIPC_CMD_UNBLOCK_LINK) {
@@ -3041,22 +3040,22 @@ int link_control(const char *name, u32 op, u32 val)
3041 } 3040 }
3042 res = TIPC_OK; 3041 res = TIPC_OK;
3043 } 3042 }
3044 node_unlock(node); 3043 tipc_node_unlock(node);
3045 } 3044 }
3046 read_unlock_bh(&net_lock); 3045 read_unlock_bh(&tipc_net_lock);
3047 return res; 3046 return res;
3048} 3047}
3049#endif 3048#endif
3050 3049
3051/** 3050/**
3052 * link_get_max_pkt - get maximum packet size to use when sending to destination 3051 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
3053 * @dest: network address of destination node 3052 * @dest: network address of destination node
3054 * @selector: used to select from set of active links 3053 * @selector: used to select from set of active links
3055 * 3054 *
3056 * If no active link can be found, uses default maximum packet size. 3055 * If no active link can be found, uses default maximum packet size.
3057 */ 3056 */
3058 3057
3059u32 link_get_max_pkt(u32 dest, u32 selector) 3058u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3060{ 3059{
3061 struct node *n_ptr; 3060 struct node *n_ptr;
3062 struct link *l_ptr; 3061 struct link *l_ptr;
@@ -3065,16 +3064,16 @@ u32 link_get_max_pkt(u32 dest, u32 selector)
3065 if (dest == tipc_own_addr) 3064 if (dest == tipc_own_addr)
3066 return MAX_MSG_SIZE; 3065 return MAX_MSG_SIZE;
3067 3066
3068 read_lock_bh(&net_lock); 3067 read_lock_bh(&tipc_net_lock);
3069 n_ptr = node_select(dest, selector); 3068 n_ptr = tipc_node_select(dest, selector);
3070 if (n_ptr) { 3069 if (n_ptr) {
3071 node_lock(n_ptr); 3070 tipc_node_lock(n_ptr);
3072 l_ptr = n_ptr->active_links[selector & 1]; 3071 l_ptr = n_ptr->active_links[selector & 1];
3073 if (l_ptr) 3072 if (l_ptr)
3074 res = link_max_pkt(l_ptr); 3073 res = link_max_pkt(l_ptr);
3075 node_unlock(n_ptr); 3074 tipc_node_unlock(n_ptr);
3076 } 3075 }
3077 read_unlock_bh(&net_lock); 3076 read_unlock_bh(&tipc_net_lock);
3078 return res; 3077 return res;
3079} 3078}
3080 3079
diff --git a/net/tipc/link.h b/net/tipc/link.h
index c2553f07375..2d3c157f707 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -221,44 +221,43 @@ struct link {
221 221
222struct port; 222struct port;
223 223
224struct link *link_create(struct bearer *b_ptr, const u32 peer, 224struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
225 const struct tipc_media_addr *media_addr); 225 const struct tipc_media_addr *media_addr);
226void link_delete(struct link *l_ptr); 226void tipc_link_delete(struct link *l_ptr);
227void link_changeover(struct link *l_ptr); 227void tipc_link_changeover(struct link *l_ptr);
228void link_send_duplicate(struct link *l_ptr, struct link *dest); 228void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
229void link_reset_fragments(struct link *l_ptr); 229void tipc_link_reset_fragments(struct link *l_ptr);
230int link_is_up(struct link *l_ptr); 230int tipc_link_is_up(struct link *l_ptr);
231int link_is_active(struct link *l_ptr); 231int tipc_link_is_active(struct link *l_ptr);
232void link_start(struct link *l_ptr); 232void tipc_link_start(struct link *l_ptr);
233u32 link_push_packet(struct link *l_ptr); 233u32 tipc_link_push_packet(struct link *l_ptr);
234void link_stop(struct link *l_ptr); 234void tipc_link_stop(struct link *l_ptr);
235struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd); 235struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
236struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space); 236struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
237struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space); 237struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
238void link_reset(struct link *l_ptr); 238void tipc_link_reset(struct link *l_ptr);
239int link_send(struct sk_buff *buf, u32 dest, u32 selector); 239int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
240int link_send_buf(struct link *l_ptr, struct sk_buff *buf); 240int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
241u32 link_get_max_pkt(u32 dest,u32 selector); 241u32 tipc_link_get_max_pkt(u32 dest,u32 selector);
242int link_send_sections_fast(struct port* sender, 242int tipc_link_send_sections_fast(struct port* sender,
243 struct iovec const *msg_sect, 243 struct iovec const *msg_sect,
244 const u32 num_sect, 244 const u32 num_sect,
245 u32 destnode); 245 u32 destnode);
246 246int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
247int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf); 247void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
248void link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr, 248 struct tipc_msg *msg, u32 selector);
249 struct tipc_msg *msg, u32 selector); 249void tipc_link_recv_bundle(struct sk_buff *buf);
250void link_recv_bundle(struct sk_buff *buf); 250int tipc_link_recv_fragment(struct sk_buff **pending,
251int link_recv_fragment(struct sk_buff **pending, 251 struct sk_buff **fb,
252 struct sk_buff **fb, 252 struct tipc_msg **msg);
253 struct tipc_msg **msg); 253void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
254void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap, 254 u32 tolerance, u32 priority, u32 acked_mtu);
255 u32 tolerance, u32 priority, u32 acked_mtu); 255void tipc_link_push_queue(struct link *l_ptr);
256void link_push_queue(struct link *l_ptr); 256u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
257u32 link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
258 struct sk_buff *buf); 257 struct sk_buff *buf);
259void link_wakeup_ports(struct link *l_ptr, int all); 258void tipc_link_wakeup_ports(struct link *l_ptr, int all);
260void link_set_queue_limits(struct link *l_ptr, u32 window); 259void tipc_link_set_queue_limits(struct link *l_ptr, u32 window);
261void link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits); 260void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
262 261
263/* 262/*
264 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) 263 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 03dbc55cb04..3bd345a344e 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,18 +41,7 @@
41#include "bearer.h" 41#include "bearer.h"
42 42
43 43
44void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) 44void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
45{
46 memcpy(&((int *)m)[5], a, sizeof(*a));
47}
48
49void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
50{
51 memcpy(a, &((int*)m)[5], sizeof(*a));
52}
53
54
55void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
56{ 45{
57 u32 usr = msg_user(msg); 46 u32 usr = msg_user(msg);
58 tipc_printf(buf, str); 47 tipc_printf(buf, str);
@@ -318,7 +307,7 @@ void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
318 tipc_printf(buf, ":REQL(%u):", msg_req_links(msg)); 307 tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
319 tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg)); 308 tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
320 tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg)); 309 tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
321 media_addr_printf(buf, orig); 310 tipc_media_addr_printf(buf, orig);
322 } 311 }
323 if (msg_user(msg) == BCAST_PROTOCOL) { 312 if (msg_user(msg) == BCAST_PROTOCOL) {
324 tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg)); 313 tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
@@ -326,9 +315,9 @@ void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
326 } 315 }
327 tipc_printf(buf, "\n"); 316 tipc_printf(buf, "\n");
328 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) { 317 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
329 msg_print(buf,msg_get_wrapped(msg)," /"); 318 tipc_msg_print(buf,msg_get_wrapped(msg)," /");
330 } 319 }
331 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) { 320 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
332 msg_print(buf,msg_get_wrapped(msg)," /"); 321 tipc_msg_print(buf,msg_get_wrapped(msg)," /");
333 } 322 }
334} 323}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 662c81862a0..6699aaf7bd4 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -37,7 +37,7 @@
37#ifndef _TIPC_MSG_H 37#ifndef _TIPC_MSG_H
38#define _TIPC_MSG_H 38#define _TIPC_MSG_H
39 39
40#include <net/tipc/tipc_msg.h> 40#include "core.h"
41 41
42#define TIPC_VERSION 2 42#define TIPC_VERSION 2
43#define DATA_LOW TIPC_LOW_IMPORTANCE 43#define DATA_LOW TIPC_LOW_IMPORTANCE
@@ -805,14 +805,14 @@ static inline int msg_build(struct tipc_msg *hdr,
805 return -EFAULT; 805 return -EFAULT;
806} 806}
807 807
808static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
809{
810 memcpy(&((int *)m)[5], a, sizeof(*a));
811}
808 812
809struct tipc_media_addr; 813static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
810 814{
811extern void msg_set_media_addr(struct tipc_msg *m, 815 memcpy(a, &((int*)m)[5], sizeof(*a));
812 struct tipc_media_addr *a); 816}
813
814extern void msg_get_media_addr(struct tipc_msg *m,
815 struct tipc_media_addr *a);
816
817 817
818#endif 818#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 41cbaf1a4a7..830f9099904 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -114,10 +114,10 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
114} 114}
115 115
116/** 116/**
117 * named_publish - tell other nodes about a new publication by this node 117 * tipc_named_publish - tell other nodes about a new publication by this node
118 */ 118 */
119 119
120void named_publish(struct publication *publ) 120void tipc_named_publish(struct publication *publ)
121{ 121{
122 struct sk_buff *buf; 122 struct sk_buff *buf;
123 struct distr_item *item; 123 struct distr_item *item;
@@ -133,15 +133,15 @@ void named_publish(struct publication *publ)
133 133
134 item = (struct distr_item *)msg_data(buf_msg(buf)); 134 item = (struct distr_item *)msg_data(buf_msg(buf));
135 publ_to_item(item, publ); 135 publ_to_item(item, publ);
136 dbg("named_withdraw: broadcasting publish msg\n"); 136 dbg("tipc_named_withdraw: broadcasting publish msg\n");
137 cluster_broadcast(buf); 137 tipc_cltr_broadcast(buf);
138} 138}
139 139
140/** 140/**
141 * named_withdraw - tell other nodes about a withdrawn publication by this node 141 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
142 */ 142 */
143 143
144void named_withdraw(struct publication *publ) 144void tipc_named_withdraw(struct publication *publ)
145{ 145{
146 struct sk_buff *buf; 146 struct sk_buff *buf;
147 struct distr_item *item; 147 struct distr_item *item;
@@ -157,15 +157,15 @@ void named_withdraw(struct publication *publ)
157 157
158 item = (struct distr_item *)msg_data(buf_msg(buf)); 158 item = (struct distr_item *)msg_data(buf_msg(buf));
159 publ_to_item(item, publ); 159 publ_to_item(item, publ);
160 dbg("named_withdraw: broadcasting withdraw msg\n"); 160 dbg("tipc_named_withdraw: broadcasting withdraw msg\n");
161 cluster_broadcast(buf); 161 tipc_cltr_broadcast(buf);
162} 162}
163 163
164/** 164/**
165 * named_node_up - tell specified node about all publications by this node 165 * tipc_named_node_up - tell specified node about all publications by this node
166 */ 166 */
167 167
168void named_node_up(unsigned long node) 168void tipc_named_node_up(unsigned long node)
169{ 169{
170 struct publication *publ; 170 struct publication *publ;
171 struct distr_item *item = 0; 171 struct distr_item *item = 0;
@@ -175,7 +175,7 @@ void named_node_up(unsigned long node)
175 u32 max_item_buf; 175 u32 max_item_buf;
176 176
177 assert(in_own_cluster(node)); 177 assert(in_own_cluster(node));
178 read_lock_bh(&nametbl_lock); 178 read_lock_bh(&tipc_nametbl_lock);
179 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE; 179 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
180 max_item_buf *= ITEM_SIZE; 180 max_item_buf *= ITEM_SIZE;
181 rest = publ_cnt * ITEM_SIZE; 181 rest = publ_cnt * ITEM_SIZE;
@@ -196,15 +196,15 @@ void named_node_up(unsigned long node)
196 left -= ITEM_SIZE; 196 left -= ITEM_SIZE;
197 if (!left) { 197 if (!left) {
198 msg_set_link_selector(buf_msg(buf), node); 198 msg_set_link_selector(buf_msg(buf), node);
199 dbg("named_node_up: sending publish msg to " 199 dbg("tipc_named_node_up: sending publish msg to "
200 "<%u.%u.%u>\n", tipc_zone(node), 200 "<%u.%u.%u>\n", tipc_zone(node),
201 tipc_cluster(node), tipc_node(node)); 201 tipc_cluster(node), tipc_node(node));
202 link_send(buf, node, node); 202 tipc_link_send(buf, node, node);
203 buf = 0; 203 buf = 0;
204 } 204 }
205 } 205 }
206exit: 206exit:
207 read_unlock_bh(&nametbl_lock); 207 read_unlock_bh(&tipc_nametbl_lock);
208} 208}
209 209
210/** 210/**
@@ -221,73 +221,73 @@ exit:
221static void node_is_down(struct publication *publ) 221static void node_is_down(struct publication *publ)
222{ 222{
223 struct publication *p; 223 struct publication *p;
224 write_lock_bh(&nametbl_lock); 224 write_lock_bh(&tipc_nametbl_lock);
225 dbg("node_is_down: withdrawing %u, %u, %u\n", 225 dbg("node_is_down: withdrawing %u, %u, %u\n",
226 publ->type, publ->lower, publ->upper); 226 publ->type, publ->lower, publ->upper);
227 publ->key += 1222345; 227 publ->key += 1222345;
228 p = nametbl_remove_publ(publ->type, publ->lower, 228 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
229 publ->node, publ->ref, publ->key); 229 publ->node, publ->ref, publ->key);
230 assert(p == publ); 230 assert(p == publ);
231 write_unlock_bh(&nametbl_lock); 231 write_unlock_bh(&tipc_nametbl_lock);
232 if (publ) 232 if (publ)
233 kfree(publ); 233 kfree(publ);
234} 234}
235 235
236/** 236/**
237 * named_recv - process name table update message sent by another node 237 * tipc_named_recv - process name table update message sent by another node
238 */ 238 */
239 239
240void named_recv(struct sk_buff *buf) 240void tipc_named_recv(struct sk_buff *buf)
241{ 241{
242 struct publication *publ; 242 struct publication *publ;
243 struct tipc_msg *msg = buf_msg(buf); 243 struct tipc_msg *msg = buf_msg(buf);
244 struct distr_item *item = (struct distr_item *)msg_data(msg); 244 struct distr_item *item = (struct distr_item *)msg_data(msg);
245 u32 count = msg_data_sz(msg) / ITEM_SIZE; 245 u32 count = msg_data_sz(msg) / ITEM_SIZE;
246 246
247 write_lock_bh(&nametbl_lock); 247 write_lock_bh(&tipc_nametbl_lock);
248 while (count--) { 248 while (count--) {
249 if (msg_type(msg) == PUBLICATION) { 249 if (msg_type(msg) == PUBLICATION) {
250 dbg("named_recv: got publication for %u, %u, %u\n", 250 dbg("tipc_named_recv: got publication for %u, %u, %u\n",
251 ntohl(item->type), ntohl(item->lower), 251 ntohl(item->type), ntohl(item->lower),
252 ntohl(item->upper)); 252 ntohl(item->upper));
253 publ = nametbl_insert_publ(ntohl(item->type), 253 publ = tipc_nametbl_insert_publ(ntohl(item->type),
254 ntohl(item->lower), 254 ntohl(item->lower),
255 ntohl(item->upper), 255 ntohl(item->upper),
256 TIPC_CLUSTER_SCOPE, 256 TIPC_CLUSTER_SCOPE,
257 msg_orignode(msg), 257 msg_orignode(msg),
258 ntohl(item->ref), 258 ntohl(item->ref),
259 ntohl(item->key)); 259 ntohl(item->key));
260 if (publ) { 260 if (publ) {
261 nodesub_subscribe(&publ->subscr, 261 tipc_nodesub_subscribe(&publ->subscr,
262 msg_orignode(msg), 262 msg_orignode(msg),
263 publ, 263 publ,
264 (net_ev_handler)node_is_down); 264 (net_ev_handler)node_is_down);
265 } 265 }
266 } else if (msg_type(msg) == WITHDRAWAL) { 266 } else if (msg_type(msg) == WITHDRAWAL) {
267 dbg("named_recv: got withdrawl for %u, %u, %u\n", 267 dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n",
268 ntohl(item->type), ntohl(item->lower), 268 ntohl(item->type), ntohl(item->lower),
269 ntohl(item->upper)); 269 ntohl(item->upper));
270 publ = nametbl_remove_publ(ntohl(item->type), 270 publ = tipc_nametbl_remove_publ(ntohl(item->type),
271 ntohl(item->lower), 271 ntohl(item->lower),
272 msg_orignode(msg), 272 msg_orignode(msg),
273 ntohl(item->ref), 273 ntohl(item->ref),
274 ntohl(item->key)); 274 ntohl(item->key));
275 275
276 if (publ) { 276 if (publ) {
277 nodesub_unsubscribe(&publ->subscr); 277 tipc_nodesub_unsubscribe(&publ->subscr);
278 kfree(publ); 278 kfree(publ);
279 } 279 }
280 } else { 280 } else {
281 warn("named_recv: unknown msg\n"); 281 warn("tipc_named_recv: unknown msg\n");
282 } 282 }
283 item++; 283 item++;
284 } 284 }
285 write_unlock_bh(&nametbl_lock); 285 write_unlock_bh(&tipc_nametbl_lock);
286 buf_discard(buf); 286 buf_discard(buf);
287} 287}
288 288
289/** 289/**
290 * named_reinit - re-initialize local publication list 290 * tipc_named_reinit - re-initialize local publication list
291 * 291 *
292 * This routine is called whenever TIPC networking is (re)enabled. 292 * This routine is called whenever TIPC networking is (re)enabled.
293 * All existing publications by this node that have "cluster" or "zone" scope 293 * All existing publications by this node that have "cluster" or "zone" scope
@@ -295,15 +295,15 @@ void named_recv(struct sk_buff *buf)
295 * (If the node's address is unchanged, the update loop terminates immediately.) 295 * (If the node's address is unchanged, the update loop terminates immediately.)
296 */ 296 */
297 297
298void named_reinit(void) 298void tipc_named_reinit(void)
299{ 299{
300 struct publication *publ; 300 struct publication *publ;
301 301
302 write_lock_bh(&nametbl_lock); 302 write_lock_bh(&tipc_nametbl_lock);
303 list_for_each_entry(publ, &publ_root, local_list) { 303 list_for_each_entry(publ, &publ_root, local_list) {
304 if (publ->node == tipc_own_addr) 304 if (publ->node == tipc_own_addr)
305 break; 305 break;
306 publ->node = tipc_own_addr; 306 publ->node = tipc_own_addr;
307 } 307 }
308 write_unlock_bh(&nametbl_lock); 308 write_unlock_bh(&tipc_nametbl_lock);
309} 309}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index a04bdeac84e..843da0172f4 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -39,10 +39,10 @@
39 39
40#include "name_table.h" 40#include "name_table.h"
41 41
42void named_publish(struct publication *publ); 42void tipc_named_publish(struct publication *publ);
43void named_withdraw(struct publication *publ); 43void tipc_named_withdraw(struct publication *publ);
44void named_node_up(unsigned long node); 44void tipc_named_node_up(unsigned long node);
45void named_recv(struct sk_buff *buf); 45void tipc_named_recv(struct sk_buff *buf);
46void named_reinit(void); 46void tipc_named_reinit(void);
47 47
48#endif 48#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 972c83eb83b..3f4b23bd08f 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -99,9 +99,9 @@ struct name_table {
99 u32 local_publ_count; 99 u32 local_publ_count;
100}; 100};
101 101
102struct name_table table = { NULL } ; 102static struct name_table table = { NULL } ;
103static atomic_t rsv_publ_ok = ATOMIC_INIT(0); 103static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
104rwlock_t nametbl_lock = RW_LOCK_UNLOCKED; 104rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED;
105 105
106 106
107static inline int hash(int x) 107static inline int hash(int x)
@@ -139,10 +139,10 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
139} 139}
140 140
141/** 141/**
142 * subseq_alloc - allocate a specified number of sub-sequence structures 142 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
143 */ 143 */
144 144
145struct sub_seq *subseq_alloc(u32 cnt) 145struct sub_seq *tipc_subseq_alloc(u32 cnt)
146{ 146{
147 u32 sz = cnt * sizeof(struct sub_seq); 147 u32 sz = cnt * sizeof(struct sub_seq);
148 struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC); 148 struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
@@ -153,16 +153,16 @@ struct sub_seq *subseq_alloc(u32 cnt)
153} 153}
154 154
155/** 155/**
156 * nameseq_create - create a name sequence structure for the specified 'type' 156 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
157 * 157 *
158 * Allocates a single sub-sequence structure and sets it to all 0's. 158 * Allocates a single sub-sequence structure and sets it to all 0's.
159 */ 159 */
160 160
161struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head) 161struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
162{ 162{
163 struct name_seq *nseq = 163 struct name_seq *nseq =
164 (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC); 164 (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
165 struct sub_seq *sseq = subseq_alloc(1); 165 struct sub_seq *sseq = tipc_subseq_alloc(1);
166 166
167 if (!nseq || !sseq) { 167 if (!nseq || !sseq) {
168 warn("Memory squeeze; failed to create name sequence\n"); 168 warn("Memory squeeze; failed to create name sequence\n");
@@ -175,7 +175,7 @@ struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head)
175 nseq->lock = SPIN_LOCK_UNLOCKED; 175 nseq->lock = SPIN_LOCK_UNLOCKED;
176 nseq->type = type; 176 nseq->type = type;
177 nseq->sseqs = sseq; 177 nseq->sseqs = sseq;
178 dbg("nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n", 178 dbg("tipc_nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
179 nseq, type, nseq->sseqs, nseq->first_free); 179 nseq, type, nseq->sseqs, nseq->first_free);
180 nseq->alloc = 1; 180 nseq->alloc = 1;
181 INIT_HLIST_NODE(&nseq->ns_list); 181 INIT_HLIST_NODE(&nseq->ns_list);
@@ -240,10 +240,10 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
240} 240}
241 241
242/** 242/**
243 * nameseq_insert_publ - 243 * tipc_nameseq_insert_publ -
244 */ 244 */
245 245
246struct publication *nameseq_insert_publ(struct name_seq *nseq, 246struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
247 u32 type, u32 lower, u32 upper, 247 u32 type, u32 lower, u32 upper,
248 u32 scope, u32 node, u32 port, u32 key) 248 u32 scope, u32 node, u32 port, u32 key)
249{ 249{
@@ -285,7 +285,7 @@ struct publication *nameseq_insert_publ(struct name_seq *nseq,
285 285
286 if (nseq->first_free == nseq->alloc) { 286 if (nseq->first_free == nseq->alloc) {
287 struct sub_seq *sseqs = nseq->sseqs; 287 struct sub_seq *sseqs = nseq->sseqs;
288 nseq->sseqs = subseq_alloc(nseq->alloc * 2); 288 nseq->sseqs = tipc_subseq_alloc(nseq->alloc * 2);
289 if (nseq->sseqs != NULL) { 289 if (nseq->sseqs != NULL) {
290 memcpy(nseq->sseqs, sseqs, 290 memcpy(nseq->sseqs, sseqs,
291 nseq->alloc * sizeof (struct sub_seq)); 291 nseq->alloc * sizeof (struct sub_seq));
@@ -354,23 +354,23 @@ struct publication *nameseq_insert_publ(struct name_seq *nseq,
354 */ 354 */
355 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 355 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
356 dbg("calling report_overlap()\n"); 356 dbg("calling report_overlap()\n");
357 subscr_report_overlap(s, 357 tipc_subscr_report_overlap(s,
358 publ->lower, 358 publ->lower,
359 publ->upper, 359 publ->upper,
360 TIPC_PUBLISHED, 360 TIPC_PUBLISHED,
361 publ->ref, 361 publ->ref,
362 publ->node, 362 publ->node,
363 created_subseq); 363 created_subseq);
364 } 364 }
365 return publ; 365 return publ;
366} 366}
367 367
368/** 368/**
369 * nameseq_remove_publ - 369 * tipc_nameseq_remove_publ -
370 */ 370 */
371 371
372struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst, 372struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
373 u32 node, u32 ref, u32 key) 373 u32 node, u32 ref, u32 key)
374{ 374{
375 struct publication *publ; 375 struct publication *publ;
376 struct publication *prev; 376 struct publication *prev;
@@ -470,24 +470,24 @@ struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst,
470 * Any subscriptions waiting ? 470 * Any subscriptions waiting ?
471 */ 471 */
472 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 472 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
473 subscr_report_overlap(s, 473 tipc_subscr_report_overlap(s,
474 publ->lower, 474 publ->lower,
475 publ->upper, 475 publ->upper,
476 TIPC_WITHDRAWN, 476 TIPC_WITHDRAWN,
477 publ->ref, 477 publ->ref,
478 publ->node, 478 publ->node,
479 removed_subseq); 479 removed_subseq);
480 } 480 }
481 return publ; 481 return publ;
482} 482}
483 483
484/** 484/**
485 * nameseq_subscribe: attach a subscription, and issue 485 * tipc_nameseq_subscribe: attach a subscription, and issue
486 * the prescribed number of events if there is any sub- 486 * the prescribed number of events if there is any sub-
487 * sequence overlapping with the requested sequence 487 * sequence overlapping with the requested sequence
488 */ 488 */
489 489
490void nameseq_subscribe(struct name_seq *nseq, struct subscription *s) 490void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
491{ 491{
492 struct sub_seq *sseq = nseq->sseqs; 492 struct sub_seq *sseq = nseq->sseqs;
493 493
@@ -498,18 +498,18 @@ void nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
498 498
499 while (sseq != &nseq->sseqs[nseq->first_free]) { 499 while (sseq != &nseq->sseqs[nseq->first_free]) {
500 struct publication *zl = sseq->zone_list; 500 struct publication *zl = sseq->zone_list;
501 if (zl && subscr_overlap(s,sseq->lower,sseq->upper)) { 501 if (zl && tipc_subscr_overlap(s,sseq->lower,sseq->upper)) {
502 struct publication *crs = zl; 502 struct publication *crs = zl;
503 int must_report = 1; 503 int must_report = 1;
504 504
505 do { 505 do {
506 subscr_report_overlap(s, 506 tipc_subscr_report_overlap(s,
507 sseq->lower, 507 sseq->lower,
508 sseq->upper, 508 sseq->upper,
509 TIPC_PUBLISHED, 509 TIPC_PUBLISHED,
510 crs->ref, 510 crs->ref,
511 crs->node, 511 crs->node,
512 must_report); 512 must_report);
513 must_report = 0; 513 must_report = 0;
514 crs = crs->zone_list_next; 514 crs = crs->zone_list_next;
515 } while (crs != zl); 515 } while (crs != zl);
@@ -538,8 +538,8 @@ static struct name_seq *nametbl_find_seq(u32 type)
538 return 0; 538 return 0;
539}; 539};
540 540
541struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper, 541struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
542 u32 scope, u32 node, u32 port, u32 key) 542 u32 scope, u32 node, u32 port, u32 key)
543{ 543{
544 struct name_seq *seq = nametbl_find_seq(type); 544 struct name_seq *seq = nametbl_find_seq(type);
545 545
@@ -552,19 +552,19 @@ struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
552 552
553 dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node); 553 dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
554 if (!seq) { 554 if (!seq) {
555 seq = nameseq_create(type, &table.types[hash(type)]); 555 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
556 dbg("nametbl_insert_publ: created %x\n", seq); 556 dbg("tipc_nametbl_insert_publ: created %x\n", seq);
557 } 557 }
558 if (!seq) 558 if (!seq)
559 return 0; 559 return 0;
560 560
561 assert(seq->type == type); 561 assert(seq->type == type);
562 return nameseq_insert_publ(seq, type, lower, upper, 562 return tipc_nameseq_insert_publ(seq, type, lower, upper,
563 scope, node, port, key); 563 scope, node, port, key);
564} 564}
565 565
566struct publication *nametbl_remove_publ(u32 type, u32 lower, 566struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
567 u32 node, u32 ref, u32 key) 567 u32 node, u32 ref, u32 key)
568{ 568{
569 struct publication *publ; 569 struct publication *publ;
570 struct name_seq *seq = nametbl_find_seq(type); 570 struct name_seq *seq = nametbl_find_seq(type);
@@ -573,7 +573,7 @@ struct publication *nametbl_remove_publ(u32 type, u32 lower,
573 return 0; 573 return 0;
574 574
575 dbg("Withdrawing <%u,%u> from %x\n", type, lower, node); 575 dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
576 publ = nameseq_remove_publ(seq, lower, node, ref, key); 576 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
577 577
578 if (!seq->first_free && list_empty(&seq->subscriptions)) { 578 if (!seq->first_free && list_empty(&seq->subscriptions)) {
579 hlist_del_init(&seq->ns_list); 579 hlist_del_init(&seq->ns_list);
@@ -584,14 +584,14 @@ struct publication *nametbl_remove_publ(u32 type, u32 lower,
584} 584}
585 585
586/* 586/*
587 * nametbl_translate(): Translate tipc_name -> tipc_portid. 587 * tipc_nametbl_translate(): Translate tipc_name -> tipc_portid.
588 * Very time-critical. 588 * Very time-critical.
589 * 589 *
590 * Note: on entry 'destnode' is the search domain used during translation; 590 * Note: on entry 'destnode' is the search domain used during translation;
591 * on exit it passes back the node address of the matching port (if any) 591 * on exit it passes back the node address of the matching port (if any)
592 */ 592 */
593 593
594u32 nametbl_translate(u32 type, u32 instance, u32 *destnode) 594u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
595{ 595{
596 struct sub_seq *sseq; 596 struct sub_seq *sseq;
597 struct publication *publ = 0; 597 struct publication *publ = 0;
@@ -601,7 +601,7 @@ u32 nametbl_translate(u32 type, u32 instance, u32 *destnode)
601 if (!in_scope(*destnode, tipc_own_addr)) 601 if (!in_scope(*destnode, tipc_own_addr))
602 return 0; 602 return 0;
603 603
604 read_lock_bh(&nametbl_lock); 604 read_lock_bh(&tipc_nametbl_lock);
605 seq = nametbl_find_seq(type); 605 seq = nametbl_find_seq(type);
606 if (unlikely(!seq)) 606 if (unlikely(!seq))
607 goto not_found; 607 goto not_found;
@@ -619,7 +619,7 @@ found:
619 ref = publ->ref; 619 ref = publ->ref;
620 *destnode = publ->node; 620 *destnode = publ->node;
621 spin_unlock_bh(&seq->lock); 621 spin_unlock_bh(&seq->lock);
622 read_unlock_bh(&nametbl_lock); 622 read_unlock_bh(&tipc_nametbl_lock);
623 return ref; 623 return ref;
624 } 624 }
625 publ = sseq->cluster_list; 625 publ = sseq->cluster_list;
@@ -657,12 +657,12 @@ found:
657 spin_unlock_bh(&seq->lock); 657 spin_unlock_bh(&seq->lock);
658not_found: 658not_found:
659 *destnode = 0; 659 *destnode = 0;
660 read_unlock_bh(&nametbl_lock); 660 read_unlock_bh(&tipc_nametbl_lock);
661 return 0; 661 return 0;
662} 662}
663 663
664/** 664/**
665 * nametbl_mc_translate - find multicast destinations 665 * tipc_nametbl_mc_translate - find multicast destinations
666 * 666 *
667 * Creates list of all local ports that overlap the given multicast address; 667 * Creates list of all local ports that overlap the given multicast address;
668 * also determines if any off-node ports overlap. 668 * also determines if any off-node ports overlap.
@@ -674,15 +674,15 @@ not_found:
674 * Returns non-zero if any off-node ports overlap 674 * Returns non-zero if any off-node ports overlap
675 */ 675 */
676 676
677int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 677int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
678 struct port_list *dports) 678 struct port_list *dports)
679{ 679{
680 struct name_seq *seq; 680 struct name_seq *seq;
681 struct sub_seq *sseq; 681 struct sub_seq *sseq;
682 struct sub_seq *sseq_stop; 682 struct sub_seq *sseq_stop;
683 int res = 0; 683 int res = 0;
684 684
685 read_lock_bh(&nametbl_lock); 685 read_lock_bh(&tipc_nametbl_lock);
686 seq = nametbl_find_seq(type); 686 seq = nametbl_find_seq(type);
687 if (!seq) 687 if (!seq)
688 goto exit; 688 goto exit;
@@ -700,7 +700,7 @@ int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
700 if (publ && (publ->scope <= limit)) 700 if (publ && (publ->scope <= limit))
701 do { 701 do {
702 if (publ->node == tipc_own_addr) 702 if (publ->node == tipc_own_addr)
703 port_list_add(dports, publ->ref); 703 tipc_port_list_add(dports, publ->ref);
704 else 704 else
705 res = 1; 705 res = 1;
706 publ = publ->cluster_list_next; 706 publ = publ->cluster_list_next;
@@ -709,15 +709,15 @@ int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
709 709
710 spin_unlock_bh(&seq->lock); 710 spin_unlock_bh(&seq->lock);
711exit: 711exit:
712 read_unlock_bh(&nametbl_lock); 712 read_unlock_bh(&tipc_nametbl_lock);
713 return res; 713 return res;
714} 714}
715 715
716/** 716/**
717 * nametbl_publish_rsv - publish port name using a reserved name type 717 * tipc_nametbl_publish_rsv - publish port name using a reserved name type
718 */ 718 */
719 719
720int nametbl_publish_rsv(u32 ref, unsigned int scope, 720int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
721 struct tipc_name_seq const *seq) 721 struct tipc_name_seq const *seq)
722{ 722{
723 int res; 723 int res;
@@ -729,10 +729,10 @@ int nametbl_publish_rsv(u32 ref, unsigned int scope,
729} 729}
730 730
731/** 731/**
732 * nametbl_publish - add name publication to network name tables 732 * tipc_nametbl_publish - add name publication to network name tables
733 */ 733 */
734 734
735struct publication *nametbl_publish(u32 type, u32 lower, u32 upper, 735struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
736 u32 scope, u32 port_ref, u32 key) 736 u32 scope, u32 port_ref, u32 key)
737{ 737{
738 struct publication *publ; 738 struct publication *publ;
@@ -748,77 +748,77 @@ struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
748 return 0; 748 return 0;
749 } 749 }
750 750
751 write_lock_bh(&nametbl_lock); 751 write_lock_bh(&tipc_nametbl_lock);
752 table.local_publ_count++; 752 table.local_publ_count++;
753 publ = nametbl_insert_publ(type, lower, upper, scope, 753 publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
754 tipc_own_addr, port_ref, key); 754 tipc_own_addr, port_ref, key);
755 if (publ && (scope != TIPC_NODE_SCOPE)) { 755 if (publ && (scope != TIPC_NODE_SCOPE)) {
756 named_publish(publ); 756 tipc_named_publish(publ);
757 } 757 }
758 write_unlock_bh(&nametbl_lock); 758 write_unlock_bh(&tipc_nametbl_lock);
759 return publ; 759 return publ;
760} 760}
761 761
762/** 762/**
763 * nametbl_withdraw - withdraw name publication from network name tables 763 * tipc_nametbl_withdraw - withdraw name publication from network name tables
764 */ 764 */
765 765
766int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 766int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
767{ 767{
768 struct publication *publ; 768 struct publication *publ;
769 769
770 dbg("nametbl_withdraw:<%d,%d,%d>\n", type, lower, key); 770 dbg("tipc_nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
771 write_lock_bh(&nametbl_lock); 771 write_lock_bh(&tipc_nametbl_lock);
772 publ = nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 772 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
773 if (publ) { 773 if (publ) {
774 table.local_publ_count--; 774 table.local_publ_count--;
775 if (publ->scope != TIPC_NODE_SCOPE) 775 if (publ->scope != TIPC_NODE_SCOPE)
776 named_withdraw(publ); 776 tipc_named_withdraw(publ);
777 write_unlock_bh(&nametbl_lock); 777 write_unlock_bh(&tipc_nametbl_lock);
778 list_del_init(&publ->pport_list); 778 list_del_init(&publ->pport_list);
779 kfree(publ); 779 kfree(publ);
780 return 1; 780 return 1;
781 } 781 }
782 write_unlock_bh(&nametbl_lock); 782 write_unlock_bh(&tipc_nametbl_lock);
783 return 0; 783 return 0;
784} 784}
785 785
786/** 786/**
787 * nametbl_subscribe - add a subscription object to the name table 787 * tipc_nametbl_subscribe - add a subscription object to the name table
788 */ 788 */
789 789
790void 790void
791nametbl_subscribe(struct subscription *s) 791tipc_nametbl_subscribe(struct subscription *s)
792{ 792{
793 u32 type = s->seq.type; 793 u32 type = s->seq.type;
794 struct name_seq *seq; 794 struct name_seq *seq;
795 795
796 write_lock_bh(&nametbl_lock); 796 write_lock_bh(&tipc_nametbl_lock);
797 seq = nametbl_find_seq(type); 797 seq = nametbl_find_seq(type);
798 if (!seq) { 798 if (!seq) {
799 seq = nameseq_create(type, &table.types[hash(type)]); 799 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
800 } 800 }
801 if (seq){ 801 if (seq){
802 spin_lock_bh(&seq->lock); 802 spin_lock_bh(&seq->lock);
803 dbg("nametbl_subscribe:found %x for <%u,%u,%u>\n", 803 dbg("tipc_nametbl_subscribe:found %x for <%u,%u,%u>\n",
804 seq, type, s->seq.lower, s->seq.upper); 804 seq, type, s->seq.lower, s->seq.upper);
805 assert(seq->type == type); 805 assert(seq->type == type);
806 nameseq_subscribe(seq, s); 806 tipc_nameseq_subscribe(seq, s);
807 spin_unlock_bh(&seq->lock); 807 spin_unlock_bh(&seq->lock);
808 } 808 }
809 write_unlock_bh(&nametbl_lock); 809 write_unlock_bh(&tipc_nametbl_lock);
810} 810}
811 811
812/** 812/**
813 * nametbl_unsubscribe - remove a subscription object from name table 813 * tipc_nametbl_unsubscribe - remove a subscription object from name table
814 */ 814 */
815 815
816void 816void
817nametbl_unsubscribe(struct subscription *s) 817tipc_nametbl_unsubscribe(struct subscription *s)
818{ 818{
819 struct name_seq *seq; 819 struct name_seq *seq;
820 820
821 write_lock_bh(&nametbl_lock); 821 write_lock_bh(&tipc_nametbl_lock);
822 seq = nametbl_find_seq(s->seq.type); 822 seq = nametbl_find_seq(s->seq.type);
823 if (seq != NULL){ 823 if (seq != NULL){
824 spin_lock_bh(&seq->lock); 824 spin_lock_bh(&seq->lock);
@@ -830,7 +830,7 @@ nametbl_unsubscribe(struct subscription *s)
830 kfree(seq); 830 kfree(seq);
831 } 831 }
832 } 832 }
833 write_unlock_bh(&nametbl_lock); 833 write_unlock_bh(&tipc_nametbl_lock);
834} 834}
835 835
836 836
@@ -983,17 +983,17 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
983 } 983 }
984} 984}
985 985
986void nametbl_print(struct print_buf *buf, const char *str) 986void tipc_nametbl_print(struct print_buf *buf, const char *str)
987{ 987{
988 tipc_printf(buf, str); 988 tipc_printf(buf, str);
989 read_lock_bh(&nametbl_lock); 989 read_lock_bh(&tipc_nametbl_lock);
990 nametbl_list(buf, 0, 0, 0, 0); 990 nametbl_list(buf, 0, 0, 0, 0);
991 read_unlock_bh(&nametbl_lock); 991 read_unlock_bh(&tipc_nametbl_lock);
992} 992}
993 993
994#define MAX_NAME_TBL_QUERY 32768 994#define MAX_NAME_TBL_QUERY 32768
995 995
996struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space) 996struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
997{ 997{
998 struct sk_buff *buf; 998 struct sk_buff *buf;
999 struct tipc_name_table_query *argv; 999 struct tipc_name_table_query *argv;
@@ -1002,20 +1002,20 @@ struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
1002 int str_len; 1002 int str_len;
1003 1003
1004 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY)) 1004 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
1005 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 1005 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
1006 1006
1007 buf = cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY)); 1007 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
1008 if (!buf) 1008 if (!buf)
1009 return NULL; 1009 return NULL;
1010 1010
1011 rep_tlv = (struct tlv_desc *)buf->data; 1011 rep_tlv = (struct tlv_desc *)buf->data;
1012 printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY); 1012 tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
1013 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); 1013 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
1014 read_lock_bh(&nametbl_lock); 1014 read_lock_bh(&tipc_nametbl_lock);
1015 nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), 1015 nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
1016 ntohl(argv->lowbound), ntohl(argv->upbound)); 1016 ntohl(argv->lowbound), ntohl(argv->upbound));
1017 read_unlock_bh(&nametbl_lock); 1017 read_unlock_bh(&tipc_nametbl_lock);
1018 str_len = printbuf_validate(&b); 1018 str_len = tipc_printbuf_validate(&b);
1019 1019
1020 skb_put(buf, TLV_SPACE(str_len)); 1020 skb_put(buf, TLV_SPACE(str_len));
1021 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 1021 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -1023,12 +1023,12 @@ struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
1023 return buf; 1023 return buf;
1024} 1024}
1025 1025
1026void nametbl_dump(void) 1026void tipc_nametbl_dump(void)
1027{ 1027{
1028 nametbl_list(CONS, 0, 0, 0, 0); 1028 nametbl_list(TIPC_CONS, 0, 0, 0, 0);
1029} 1029}
1030 1030
1031int nametbl_init(void) 1031int tipc_nametbl_init(void)
1032{ 1032{
1033 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1033 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
1034 1034
@@ -1036,14 +1036,14 @@ int nametbl_init(void)
1036 if (!table.types) 1036 if (!table.types)
1037 return -ENOMEM; 1037 return -ENOMEM;
1038 1038
1039 write_lock_bh(&nametbl_lock); 1039 write_lock_bh(&tipc_nametbl_lock);
1040 memset(table.types, 0, array_size); 1040 memset(table.types, 0, array_size);
1041 table.local_publ_count = 0; 1041 table.local_publ_count = 0;
1042 write_unlock_bh(&nametbl_lock); 1042 write_unlock_bh(&tipc_nametbl_lock);
1043 return 0; 1043 return 0;
1044} 1044}
1045 1045
1046void nametbl_stop(void) 1046void tipc_nametbl_stop(void)
1047{ 1047{
1048 struct hlist_head *seq_head; 1048 struct hlist_head *seq_head;
1049 struct hlist_node *seq_node; 1049 struct hlist_node *seq_node;
@@ -1054,7 +1054,7 @@ void nametbl_stop(void)
1054 if (!table.types) 1054 if (!table.types)
1055 return; 1055 return;
1056 1056
1057 write_lock_bh(&nametbl_lock); 1057 write_lock_bh(&tipc_nametbl_lock);
1058 for (i = 0; i < tipc_nametbl_size; i++) { 1058 for (i = 0; i < tipc_nametbl_size; i++) {
1059 seq_head = &table.types[i]; 1059 seq_head = &table.types[i];
1060 hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) { 1060 hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
@@ -1075,5 +1075,5 @@ void nametbl_stop(void)
1075 } 1075 }
1076 kfree(table.types); 1076 kfree(table.types);
1077 table.types = NULL; 1077 table.types = NULL;
1078 write_unlock_bh(&nametbl_lock); 1078 write_unlock_bh(&tipc_nametbl_lock);
1079} 1079}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index f82693384f6..e8a3d71763c 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -85,24 +85,24 @@ struct publication {
85}; 85};
86 86
87 87
88extern rwlock_t nametbl_lock; 88extern rwlock_t tipc_nametbl_lock;
89 89
90struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space); 90struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
91u32 nametbl_translate(u32 type, u32 instance, u32 *node); 91u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
92int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 92int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
93 struct port_list *dports); 93 struct port_list *dports);
94int nametbl_publish_rsv(u32 ref, unsigned int scope, 94int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
95 struct tipc_name_seq const *seq); 95 struct tipc_name_seq const *seq);
96struct publication *nametbl_publish(u32 type, u32 lower, u32 upper, 96struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
97 u32 scope, u32 port_ref, u32 key); 97 u32 scope, u32 port_ref, u32 key);
98int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key); 98int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
99struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper, 99struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
100 u32 scope, u32 node, u32 ref, u32 key); 100 u32 scope, u32 node, u32 ref, u32 key);
101struct publication *nametbl_remove_publ(u32 type, u32 lower, 101struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
102 u32 node, u32 ref, u32 key); 102 u32 node, u32 ref, u32 key);
103void nametbl_subscribe(struct subscription *s); 103void tipc_nametbl_subscribe(struct subscription *s);
104void nametbl_unsubscribe(struct subscription *s); 104void tipc_nametbl_unsubscribe(struct subscription *s);
105int nametbl_init(void); 105int tipc_nametbl_init(void);
106void nametbl_stop(void); 106void tipc_nametbl_stop(void);
107 107
108#endif 108#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 6826b493c1d..074891ad4f0 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -58,25 +58,25 @@
58 * 1: The routing hierarchy. 58 * 1: The routing hierarchy.
59 * Comprises the structures 'zone', 'cluster', 'node', 'link' 59 * Comprises the structures 'zone', 'cluster', 'node', 'link'
60 * and 'bearer'. The whole hierarchy is protected by a big 60 * and 'bearer'. The whole hierarchy is protected by a big
61 * read/write lock, net_lock, to enssure that nothing is added 61 * read/write lock, tipc_net_lock, to enssure that nothing is added
62 * or removed while code is accessing any of these structures. 62 * or removed while code is accessing any of these structures.
63 * This layer must not be called from the two others while they 63 * This layer must not be called from the two others while they
64 * hold any of their own locks. 64 * hold any of their own locks.
65 * Neither must it itself do any upcalls to the other two before 65 * Neither must it itself do any upcalls to the other two before
66 * it has released net_lock and other protective locks. 66 * it has released tipc_net_lock and other protective locks.
67 * 67 *
68 * Within the net_lock domain there are two sub-domains;'node' and 68 * Within the tipc_net_lock domain there are two sub-domains;'node' and
69 * 'bearer', where local write operations are permitted, 69 * 'bearer', where local write operations are permitted,
70 * provided that those are protected by individual spin_locks 70 * provided that those are protected by individual spin_locks
71 * per instance. Code holding net_lock(read) and a node spin_lock 71 * per instance. Code holding tipc_net_lock(read) and a node spin_lock
72 * is permitted to poke around in both the node itself and its 72 * is permitted to poke around in both the node itself and its
73 * subordinate links. I.e, it can update link counters and queues, 73 * subordinate links. I.e, it can update link counters and queues,
74 * change link state, send protocol messages, and alter the 74 * change link state, send protocol messages, and alter the
75 * "active_links" array in the node; but it can _not_ remove a link 75 * "active_links" array in the node; but it can _not_ remove a link
76 * or a node from the overall structure. 76 * or a node from the overall structure.
77 * Correspondingly, individual bearers may change status within a 77 * Correspondingly, individual bearers may change status within a
78 * net_lock(read), protected by an individual spin_lock ber bearer 78 * tipc_net_lock(read), protected by an individual spin_lock ber bearer
79 * instance, but it needs net_lock(write) to remove/add any bearers. 79 * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
80 * 80 *
81 * 81 *
82 * 2: The transport level of the protocol. 82 * 2: The transport level of the protocol.
@@ -97,91 +97,91 @@
97 * (Nobody is using read-only access to this, so it can just as 97 * (Nobody is using read-only access to this, so it can just as
98 * well be changed to a spin_lock) 98 * well be changed to a spin_lock)
99 * - A spin lock to protect the registry of kernel/driver users (reg.c) 99 * - A spin lock to protect the registry of kernel/driver users (reg.c)
100 * - A global spin_lock (port_lock), which only task is to ensure 100 * - A global spin_lock (tipc_port_lock), which only task is to ensure
101 * consistency where more than one port is involved in an operation, 101 * consistency where more than one port is involved in an operation,
102 * i.e., whe a port is part of a linked list of ports. 102 * i.e., whe a port is part of a linked list of ports.
103 * There are two such lists; 'port_list', which is used for management, 103 * There are two such lists; 'port_list', which is used for management,
104 * and 'wait_list', which is used to queue ports during congestion. 104 * and 'wait_list', which is used to queue ports during congestion.
105 * 105 *
106 * 3: The name table (name_table.c, name_distr.c, subscription.c) 106 * 3: The name table (name_table.c, name_distr.c, subscription.c)
107 * - There is one big read/write-lock (nametbl_lock) protecting the 107 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the
108 * overall name table structure. Nothing must be added/removed to 108 * overall name table structure. Nothing must be added/removed to
109 * this structure without holding write access to it. 109 * this structure without holding write access to it.
110 * - There is one local spin_lock per sub_sequence, which can be seen 110 * - There is one local spin_lock per sub_sequence, which can be seen
111 * as a sub-domain to the nametbl_lock domain. It is used only 111 * as a sub-domain to the tipc_nametbl_lock domain. It is used only
112 * for translation operations, and is needed because a translation 112 * for translation operations, and is needed because a translation
113 * steps the root of the 'publication' linked list between each lookup. 113 * steps the root of the 'publication' linked list between each lookup.
114 * This is always used within the scope of a nametbl_lock(read). 114 * This is always used within the scope of a tipc_nametbl_lock(read).
115 * - A local spin_lock protecting the queue of subscriber events. 115 * - A local spin_lock protecting the queue of subscriber events.
116*/ 116*/
117 117
118rwlock_t net_lock = RW_LOCK_UNLOCKED; 118rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
119struct network net = { 0 }; 119struct network tipc_net = { 0 };
120 120
121struct node *net_select_remote_node(u32 addr, u32 ref) 121struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
122{ 122{
123 return zone_select_remote_node(net.zones[tipc_zone(addr)], addr, ref); 123 return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
124} 124}
125 125
126u32 net_select_router(u32 addr, u32 ref) 126u32 tipc_net_select_router(u32 addr, u32 ref)
127{ 127{
128 return zone_select_router(net.zones[tipc_zone(addr)], addr, ref); 128 return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)], addr, ref);
129} 129}
130 130
131 131
132u32 net_next_node(u32 a) 132u32 tipc_net_next_node(u32 a)
133{ 133{
134 if (net.zones[tipc_zone(a)]) 134 if (tipc_net.zones[tipc_zone(a)])
135 return zone_next_node(a); 135 return tipc_zone_next_node(a);
136 return 0; 136 return 0;
137} 137}
138 138
139void net_remove_as_router(u32 router) 139void tipc_net_remove_as_router(u32 router)
140{ 140{
141 u32 z_num; 141 u32 z_num;
142 142
143 for (z_num = 1; z_num <= tipc_max_zones; z_num++) { 143 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
144 if (!net.zones[z_num]) 144 if (!tipc_net.zones[z_num])
145 continue; 145 continue;
146 zone_remove_as_router(net.zones[z_num], router); 146 tipc_zone_remove_as_router(tipc_net.zones[z_num], router);
147 } 147 }
148} 148}
149 149
150void net_send_external_routes(u32 dest) 150void tipc_net_send_external_routes(u32 dest)
151{ 151{
152 u32 z_num; 152 u32 z_num;
153 153
154 for (z_num = 1; z_num <= tipc_max_zones; z_num++) { 154 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
155 if (net.zones[z_num]) 155 if (tipc_net.zones[z_num])
156 zone_send_external_routes(net.zones[z_num], dest); 156 tipc_zone_send_external_routes(tipc_net.zones[z_num], dest);
157 } 157 }
158} 158}
159 159
160int net_init(void) 160static int net_init(void)
161{ 161{
162 u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1); 162 u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
163 163
164 memset(&net, 0, sizeof(net)); 164 memset(&tipc_net, 0, sizeof(tipc_net));
165 net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC); 165 tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
166 if (!net.zones) { 166 if (!tipc_net.zones) {
167 return -ENOMEM; 167 return -ENOMEM;
168 } 168 }
169 memset(net.zones, 0, sz); 169 memset(tipc_net.zones, 0, sz);
170 return TIPC_OK; 170 return TIPC_OK;
171} 171}
172 172
173void net_stop(void) 173static void net_stop(void)
174{ 174{
175 u32 z_num; 175 u32 z_num;
176 176
177 if (!net.zones) 177 if (!tipc_net.zones)
178 return; 178 return;
179 179
180 for (z_num = 1; z_num <= tipc_max_zones; z_num++) { 180 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
181 zone_delete(net.zones[z_num]); 181 tipc_zone_delete(tipc_net.zones[z_num]);
182 } 182 }
183 kfree(net.zones); 183 kfree(tipc_net.zones);
184 net.zones = 0; 184 tipc_net.zones = 0;
185} 185}
186 186
187static void net_route_named_msg(struct sk_buff *buf) 187static void net_route_named_msg(struct sk_buff *buf)
@@ -191,26 +191,26 @@ static void net_route_named_msg(struct sk_buff *buf)
191 u32 dport; 191 u32 dport;
192 192
193 if (!msg_named(msg)) { 193 if (!msg_named(msg)) {
194 msg_dbg(msg, "net->drop_nam:"); 194 msg_dbg(msg, "tipc_net->drop_nam:");
195 buf_discard(buf); 195 buf_discard(buf);
196 return; 196 return;
197 } 197 }
198 198
199 dnode = addr_domain(msg_lookup_scope(msg)); 199 dnode = addr_domain(msg_lookup_scope(msg));
200 dport = nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode); 200 dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
201 dbg("net->lookup<%u,%u>-><%u,%x>\n", 201 dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
202 msg_nametype(msg), msg_nameinst(msg), dport, dnode); 202 msg_nametype(msg), msg_nameinst(msg), dport, dnode);
203 if (dport) { 203 if (dport) {
204 msg_set_destnode(msg, dnode); 204 msg_set_destnode(msg, dnode);
205 msg_set_destport(msg, dport); 205 msg_set_destport(msg, dport);
206 net_route_msg(buf); 206 tipc_net_route_msg(buf);
207 return; 207 return;
208 } 208 }
209 msg_dbg(msg, "net->rej:NO NAME: "); 209 msg_dbg(msg, "tipc_net->rej:NO NAME: ");
210 tipc_reject_msg(buf, TIPC_ERR_NO_NAME); 210 tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
211} 211}
212 212
213void net_route_msg(struct sk_buff *buf) 213void tipc_net_route_msg(struct sk_buff *buf)
214{ 214{
215 struct tipc_msg *msg; 215 struct tipc_msg *msg;
216 u32 dnode; 216 u32 dnode;
@@ -232,29 +232,29 @@ void net_route_msg(struct sk_buff *buf)
232 return; 232 return;
233 } 233 }
234 234
235 msg_dbg(msg, "net->rout: "); 235 msg_dbg(msg, "tipc_net->rout: ");
236 236
237 /* Handle message for this node */ 237 /* Handle message for this node */
238 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg); 238 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
239 if (in_scope(dnode, tipc_own_addr)) { 239 if (in_scope(dnode, tipc_own_addr)) {
240 if (msg_isdata(msg)) { 240 if (msg_isdata(msg)) {
241 if (msg_mcast(msg)) 241 if (msg_mcast(msg))
242 port_recv_mcast(buf, NULL); 242 tipc_port_recv_mcast(buf, NULL);
243 else if (msg_destport(msg)) 243 else if (msg_destport(msg))
244 port_recv_msg(buf); 244 tipc_port_recv_msg(buf);
245 else 245 else
246 net_route_named_msg(buf); 246 net_route_named_msg(buf);
247 return; 247 return;
248 } 248 }
249 switch (msg_user(msg)) { 249 switch (msg_user(msg)) {
250 case ROUTE_DISTRIBUTOR: 250 case ROUTE_DISTRIBUTOR:
251 cluster_recv_routing_table(buf); 251 tipc_cltr_recv_routing_table(buf);
252 break; 252 break;
253 case NAME_DISTRIBUTOR: 253 case NAME_DISTRIBUTOR:
254 named_recv(buf); 254 tipc_named_recv(buf);
255 break; 255 break;
256 case CONN_MANAGER: 256 case CONN_MANAGER:
257 port_recv_proto_msg(buf); 257 tipc_port_recv_proto_msg(buf);
258 break; 258 break;
259 default: 259 default:
260 msg_dbg(msg,"DROP/NET/<REC<"); 260 msg_dbg(msg,"DROP/NET/<REC<");
@@ -265,10 +265,10 @@ void net_route_msg(struct sk_buff *buf)
265 265
266 /* Handle message for another node */ 266 /* Handle message for another node */
267 msg_dbg(msg, "NET>SEND>: "); 267 msg_dbg(msg, "NET>SEND>: ");
268 link_send(buf, dnode, msg_link_selector(msg)); 268 tipc_link_send(buf, dnode, msg_link_selector(msg));
269} 269}
270 270
271int tipc_start_net(void) 271int tipc_net_start(void)
272{ 272{
273 char addr_string[16]; 273 char addr_string[16];
274 int res; 274 int res;
@@ -277,35 +277,35 @@ int tipc_start_net(void)
277 return -ENOPROTOOPT; 277 return -ENOPROTOOPT;
278 278
279 tipc_mode = TIPC_NET_MODE; 279 tipc_mode = TIPC_NET_MODE;
280 named_reinit(); 280 tipc_named_reinit();
281 port_reinit(); 281 tipc_port_reinit();
282 282
283 if ((res = bearer_init()) || 283 if ((res = tipc_bearer_init()) ||
284 (res = net_init()) || 284 (res = net_init()) ||
285 (res = cluster_init()) || 285 (res = tipc_cltr_init()) ||
286 (res = bclink_init())) { 286 (res = tipc_bclink_init())) {
287 return res; 287 return res;
288 } 288 }
289 subscr_stop(); 289 tipc_subscr_stop();
290 cfg_stop(); 290 tipc_cfg_stop();
291 k_signal((Handler)subscr_start, 0); 291 tipc_k_signal((Handler)tipc_subscr_start, 0);
292 k_signal((Handler)cfg_init, 0); 292 tipc_k_signal((Handler)tipc_cfg_init, 0);
293 info("Started in network mode\n"); 293 info("Started in network mode\n");
294 info("Own node address %s, network identity %u\n", 294 info("Own node address %s, network identity %u\n",
295 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 295 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
296 return TIPC_OK; 296 return TIPC_OK;
297} 297}
298 298
299void tipc_stop_net(void) 299void tipc_net_stop(void)
300{ 300{
301 if (tipc_mode != TIPC_NET_MODE) 301 if (tipc_mode != TIPC_NET_MODE)
302 return; 302 return;
303 write_lock_bh(&net_lock); 303 write_lock_bh(&tipc_net_lock);
304 bearer_stop(); 304 tipc_bearer_stop();
305 tipc_mode = TIPC_NODE_MODE; 305 tipc_mode = TIPC_NODE_MODE;
306 bclink_stop(); 306 tipc_bclink_stop();
307 net_stop(); 307 net_stop();
308 write_unlock_bh(&net_lock); 308 write_unlock_bh(&tipc_net_lock);
309 info("Left network mode \n"); 309 info("Left network mode \n");
310} 310}
311 311
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 948c6d42102..f3e0b85e647 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -49,18 +49,16 @@ struct network {
49}; 49};
50 50
51 51
52extern struct network net; 52extern struct network tipc_net;
53extern rwlock_t net_lock; 53extern rwlock_t tipc_net_lock;
54 54
55int net_init(void); 55void tipc_net_remove_as_router(u32 router);
56void net_stop(void); 56void tipc_net_send_external_routes(u32 dest);
57void net_remove_as_router(u32 router); 57void tipc_net_route_msg(struct sk_buff *buf);
58void net_send_external_routes(u32 dest); 58struct node *tipc_net_select_remote_node(u32 addr, u32 ref);
59void net_route_msg(struct sk_buff *buf); 59u32 tipc_net_select_router(u32 addr, u32 ref);
60struct node *net_select_remote_node(u32 addr, u32 ref);
61u32 net_select_router(u32 addr, u32 ref);
62 60
63int tipc_start_net(void); 61int tipc_net_start(void);
64void tipc_stop_net(void); 62void tipc_net_stop(void);
65 63
66#endif 64#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 19b3f402253..eb1bb4dce7a 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -47,13 +47,13 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); 47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
48 48
49 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) 49 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
50 rep_buf = cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN); 50 rep_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
51 else 51 else
52 rep_buf = cfg_do_cmd(req_userhdr->dest, 52 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest,
53 req_userhdr->cmd, 53 req_userhdr->cmd,
54 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, 54 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
55 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), 55 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
56 hdr_space); 56 hdr_space);
57 57
58 if (rep_buf) { 58 if (rep_buf) {
59 skb_push(rep_buf, hdr_space); 59 skb_push(rep_buf, hdr_space);
@@ -81,7 +81,7 @@ static struct genl_ops ops = {
81 81
82static int family_registered = 0; 82static int family_registered = 0;
83 83
84int netlink_start(void) 84int tipc_netlink_start(void)
85{ 85{
86 86
87 87
@@ -103,7 +103,7 @@ int netlink_start(void)
103 return -EFAULT; 103 return -EFAULT;
104} 104}
105 105
106void netlink_stop(void) 106void tipc_netlink_stop(void)
107{ 107{
108 if (family_registered) { 108 if (family_registered) {
109 genl_unregister_family(&family); 109 genl_unregister_family(&family);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 05688d01138..6d65010e5fa 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -45,17 +45,16 @@
45#include "port.h" 45#include "port.h"
46#include "bearer.h" 46#include "bearer.h"
47#include "name_distr.h" 47#include "name_distr.h"
48#include "net.h"
49 48
50void node_print(struct print_buf *buf, struct node *n_ptr, char *str); 49void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
51static void node_lost_contact(struct node *n_ptr); 50static void node_lost_contact(struct node *n_ptr);
52static void node_established_contact(struct node *n_ptr); 51static void node_established_contact(struct node *n_ptr);
53 52
54struct node *nodes = NULL; /* sorted list of nodes within cluster */ 53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */
55 54
56u32 tipc_own_tag = 0; 55u32 tipc_own_tag = 0;
57 56
58struct node *node_create(u32 addr) 57struct node *tipc_node_create(u32 addr)
59{ 58{
60 struct cluster *c_ptr; 59 struct cluster *c_ptr;
61 struct node *n_ptr; 60 struct node *n_ptr;
@@ -68,16 +67,16 @@ struct node *node_create(u32 addr)
68 n_ptr->lock = SPIN_LOCK_UNLOCKED; 67 n_ptr->lock = SPIN_LOCK_UNLOCKED;
69 INIT_LIST_HEAD(&n_ptr->nsub); 68 INIT_LIST_HEAD(&n_ptr->nsub);
70 69
71 c_ptr = cluster_find(addr); 70 c_ptr = tipc_cltr_find(addr);
72 if (c_ptr == NULL) 71 if (c_ptr == NULL)
73 c_ptr = cluster_create(addr); 72 c_ptr = tipc_cltr_create(addr);
74 if (c_ptr != NULL) { 73 if (c_ptr != NULL) {
75 n_ptr->owner = c_ptr; 74 n_ptr->owner = c_ptr;
76 cluster_attach_node(c_ptr, n_ptr); 75 tipc_cltr_attach_node(c_ptr, n_ptr);
77 n_ptr->last_router = -1; 76 n_ptr->last_router = -1;
78 77
79 /* Insert node into ordered list */ 78 /* Insert node into ordered list */
80 for (curr_node = &nodes; *curr_node; 79 for (curr_node = &tipc_nodes; *curr_node;
81 curr_node = &(*curr_node)->next) { 80 curr_node = &(*curr_node)->next) {
82 if (addr < (*curr_node)->addr) { 81 if (addr < (*curr_node)->addr) {
83 n_ptr->next = *curr_node; 82 n_ptr->next = *curr_node;
@@ -93,13 +92,13 @@ struct node *node_create(u32 addr)
93 return n_ptr; 92 return n_ptr;
94} 93}
95 94
96void node_delete(struct node *n_ptr) 95void tipc_node_delete(struct node *n_ptr)
97{ 96{
98 if (!n_ptr) 97 if (!n_ptr)
99 return; 98 return;
100 99
101#if 0 100#if 0
102 /* Not needed because links are already deleted via bearer_stop() */ 101 /* Not needed because links are already deleted via tipc_bearer_stop() */
103 102
104 u32 l_num; 103 u32 l_num;
105 104
@@ -114,12 +113,12 @@ void node_delete(struct node *n_ptr)
114 113
115 114
116/** 115/**
117 * node_link_up - handle addition of link 116 * tipc_node_link_up - handle addition of link
118 * 117 *
119 * Link becomes active (alone or shared) or standby, depending on its priority. 118 * Link becomes active (alone or shared) or standby, depending on its priority.
120 */ 119 */
121 120
122void node_link_up(struct node *n_ptr, struct link *l_ptr) 121void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
123{ 122{
124 struct link **active = &n_ptr->active_links[0]; 123 struct link **active = &n_ptr->active_links[0];
125 124
@@ -136,7 +135,7 @@ void node_link_up(struct node *n_ptr, struct link *l_ptr)
136 info("Link is standby\n"); 135 info("Link is standby\n");
137 return; 136 return;
138 } 137 }
139 link_send_duplicate(active[0], l_ptr); 138 tipc_link_send_duplicate(active[0], l_ptr);
140 if (l_ptr->priority == active[0]->priority) { 139 if (l_ptr->priority == active[0]->priority) {
141 active[0] = l_ptr; 140 active[0] = l_ptr;
142 return; 141 return;
@@ -161,7 +160,7 @@ static void node_select_active_links(struct node *n_ptr)
161 for (i = 0; i < MAX_BEARERS; i++) { 160 for (i = 0; i < MAX_BEARERS; i++) {
162 struct link *l_ptr = n_ptr->links[i]; 161 struct link *l_ptr = n_ptr->links[i];
163 162
164 if (!l_ptr || !link_is_up(l_ptr) || 163 if (!l_ptr || !tipc_link_is_up(l_ptr) ||
165 (l_ptr->priority < highest_prio)) 164 (l_ptr->priority < highest_prio))
166 continue; 165 continue;
167 166
@@ -175,14 +174,14 @@ static void node_select_active_links(struct node *n_ptr)
175} 174}
176 175
177/** 176/**
178 * node_link_down - handle loss of link 177 * tipc_node_link_down - handle loss of link
179 */ 178 */
180 179
181void node_link_down(struct node *n_ptr, struct link *l_ptr) 180void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
182{ 181{
183 struct link **active; 182 struct link **active;
184 183
185 if (!link_is_active(l_ptr)) { 184 if (!tipc_link_is_active(l_ptr)) {
186 info("Lost standby link <%s> on network plane %c\n", 185 info("Lost standby link <%s> on network plane %c\n",
187 l_ptr->name, l_ptr->b_ptr->net_plane); 186 l_ptr->name, l_ptr->b_ptr->net_plane);
188 return; 187 return;
@@ -197,40 +196,40 @@ void node_link_down(struct node *n_ptr, struct link *l_ptr)
197 active[1] = active[0]; 196 active[1] = active[0];
198 if (active[0] == l_ptr) 197 if (active[0] == l_ptr)
199 node_select_active_links(n_ptr); 198 node_select_active_links(n_ptr);
200 if (node_is_up(n_ptr)) 199 if (tipc_node_is_up(n_ptr))
201 link_changeover(l_ptr); 200 tipc_link_changeover(l_ptr);
202 else 201 else
203 node_lost_contact(n_ptr); 202 node_lost_contact(n_ptr);
204} 203}
205 204
206int node_has_active_links(struct node *n_ptr) 205int tipc_node_has_active_links(struct node *n_ptr)
207{ 206{
208 return (n_ptr && 207 return (n_ptr &&
209 ((n_ptr->active_links[0]) || (n_ptr->active_links[1]))); 208 ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
210} 209}
211 210
212int node_has_redundant_links(struct node *n_ptr) 211int tipc_node_has_redundant_links(struct node *n_ptr)
213{ 212{
214 return (node_has_active_links(n_ptr) && 213 return (tipc_node_has_active_links(n_ptr) &&
215 (n_ptr->active_links[0] != n_ptr->active_links[1])); 214 (n_ptr->active_links[0] != n_ptr->active_links[1]));
216} 215}
217 216
218int node_has_active_routes(struct node *n_ptr) 217int tipc_node_has_active_routes(struct node *n_ptr)
219{ 218{
220 return (n_ptr && (n_ptr->last_router >= 0)); 219 return (n_ptr && (n_ptr->last_router >= 0));
221} 220}
222 221
223int node_is_up(struct node *n_ptr) 222int tipc_node_is_up(struct node *n_ptr)
224{ 223{
225 return (node_has_active_links(n_ptr) || node_has_active_routes(n_ptr)); 224 return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
226} 225}
227 226
228struct node *node_attach_link(struct link *l_ptr) 227struct node *tipc_node_attach_link(struct link *l_ptr)
229{ 228{
230 struct node *n_ptr = node_find(l_ptr->addr); 229 struct node *n_ptr = tipc_node_find(l_ptr->addr);
231 230
232 if (!n_ptr) 231 if (!n_ptr)
233 n_ptr = node_create(l_ptr->addr); 232 n_ptr = tipc_node_create(l_ptr->addr);
234 if (n_ptr) { 233 if (n_ptr) {
235 u32 bearer_id = l_ptr->b_ptr->identity; 234 u32 bearer_id = l_ptr->b_ptr->identity;
236 char addr_string[16]; 235 char addr_string[16];
@@ -246,7 +245,7 @@ struct node *node_attach_link(struct link *l_ptr)
246 245
247 if (!n_ptr->links[bearer_id]) { 246 if (!n_ptr->links[bearer_id]) {
248 n_ptr->links[bearer_id] = l_ptr; 247 n_ptr->links[bearer_id] = l_ptr;
249 net.zones[tipc_zone(l_ptr->addr)]->links++; 248 tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
250 n_ptr->link_cnt++; 249 n_ptr->link_cnt++;
251 return n_ptr; 250 return n_ptr;
252 } 251 }
@@ -257,10 +256,10 @@ struct node *node_attach_link(struct link *l_ptr)
257 return 0; 256 return 0;
258} 257}
259 258
260void node_detach_link(struct node *n_ptr, struct link *l_ptr) 259void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
261{ 260{
262 n_ptr->links[l_ptr->b_ptr->identity] = 0; 261 n_ptr->links[l_ptr->b_ptr->identity] = 0;
263 net.zones[tipc_zone(l_ptr->addr)]->links--; 262 tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
264 n_ptr->link_cnt--; 263 n_ptr->link_cnt--;
265} 264}
266 265
@@ -315,45 +314,45 @@ static void node_established_contact(struct node *n_ptr)
315 struct cluster *c_ptr; 314 struct cluster *c_ptr;
316 315
317 dbg("node_established_contact:-> %x\n", n_ptr->addr); 316 dbg("node_established_contact:-> %x\n", n_ptr->addr);
318 if (!node_has_active_routes(n_ptr)) { 317 if (!tipc_node_has_active_routes(n_ptr)) {
319 k_signal((Handler)named_node_up, n_ptr->addr); 318 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
320 } 319 }
321 320
322 /* Syncronize broadcast acks */ 321 /* Syncronize broadcast acks */
323 n_ptr->bclink.acked = bclink_get_last_sent(); 322 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
324 323
325 if (is_slave(tipc_own_addr)) 324 if (is_slave(tipc_own_addr))
326 return; 325 return;
327 if (!in_own_cluster(n_ptr->addr)) { 326 if (!in_own_cluster(n_ptr->addr)) {
328 /* Usage case 1 (see above) */ 327 /* Usage case 1 (see above) */
329 c_ptr = cluster_find(tipc_own_addr); 328 c_ptr = tipc_cltr_find(tipc_own_addr);
330 if (!c_ptr) 329 if (!c_ptr)
331 c_ptr = cluster_create(tipc_own_addr); 330 c_ptr = tipc_cltr_create(tipc_own_addr);
332 if (c_ptr) 331 if (c_ptr)
333 cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, 332 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
334 tipc_max_nodes); 333 tipc_max_nodes);
335 return; 334 return;
336 } 335 }
337 336
338 c_ptr = n_ptr->owner; 337 c_ptr = n_ptr->owner;
339 if (is_slave(n_ptr->addr)) { 338 if (is_slave(n_ptr->addr)) {
340 /* Usage case 2 (see above) */ 339 /* Usage case 2 (see above) */
341 cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes); 340 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
342 cluster_send_local_routes(c_ptr, n_ptr->addr); 341 tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
343 return; 342 return;
344 } 343 }
345 344
346 if (n_ptr->bclink.supported) { 345 if (n_ptr->bclink.supported) {
347 nmap_add(&cluster_bcast_nodes, n_ptr->addr); 346 tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
348 if (n_ptr->addr < tipc_own_addr) 347 if (n_ptr->addr < tipc_own_addr)
349 tipc_own_tag++; 348 tipc_own_tag++;
350 } 349 }
351 350
352 /* Case 3 (see above) */ 351 /* Case 3 (see above) */
353 net_send_external_routes(n_ptr->addr); 352 tipc_net_send_external_routes(n_ptr->addr);
354 cluster_send_slave_routes(c_ptr, n_ptr->addr); 353 tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
355 cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE, 354 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
356 highest_allowed_slave); 355 tipc_highest_allowed_slave);
357} 356}
358 357
359static void node_lost_contact(struct node *n_ptr) 358static void node_lost_contact(struct node *n_ptr)
@@ -375,39 +374,39 @@ static void node_lost_contact(struct node *n_ptr)
375 n_ptr->bclink.defragm = NULL; 374 n_ptr->bclink.defragm = NULL;
376 } 375 }
377 if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) { 376 if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
378 bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000)); 377 tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
379 } 378 }
380 379
381 /* Update routing tables */ 380 /* Update routing tables */
382 if (is_slave(tipc_own_addr)) { 381 if (is_slave(tipc_own_addr)) {
383 net_remove_as_router(n_ptr->addr); 382 tipc_net_remove_as_router(n_ptr->addr);
384 } else { 383 } else {
385 if (!in_own_cluster(n_ptr->addr)) { 384 if (!in_own_cluster(n_ptr->addr)) {
386 /* Case 4 (see above) */ 385 /* Case 4 (see above) */
387 c_ptr = cluster_find(tipc_own_addr); 386 c_ptr = tipc_cltr_find(tipc_own_addr);
388 cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1, 387 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
389 tipc_max_nodes); 388 tipc_max_nodes);
390 } else { 389 } else {
391 /* Case 5 (see above) */ 390 /* Case 5 (see above) */
392 c_ptr = cluster_find(n_ptr->addr); 391 c_ptr = tipc_cltr_find(n_ptr->addr);
393 if (is_slave(n_ptr->addr)) { 392 if (is_slave(n_ptr->addr)) {
394 cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1, 393 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
395 tipc_max_nodes); 394 tipc_max_nodes);
396 } else { 395 } else {
397 if (n_ptr->bclink.supported) { 396 if (n_ptr->bclink.supported) {
398 nmap_remove(&cluster_bcast_nodes, 397 tipc_nmap_remove(&tipc_cltr_bcast_nodes,
399 n_ptr->addr); 398 n_ptr->addr);
400 if (n_ptr->addr < tipc_own_addr) 399 if (n_ptr->addr < tipc_own_addr)
401 tipc_own_tag--; 400 tipc_own_tag--;
402 } 401 }
403 net_remove_as_router(n_ptr->addr); 402 tipc_net_remove_as_router(n_ptr->addr);
404 cluster_bcast_lost_route(c_ptr, n_ptr->addr, 403 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
405 LOWEST_SLAVE, 404 LOWEST_SLAVE,
406 highest_allowed_slave); 405 tipc_highest_allowed_slave);
407 } 406 }
408 } 407 }
409 } 408 }
410 if (node_has_active_routes(n_ptr)) 409 if (tipc_node_has_active_routes(n_ptr))
411 return; 410 return;
412 411
413 info("Lost contact with %s\n", 412 info("Lost contact with %s\n",
@@ -420,35 +419,35 @@ static void node_lost_contact(struct node *n_ptr)
420 continue; 419 continue;
421 l_ptr->reset_checkpoint = l_ptr->next_in_no; 420 l_ptr->reset_checkpoint = l_ptr->next_in_no;
422 l_ptr->exp_msg_count = 0; 421 l_ptr->exp_msg_count = 0;
423 link_reset_fragments(l_ptr); 422 tipc_link_reset_fragments(l_ptr);
424 } 423 }
425 424
426 /* Notify subscribers */ 425 /* Notify subscribers */
427 list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) { 426 list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
428 ns->node = 0; 427 ns->node = 0;
429 list_del_init(&ns->nodesub_list); 428 list_del_init(&ns->nodesub_list);
430 k_signal((Handler)ns->handle_node_down, 429 tipc_k_signal((Handler)ns->handle_node_down,
431 (unsigned long)ns->usr_handle); 430 (unsigned long)ns->usr_handle);
432 } 431 }
433} 432}
434 433
435/** 434/**
436 * node_select_next_hop - find the next-hop node for a message 435 * tipc_node_select_next_hop - find the next-hop node for a message
437 * 436 *
438 * Called by when cluster local lookup has failed. 437 * Called by when cluster local lookup has failed.
439 */ 438 */
440 439
441struct node *node_select_next_hop(u32 addr, u32 selector) 440struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
442{ 441{
443 struct node *n_ptr; 442 struct node *n_ptr;
444 u32 router_addr; 443 u32 router_addr;
445 444
446 if (!addr_domain_valid(addr)) 445 if (!tipc_addr_domain_valid(addr))
447 return 0; 446 return 0;
448 447
449 /* Look for direct link to destination processsor */ 448 /* Look for direct link to destination processsor */
450 n_ptr = node_find(addr); 449 n_ptr = tipc_node_find(addr);
451 if (n_ptr && node_has_active_links(n_ptr)) 450 if (n_ptr && tipc_node_has_active_links(n_ptr))
452 return n_ptr; 451 return n_ptr;
453 452
454 /* Cluster local system nodes *must* have direct links */ 453 /* Cluster local system nodes *must* have direct links */
@@ -456,9 +455,9 @@ struct node *node_select_next_hop(u32 addr, u32 selector)
456 return 0; 455 return 0;
457 456
458 /* Look for cluster local router with direct link to node */ 457 /* Look for cluster local router with direct link to node */
459 router_addr = node_select_router(n_ptr, selector); 458 router_addr = tipc_node_select_router(n_ptr, selector);
460 if (router_addr) 459 if (router_addr)
461 return node_select(router_addr, selector); 460 return tipc_node_select(router_addr, selector);
462 461
463 /* Slave nodes can only be accessed within own cluster via a 462 /* Slave nodes can only be accessed within own cluster via a
464 known router with direct link -- if no router was found,give up */ 463 known router with direct link -- if no router was found,give up */
@@ -467,25 +466,25 @@ struct node *node_select_next_hop(u32 addr, u32 selector)
467 466
468 /* Inter zone/cluster -- find any direct link to remote cluster */ 467 /* Inter zone/cluster -- find any direct link to remote cluster */
469 addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 468 addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
470 n_ptr = net_select_remote_node(addr, selector); 469 n_ptr = tipc_net_select_remote_node(addr, selector);
471 if (n_ptr && node_has_active_links(n_ptr)) 470 if (n_ptr && tipc_node_has_active_links(n_ptr))
472 return n_ptr; 471 return n_ptr;
473 472
474 /* Last resort -- look for any router to anywhere in remote zone */ 473 /* Last resort -- look for any router to anywhere in remote zone */
475 router_addr = net_select_router(addr, selector); 474 router_addr = tipc_net_select_router(addr, selector);
476 if (router_addr) 475 if (router_addr)
477 return node_select(router_addr, selector); 476 return tipc_node_select(router_addr, selector);
478 477
479 return 0; 478 return 0;
480} 479}
481 480
482/** 481/**
483 * node_select_router - select router to reach specified node 482 * tipc_node_select_router - select router to reach specified node
484 * 483 *
485 * Uses a deterministic and fair algorithm for selecting router node. 484 * Uses a deterministic and fair algorithm for selecting router node.
486 */ 485 */
487 486
488u32 node_select_router(struct node *n_ptr, u32 ref) 487u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
489{ 488{
490 u32 ulim; 489 u32 ulim;
491 u32 mask; 490 u32 mask;
@@ -523,7 +522,7 @@ u32 node_select_router(struct node *n_ptr, u32 ref)
523 return tipc_addr(own_zone(), own_cluster(), r); 522 return tipc_addr(own_zone(), own_cluster(), r);
524} 523}
525 524
526void node_add_router(struct node *n_ptr, u32 router) 525void tipc_node_add_router(struct node *n_ptr, u32 router)
527{ 526{
528 u32 r_num = tipc_node(router); 527 u32 r_num = tipc_node(router);
529 528
@@ -534,7 +533,7 @@ void node_add_router(struct node *n_ptr, u32 router)
534 !n_ptr->routers[n_ptr->last_router]); 533 !n_ptr->routers[n_ptr->last_router]);
535} 534}
536 535
537void node_remove_router(struct node *n_ptr, u32 router) 536void tipc_node_remove_router(struct node *n_ptr, u32 router)
538{ 537{
539 u32 r_num = tipc_node(router); 538 u32 r_num = tipc_node(router);
540 539
@@ -547,7 +546,7 @@ void node_remove_router(struct node *n_ptr, u32 router)
547 while ((--n_ptr->last_router >= 0) && 546 while ((--n_ptr->last_router >= 0) &&
548 !n_ptr->routers[n_ptr->last_router]); 547 !n_ptr->routers[n_ptr->last_router]);
549 548
550 if (!node_is_up(n_ptr)) 549 if (!tipc_node_is_up(n_ptr))
551 node_lost_contact(n_ptr); 550 node_lost_contact(n_ptr);
552} 551}
553 552
@@ -572,16 +571,16 @@ u32 tipc_available_nodes(const u32 domain)
572 struct node *n_ptr; 571 struct node *n_ptr;
573 u32 cnt = 0; 572 u32 cnt = 0;
574 573
575 for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) { 574 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
576 if (!in_scope(domain, n_ptr->addr)) 575 if (!in_scope(domain, n_ptr->addr))
577 continue; 576 continue;
578 if (node_is_up(n_ptr)) 577 if (tipc_node_is_up(n_ptr))
579 cnt++; 578 cnt++;
580 } 579 }
581 return cnt; 580 return cnt;
582} 581}
583 582
584struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space) 583struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
585{ 584{
586 u32 domain; 585 u32 domain;
587 struct sk_buff *buf; 586 struct sk_buff *buf;
@@ -589,40 +588,40 @@ struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
589 struct tipc_node_info node_info; 588 struct tipc_node_info node_info;
590 589
591 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 590 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
592 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 591 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
593 592
594 domain = *(u32 *)TLV_DATA(req_tlv_area); 593 domain = *(u32 *)TLV_DATA(req_tlv_area);
595 domain = ntohl(domain); 594 domain = ntohl(domain);
596 if (!addr_domain_valid(domain)) 595 if (!tipc_addr_domain_valid(domain))
597 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 596 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
598 " (network address)"); 597 " (network address)");
599 598
600 if (!nodes) 599 if (!tipc_nodes)
601 return cfg_reply_none(); 600 return tipc_cfg_reply_none();
602 601
603 /* For now, get space for all other nodes 602 /* For now, get space for all other nodes
604 (will need to modify this when slave nodes are supported */ 603 (will need to modify this when slave nodes are supported */
605 604
606 buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) * 605 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
607 (tipc_max_nodes - 1)); 606 (tipc_max_nodes - 1));
608 if (!buf) 607 if (!buf)
609 return NULL; 608 return NULL;
610 609
611 /* Add TLVs for all nodes in scope */ 610 /* Add TLVs for all nodes in scope */
612 611
613 for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) { 612 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
614 if (!in_scope(domain, n_ptr->addr)) 613 if (!in_scope(domain, n_ptr->addr))
615 continue; 614 continue;
616 node_info.addr = htonl(n_ptr->addr); 615 node_info.addr = htonl(n_ptr->addr);
617 node_info.up = htonl(node_is_up(n_ptr)); 616 node_info.up = htonl(tipc_node_is_up(n_ptr));
618 cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 617 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
619 &node_info, sizeof(node_info)); 618 &node_info, sizeof(node_info));
620 } 619 }
621 620
622 return buf; 621 return buf;
623} 622}
624 623
625struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space) 624struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
626{ 625{
627 u32 domain; 626 u32 domain;
628 struct sk_buff *buf; 627 struct sk_buff *buf;
@@ -630,22 +629,22 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
630 struct tipc_link_info link_info; 629 struct tipc_link_info link_info;
631 630
632 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 631 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
633 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 632 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
634 633
635 domain = *(u32 *)TLV_DATA(req_tlv_area); 634 domain = *(u32 *)TLV_DATA(req_tlv_area);
636 domain = ntohl(domain); 635 domain = ntohl(domain);
637 if (!addr_domain_valid(domain)) 636 if (!tipc_addr_domain_valid(domain))
638 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 637 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
639 " (network address)"); 638 " (network address)");
640 639
641 if (!nodes) 640 if (!tipc_nodes)
642 return cfg_reply_none(); 641 return tipc_cfg_reply_none();
643 642
644 /* For now, get space for 2 links to all other nodes + bcast link 643 /* For now, get space for 2 links to all other nodes + bcast link
645 (will need to modify this when slave nodes are supported */ 644 (will need to modify this when slave nodes are supported */
646 645
647 buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) * 646 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
648 (2 * (tipc_max_nodes - 1) + 1)); 647 (2 * (tipc_max_nodes - 1) + 1));
649 if (!buf) 648 if (!buf)
650 return NULL; 649 return NULL;
651 650
@@ -654,12 +653,12 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
654 link_info.dest = tipc_own_addr & 0xfffff00; 653 link_info.dest = tipc_own_addr & 0xfffff00;
655 link_info.dest = htonl(link_info.dest); 654 link_info.dest = htonl(link_info.dest);
656 link_info.up = htonl(1); 655 link_info.up = htonl(1);
657 sprintf(link_info.str, bc_link_name); 656 sprintf(link_info.str, tipc_bclink_name);
658 cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); 657 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
659 658
660 /* Add TLVs for any other links in scope */ 659 /* Add TLVs for any other links in scope */
661 660
662 for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) { 661 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
663 u32 i; 662 u32 i;
664 663
665 if (!in_scope(domain, n_ptr->addr)) 664 if (!in_scope(domain, n_ptr->addr))
@@ -668,10 +667,10 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
668 if (!n_ptr->links[i]) 667 if (!n_ptr->links[i])
669 continue; 668 continue;
670 link_info.dest = htonl(n_ptr->addr); 669 link_info.dest = htonl(n_ptr->addr);
671 link_info.up = htonl(link_is_up(n_ptr->links[i])); 670 link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
672 strcpy(link_info.str, n_ptr->links[i]->name); 671 strcpy(link_info.str, n_ptr->links[i]->name);
673 cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 672 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
674 &link_info, sizeof(link_info)); 673 &link_info, sizeof(link_info));
675 } 674 }
676 } 675 }
677 676
diff --git a/net/tipc/node.h b/net/tipc/node.h
index b39442badcc..29f7ae6992d 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -92,31 +92,31 @@ struct node {
92 } bclink; 92 } bclink;
93}; 93};
94 94
95extern struct node *nodes; 95extern struct node *tipc_nodes;
96extern u32 tipc_own_tag; 96extern u32 tipc_own_tag;
97 97
98struct node *node_create(u32 addr); 98struct node *tipc_node_create(u32 addr);
99void node_delete(struct node *n_ptr); 99void tipc_node_delete(struct node *n_ptr);
100struct node *node_attach_link(struct link *l_ptr); 100struct node *tipc_node_attach_link(struct link *l_ptr);
101void node_detach_link(struct node *n_ptr, struct link *l_ptr); 101void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr);
102void node_link_down(struct node *n_ptr, struct link *l_ptr); 102void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr);
103void node_link_up(struct node *n_ptr, struct link *l_ptr); 103void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr);
104int node_has_active_links(struct node *n_ptr); 104int tipc_node_has_active_links(struct node *n_ptr);
105int node_has_redundant_links(struct node *n_ptr); 105int tipc_node_has_redundant_links(struct node *n_ptr);
106u32 node_select_router(struct node *n_ptr, u32 ref); 106u32 tipc_node_select_router(struct node *n_ptr, u32 ref);
107struct node *node_select_next_hop(u32 addr, u32 selector); 107struct node *tipc_node_select_next_hop(u32 addr, u32 selector);
108int node_is_up(struct node *n_ptr); 108int tipc_node_is_up(struct node *n_ptr);
109void node_add_router(struct node *n_ptr, u32 router); 109void tipc_node_add_router(struct node *n_ptr, u32 router);
110void node_remove_router(struct node *n_ptr, u32 router); 110void tipc_node_remove_router(struct node *n_ptr, u32 router);
111struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space); 111struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
112struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space); 112struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
113 113
114static inline struct node *node_find(u32 addr) 114static inline struct node *tipc_node_find(u32 addr)
115{ 115{
116 if (likely(in_own_cluster(addr))) 116 if (likely(in_own_cluster(addr)))
117 return local_nodes[tipc_node(addr)]; 117 return tipc_local_nodes[tipc_node(addr)];
118 else if (addr_domain_valid(addr)) { 118 else if (tipc_addr_domain_valid(addr)) {
119 struct cluster *c_ptr = cluster_find(addr); 119 struct cluster *c_ptr = tipc_cltr_find(addr);
120 120
121 if (c_ptr) 121 if (c_ptr)
122 return c_ptr->nodes[tipc_node(addr)]; 122 return c_ptr->nodes[tipc_node(addr)];
@@ -124,19 +124,19 @@ static inline struct node *node_find(u32 addr)
124 return 0; 124 return 0;
125} 125}
126 126
127static inline struct node *node_select(u32 addr, u32 selector) 127static inline struct node *tipc_node_select(u32 addr, u32 selector)
128{ 128{
129 if (likely(in_own_cluster(addr))) 129 if (likely(in_own_cluster(addr)))
130 return local_nodes[tipc_node(addr)]; 130 return tipc_local_nodes[tipc_node(addr)];
131 return node_select_next_hop(addr, selector); 131 return tipc_node_select_next_hop(addr, selector);
132} 132}
133 133
134static inline void node_lock(struct node *n_ptr) 134static inline void tipc_node_lock(struct node *n_ptr)
135{ 135{
136 spin_lock_bh(&n_ptr->lock); 136 spin_lock_bh(&n_ptr->lock);
137} 137}
138 138
139static inline void node_unlock(struct node *n_ptr) 139static inline void tipc_node_unlock(struct node *n_ptr)
140{ 140{
141 spin_unlock_bh(&n_ptr->lock); 141 spin_unlock_bh(&n_ptr->lock);
142} 142}
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 79375927916..afeea121d8b 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -41,39 +41,39 @@
41#include "addr.h" 41#include "addr.h"
42 42
43/** 43/**
44 * nodesub_subscribe - create "node down" subscription for specified node 44 * tipc_nodesub_subscribe - create "node down" subscription for specified node
45 */ 45 */
46 46
47void nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
48 void *usr_handle, net_ev_handler handle_down) 48 void *usr_handle, net_ev_handler handle_down)
49{ 49{
50 node_sub->node = 0; 50 node_sub->node = 0;
51 if (addr == tipc_own_addr) 51 if (addr == tipc_own_addr)
52 return; 52 return;
53 if (!addr_node_valid(addr)) { 53 if (!tipc_addr_node_valid(addr)) {
54 warn("node_subscr with illegal %x\n", addr); 54 warn("node_subscr with illegal %x\n", addr);
55 return; 55 return;
56 } 56 }
57 57
58 node_sub->handle_node_down = handle_down; 58 node_sub->handle_node_down = handle_down;
59 node_sub->usr_handle = usr_handle; 59 node_sub->usr_handle = usr_handle;
60 node_sub->node = node_find(addr); 60 node_sub->node = tipc_node_find(addr);
61 assert(node_sub->node); 61 assert(node_sub->node);
62 node_lock(node_sub->node); 62 tipc_node_lock(node_sub->node);
63 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub); 63 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
64 node_unlock(node_sub->node); 64 tipc_node_unlock(node_sub->node);
65} 65}
66 66
67/** 67/**
68 * nodesub_unsubscribe - cancel "node down" subscription (if any) 68 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
69 */ 69 */
70 70
71void nodesub_unsubscribe(struct node_subscr *node_sub) 71void tipc_nodesub_unsubscribe(struct node_subscr *node_sub)
72{ 72{
73 if (!node_sub->node) 73 if (!node_sub->node)
74 return; 74 return;
75 75
76 node_lock(node_sub->node); 76 tipc_node_lock(node_sub->node);
77 list_del_init(&node_sub->nodesub_list); 77 list_del_init(&node_sub->nodesub_list);
78 node_unlock(node_sub->node); 78 tipc_node_unlock(node_sub->node);
79} 79}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index a3b87ac4859..01751c4fbb4 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -56,8 +56,8 @@ struct node_subscr {
56 struct list_head nodesub_list; 56 struct list_head nodesub_list;
57}; 57};
58 58
59void nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 59void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
60 void *usr_handle, net_ev_handler handle_down); 60 void *usr_handle, net_ev_handler handle_down);
61void nodesub_unsubscribe(struct node_subscr *node_sub); 61void tipc_nodesub_unsubscribe(struct node_subscr *node_sub);
62 62
63#endif 63#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 66caca7abe9..72aae52bfec 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -57,10 +57,10 @@
57static struct sk_buff *msg_queue_head = 0; 57static struct sk_buff *msg_queue_head = 0;
58static struct sk_buff *msg_queue_tail = 0; 58static struct sk_buff *msg_queue_tail = 0;
59 59
60spinlock_t port_list_lock = SPIN_LOCK_UNLOCKED; 60spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
61static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED; 61static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
62 62
63LIST_HEAD(ports); 63static LIST_HEAD(ports);
64static void port_handle_node_down(unsigned long ref); 64static void port_handle_node_down(unsigned long ref);
65static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err); 65static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
66static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err); 66static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
@@ -107,7 +107,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
107 struct sk_buff *buf; 107 struct sk_buff *buf;
108 struct sk_buff *ibuf = NULL; 108 struct sk_buff *ibuf = NULL;
109 struct port_list dports = {0, NULL, }; 109 struct port_list dports = {0, NULL, };
110 struct port *oport = port_deref(ref); 110 struct port *oport = tipc_port_deref(ref);
111 int ext_targets; 111 int ext_targets;
112 int res; 112 int res;
113 113
@@ -129,8 +129,8 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
129 129
130 /* Figure out where to send multicast message */ 130 /* Figure out where to send multicast message */
131 131
132 ext_targets = nametbl_mc_translate(seq->type, seq->lower, seq->upper, 132 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
133 TIPC_NODE_SCOPE, &dports); 133 TIPC_NODE_SCOPE, &dports);
134 134
135 /* Send message to destinations (duplicate it only if necessary) */ 135 /* Send message to destinations (duplicate it only if necessary) */
136 136
@@ -138,12 +138,12 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
138 if (dports.count != 0) { 138 if (dports.count != 0) {
139 ibuf = skb_copy(buf, GFP_ATOMIC); 139 ibuf = skb_copy(buf, GFP_ATOMIC);
140 if (ibuf == NULL) { 140 if (ibuf == NULL) {
141 port_list_free(&dports); 141 tipc_port_list_free(&dports);
142 buf_discard(buf); 142 buf_discard(buf);
143 return -ENOMEM; 143 return -ENOMEM;
144 } 144 }
145 } 145 }
146 res = bclink_send_msg(buf); 146 res = tipc_bclink_send_msg(buf);
147 if ((res < 0) && (dports.count != 0)) { 147 if ((res < 0) && (dports.count != 0)) {
148 buf_discard(ibuf); 148 buf_discard(ibuf);
149 } 149 }
@@ -153,20 +153,20 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
153 153
154 if (res >= 0) { 154 if (res >= 0) {
155 if (ibuf) 155 if (ibuf)
156 port_recv_mcast(ibuf, &dports); 156 tipc_port_recv_mcast(ibuf, &dports);
157 } else { 157 } else {
158 port_list_free(&dports); 158 tipc_port_list_free(&dports);
159 } 159 }
160 return res; 160 return res;
161} 161}
162 162
163/** 163/**
164 * port_recv_mcast - deliver multicast message to all destination ports 164 * tipc_port_recv_mcast - deliver multicast message to all destination ports
165 * 165 *
166 * If there is no port list, perform a lookup to create one 166 * If there is no port list, perform a lookup to create one
167 */ 167 */
168 168
169void port_recv_mcast(struct sk_buff *buf, struct port_list *dp) 169void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
170{ 170{
171 struct tipc_msg* msg; 171 struct tipc_msg* msg;
172 struct port_list dports = {0, NULL, }; 172 struct port_list dports = {0, NULL, };
@@ -179,7 +179,7 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
179 /* Create destination port list, if one wasn't supplied */ 179 /* Create destination port list, if one wasn't supplied */
180 180
181 if (dp == NULL) { 181 if (dp == NULL) {
182 nametbl_mc_translate(msg_nametype(msg), 182 tipc_nametbl_mc_translate(msg_nametype(msg),
183 msg_namelower(msg), 183 msg_namelower(msg),
184 msg_nameupper(msg), 184 msg_nameupper(msg),
185 TIPC_CLUSTER_SCOPE, 185 TIPC_CLUSTER_SCOPE,
@@ -192,8 +192,8 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
192 if (dp->count != 0) { 192 if (dp->count != 0) {
193 if (dp->count == 1) { 193 if (dp->count == 1) {
194 msg_set_destport(msg, dp->ports[0]); 194 msg_set_destport(msg, dp->ports[0]);
195 port_recv_msg(buf); 195 tipc_port_recv_msg(buf);
196 port_list_free(dp); 196 tipc_port_list_free(dp);
197 return; 197 return;
198 } 198 }
199 for (; cnt < dp->count; cnt++) { 199 for (; cnt < dp->count; cnt++) {
@@ -209,12 +209,12 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
209 item = item->next; 209 item = item->next;
210 } 210 }
211 msg_set_destport(buf_msg(b),item->ports[index]); 211 msg_set_destport(buf_msg(b),item->ports[index]);
212 port_recv_msg(b); 212 tipc_port_recv_msg(b);
213 } 213 }
214 } 214 }
215exit: 215exit:
216 buf_discard(buf); 216 buf_discard(buf);
217 port_list_free(dp); 217 tipc_port_list_free(dp);
218} 218}
219 219
220/** 220/**
@@ -238,14 +238,14 @@ u32 tipc_createport_raw(void *usr_handle,
238 return 0; 238 return 0;
239 } 239 }
240 memset(p_ptr, 0, sizeof(*p_ptr)); 240 memset(p_ptr, 0, sizeof(*p_ptr));
241 ref = ref_acquire(p_ptr, &p_ptr->publ.lock); 241 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
242 if (!ref) { 242 if (!ref) {
243 warn("Reference Table Exhausted\n"); 243 warn("Reference Table Exhausted\n");
244 kfree(p_ptr); 244 kfree(p_ptr);
245 return 0; 245 return 0;
246 } 246 }
247 247
248 port_lock(ref); 248 tipc_port_lock(ref);
249 p_ptr->publ.ref = ref; 249 p_ptr->publ.ref = ref;
250 msg = &p_ptr->publ.phdr; 250 msg = &p_ptr->publ.phdr;
251 msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0); 251 msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
@@ -264,12 +264,12 @@ u32 tipc_createport_raw(void *usr_handle,
264 p_ptr->wakeup = wakeup; 264 p_ptr->wakeup = wakeup;
265 p_ptr->user_port = 0; 265 p_ptr->user_port = 0;
266 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); 266 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
267 spin_lock_bh(&port_list_lock); 267 spin_lock_bh(&tipc_port_list_lock);
268 INIT_LIST_HEAD(&p_ptr->publications); 268 INIT_LIST_HEAD(&p_ptr->publications);
269 INIT_LIST_HEAD(&p_ptr->port_list); 269 INIT_LIST_HEAD(&p_ptr->port_list);
270 list_add_tail(&p_ptr->port_list, &ports); 270 list_add_tail(&p_ptr->port_list, &ports);
271 spin_unlock_bh(&port_list_lock); 271 spin_unlock_bh(&tipc_port_list_lock);
272 port_unlock(p_ptr); 272 tipc_port_unlock(p_ptr);
273 return ref; 273 return ref;
274} 274}
275 275
@@ -279,31 +279,31 @@ int tipc_deleteport(u32 ref)
279 struct sk_buff *buf = 0; 279 struct sk_buff *buf = 0;
280 280
281 tipc_withdraw(ref, 0, 0); 281 tipc_withdraw(ref, 0, 0);
282 p_ptr = port_lock(ref); 282 p_ptr = tipc_port_lock(ref);
283 if (!p_ptr) 283 if (!p_ptr)
284 return -EINVAL; 284 return -EINVAL;
285 285
286 ref_discard(ref); 286 tipc_ref_discard(ref);
287 port_unlock(p_ptr); 287 tipc_port_unlock(p_ptr);
288 288
289 k_cancel_timer(&p_ptr->timer); 289 k_cancel_timer(&p_ptr->timer);
290 if (p_ptr->publ.connected) { 290 if (p_ptr->publ.connected) {
291 buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT); 291 buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
292 nodesub_unsubscribe(&p_ptr->subscription); 292 tipc_nodesub_unsubscribe(&p_ptr->subscription);
293 } 293 }
294 if (p_ptr->user_port) { 294 if (p_ptr->user_port) {
295 reg_remove_port(p_ptr->user_port); 295 tipc_reg_remove_port(p_ptr->user_port);
296 kfree(p_ptr->user_port); 296 kfree(p_ptr->user_port);
297 } 297 }
298 298
299 spin_lock_bh(&port_list_lock); 299 spin_lock_bh(&tipc_port_list_lock);
300 list_del(&p_ptr->port_list); 300 list_del(&p_ptr->port_list);
301 list_del(&p_ptr->wait_list); 301 list_del(&p_ptr->wait_list);
302 spin_unlock_bh(&port_list_lock); 302 spin_unlock_bh(&tipc_port_list_lock);
303 k_term_timer(&p_ptr->timer); 303 k_term_timer(&p_ptr->timer);
304 kfree(p_ptr); 304 kfree(p_ptr);
305 dbg("Deleted port %u\n", ref); 305 dbg("Deleted port %u\n", ref);
306 net_route_msg(buf); 306 tipc_net_route_msg(buf);
307 return TIPC_OK; 307 return TIPC_OK;
308} 308}
309 309
@@ -315,7 +315,7 @@ int tipc_deleteport(u32 ref)
315 315
316struct tipc_port *tipc_get_port(const u32 ref) 316struct tipc_port *tipc_get_port(const u32 ref)
317{ 317{
318 return (struct tipc_port *)ref_deref(ref); 318 return (struct tipc_port *)tipc_ref_deref(ref);
319} 319}
320 320
321/** 321/**
@@ -327,11 +327,11 @@ void *tipc_get_handle(const u32 ref)
327 struct port *p_ptr; 327 struct port *p_ptr;
328 void * handle; 328 void * handle;
329 329
330 p_ptr = port_lock(ref); 330 p_ptr = tipc_port_lock(ref);
331 if (!p_ptr) 331 if (!p_ptr)
332 return 0; 332 return 0;
333 handle = p_ptr->publ.usr_handle; 333 handle = p_ptr->publ.usr_handle;
334 port_unlock(p_ptr); 334 tipc_port_unlock(p_ptr);
335 return handle; 335 return handle;
336} 336}
337 337
@@ -344,7 +344,7 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
344{ 344{
345 struct port *p_ptr; 345 struct port *p_ptr;
346 346
347 p_ptr = port_lock(ref); 347 p_ptr = tipc_port_lock(ref);
348 if (!p_ptr) 348 if (!p_ptr)
349 return -EINVAL; 349 return -EINVAL;
350 *isunreliable = port_unreliable(p_ptr); 350 *isunreliable = port_unreliable(p_ptr);
@@ -356,11 +356,11 @@ int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
356{ 356{
357 struct port *p_ptr; 357 struct port *p_ptr;
358 358
359 p_ptr = port_lock(ref); 359 p_ptr = tipc_port_lock(ref);
360 if (!p_ptr) 360 if (!p_ptr)
361 return -EINVAL; 361 return -EINVAL;
362 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0)); 362 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
363 port_unlock(p_ptr); 363 tipc_port_unlock(p_ptr);
364 return TIPC_OK; 364 return TIPC_OK;
365} 365}
366 366
@@ -373,7 +373,7 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
373{ 373{
374 struct port *p_ptr; 374 struct port *p_ptr;
375 375
376 p_ptr = port_lock(ref); 376 p_ptr = tipc_port_lock(ref);
377 if (!p_ptr) 377 if (!p_ptr)
378 return -EINVAL; 378 return -EINVAL;
379 *isunrejectable = port_unreturnable(p_ptr); 379 *isunrejectable = port_unreturnable(p_ptr);
@@ -385,11 +385,11 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
385{ 385{
386 struct port *p_ptr; 386 struct port *p_ptr;
387 387
388 p_ptr = port_lock(ref); 388 p_ptr = tipc_port_lock(ref);
389 if (!p_ptr) 389 if (!p_ptr)
390 return -EINVAL; 390 return -EINVAL;
391 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0)); 391 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
392 port_unlock(p_ptr); 392 tipc_port_unlock(p_ptr);
393 return TIPC_OK; 393 return TIPC_OK;
394} 394}
395 395
@@ -476,25 +476,25 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
476 /* send self-abort message when rejecting on a connected port */ 476 /* send self-abort message when rejecting on a connected port */
477 if (msg_connected(msg)) { 477 if (msg_connected(msg)) {
478 struct sk_buff *abuf = 0; 478 struct sk_buff *abuf = 0;
479 struct port *p_ptr = port_lock(msg_destport(msg)); 479 struct port *p_ptr = tipc_port_lock(msg_destport(msg));
480 480
481 if (p_ptr) { 481 if (p_ptr) {
482 if (p_ptr->publ.connected) 482 if (p_ptr->publ.connected)
483 abuf = port_build_self_abort_msg(p_ptr, err); 483 abuf = port_build_self_abort_msg(p_ptr, err);
484 port_unlock(p_ptr); 484 tipc_port_unlock(p_ptr);
485 } 485 }
486 net_route_msg(abuf); 486 tipc_net_route_msg(abuf);
487 } 487 }
488 488
489 /* send rejected message */ 489 /* send rejected message */
490 buf_discard(buf); 490 buf_discard(buf);
491 net_route_msg(rbuf); 491 tipc_net_route_msg(rbuf);
492 return data_sz; 492 return data_sz;
493} 493}
494 494
495int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, 495int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
496 struct iovec const *msg_sect, u32 num_sect, 496 struct iovec const *msg_sect, u32 num_sect,
497 int err) 497 int err)
498{ 498{
499 struct sk_buff *buf; 499 struct sk_buff *buf;
500 int res; 500 int res;
@@ -509,7 +509,7 @@ int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
509 509
510static void port_timeout(unsigned long ref) 510static void port_timeout(unsigned long ref)
511{ 511{
512 struct port *p_ptr = port_lock(ref); 512 struct port *p_ptr = tipc_port_lock(ref);
513 struct sk_buff *buf = 0; 513 struct sk_buff *buf = 0;
514 514
515 if (!p_ptr || !p_ptr->publ.connected) 515 if (!p_ptr || !p_ptr->publ.connected)
@@ -532,21 +532,21 @@ static void port_timeout(unsigned long ref)
532 p_ptr->probing_state = PROBING; 532 p_ptr->probing_state = PROBING;
533 k_start_timer(&p_ptr->timer, p_ptr->probing_interval); 533 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
534 } 534 }
535 port_unlock(p_ptr); 535 tipc_port_unlock(p_ptr);
536 net_route_msg(buf); 536 tipc_net_route_msg(buf);
537} 537}
538 538
539 539
540static void port_handle_node_down(unsigned long ref) 540static void port_handle_node_down(unsigned long ref)
541{ 541{
542 struct port *p_ptr = port_lock(ref); 542 struct port *p_ptr = tipc_port_lock(ref);
543 struct sk_buff* buf = 0; 543 struct sk_buff* buf = 0;
544 544
545 if (!p_ptr) 545 if (!p_ptr)
546 return; 546 return;
547 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE); 547 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
548 port_unlock(p_ptr); 548 tipc_port_unlock(p_ptr);
549 net_route_msg(buf); 549 tipc_net_route_msg(buf);
550} 550}
551 551
552 552
@@ -589,10 +589,10 @@ static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
589 0); 589 0);
590} 590}
591 591
592void port_recv_proto_msg(struct sk_buff *buf) 592void tipc_port_recv_proto_msg(struct sk_buff *buf)
593{ 593{
594 struct tipc_msg *msg = buf_msg(buf); 594 struct tipc_msg *msg = buf_msg(buf);
595 struct port *p_ptr = port_lock(msg_destport(msg)); 595 struct port *p_ptr = tipc_port_lock(msg_destport(msg));
596 u32 err = TIPC_OK; 596 u32 err = TIPC_OK;
597 struct sk_buff *r_buf = 0; 597 struct sk_buff *r_buf = 0;
598 struct sk_buff *abort_buf = 0; 598 struct sk_buff *abort_buf = 0;
@@ -615,11 +615,11 @@ void port_recv_proto_msg(struct sk_buff *buf)
615 } 615 }
616 } 616 }
617 if (msg_type(msg) == CONN_ACK) { 617 if (msg_type(msg) == CONN_ACK) {
618 int wakeup = port_congested(p_ptr) && 618 int wakeup = tipc_port_congested(p_ptr) &&
619 p_ptr->publ.congested && 619 p_ptr->publ.congested &&
620 p_ptr->wakeup; 620 p_ptr->wakeup;
621 p_ptr->acked += msg_msgcnt(msg); 621 p_ptr->acked += msg_msgcnt(msg);
622 if (port_congested(p_ptr)) 622 if (tipc_port_congested(p_ptr))
623 goto exit; 623 goto exit;
624 p_ptr->publ.congested = 0; 624 p_ptr->publ.congested = 0;
625 if (!wakeup) 625 if (!wakeup)
@@ -659,9 +659,9 @@ void port_recv_proto_msg(struct sk_buff *buf)
659 port_incr_out_seqno(p_ptr); 659 port_incr_out_seqno(p_ptr);
660exit: 660exit:
661 if (p_ptr) 661 if (p_ptr)
662 port_unlock(p_ptr); 662 tipc_port_unlock(p_ptr);
663 net_route_msg(r_buf); 663 tipc_net_route_msg(r_buf);
664 net_route_msg(abort_buf); 664 tipc_net_route_msg(abort_buf);
665 buf_discard(buf); 665 buf_discard(buf);
666} 666}
667 667
@@ -704,7 +704,7 @@ static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
704 704
705#define MAX_PORT_QUERY 32768 705#define MAX_PORT_QUERY 32768
706 706
707struct sk_buff *port_get_ports(void) 707struct sk_buff *tipc_port_get_ports(void)
708{ 708{
709 struct sk_buff *buf; 709 struct sk_buff *buf;
710 struct tlv_desc *rep_tlv; 710 struct tlv_desc *rep_tlv;
@@ -712,20 +712,20 @@ struct sk_buff *port_get_ports(void)
712 struct port *p_ptr; 712 struct port *p_ptr;
713 int str_len; 713 int str_len;
714 714
715 buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY)); 715 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
716 if (!buf) 716 if (!buf)
717 return NULL; 717 return NULL;
718 rep_tlv = (struct tlv_desc *)buf->data; 718 rep_tlv = (struct tlv_desc *)buf->data;
719 719
720 printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY); 720 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
721 spin_lock_bh(&port_list_lock); 721 spin_lock_bh(&tipc_port_list_lock);
722 list_for_each_entry(p_ptr, &ports, port_list) { 722 list_for_each_entry(p_ptr, &ports, port_list) {
723 spin_lock_bh(p_ptr->publ.lock); 723 spin_lock_bh(p_ptr->publ.lock);
724 port_print(p_ptr, &pb, 0); 724 port_print(p_ptr, &pb, 0);
725 spin_unlock_bh(p_ptr->publ.lock); 725 spin_unlock_bh(p_ptr->publ.lock);
726 } 726 }
727 spin_unlock_bh(&port_list_lock); 727 spin_unlock_bh(&tipc_port_list_lock);
728 str_len = printbuf_validate(&pb); 728 str_len = tipc_printbuf_validate(&pb);
729 729
730 skb_put(buf, TLV_SPACE(str_len)); 730 skb_put(buf, TLV_SPACE(str_len));
731 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 731 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -752,22 +752,22 @@ struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
752 ref = *(u32 *)TLV_DATA(req_tlv_area); 752 ref = *(u32 *)TLV_DATA(req_tlv_area);
753 ref = ntohl(ref); 753 ref = ntohl(ref);
754 754
755 p_ptr = port_lock(ref); 755 p_ptr = tipc_port_lock(ref);
756 if (!p_ptr) 756 if (!p_ptr)
757 return cfg_reply_error_string("port not found"); 757 return cfg_reply_error_string("port not found");
758 758
759 buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS)); 759 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
760 if (!buf) { 760 if (!buf) {
761 port_unlock(p_ptr); 761 tipc_port_unlock(p_ptr);
762 return NULL; 762 return NULL;
763 } 763 }
764 rep_tlv = (struct tlv_desc *)buf->data; 764 rep_tlv = (struct tlv_desc *)buf->data;
765 765
766 printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS); 766 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
767 port_print(p_ptr, &pb, 1); 767 port_print(p_ptr, &pb, 1);
768 /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */ 768 /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
769 port_unlock(p_ptr); 769 tipc_port_unlock(p_ptr);
770 str_len = printbuf_validate(&pb); 770 str_len = tipc_printbuf_validate(&pb);
771 771
772 skb_put(buf, TLV_SPACE(str_len)); 772 skb_put(buf, TLV_SPACE(str_len));
773 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 773 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -777,19 +777,19 @@ struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
777 777
778#endif 778#endif
779 779
780void port_reinit(void) 780void tipc_port_reinit(void)
781{ 781{
782 struct port *p_ptr; 782 struct port *p_ptr;
783 struct tipc_msg *msg; 783 struct tipc_msg *msg;
784 784
785 spin_lock_bh(&port_list_lock); 785 spin_lock_bh(&tipc_port_list_lock);
786 list_for_each_entry(p_ptr, &ports, port_list) { 786 list_for_each_entry(p_ptr, &ports, port_list) {
787 msg = &p_ptr->publ.phdr; 787 msg = &p_ptr->publ.phdr;
788 if (msg_orignode(msg) == tipc_own_addr) 788 if (msg_orignode(msg) == tipc_own_addr)
789 break; 789 break;
790 msg_set_orignode(msg, tipc_own_addr); 790 msg_set_orignode(msg, tipc_own_addr);
791 } 791 }
792 spin_unlock_bh(&port_list_lock); 792 spin_unlock_bh(&tipc_port_list_lock);
793} 793}
794 794
795 795
@@ -820,7 +820,7 @@ static void port_dispatcher_sigh(void *dummy)
820 struct tipc_msg *msg = buf_msg(buf); 820 struct tipc_msg *msg = buf_msg(buf);
821 u32 dref = msg_destport(msg); 821 u32 dref = msg_destport(msg);
822 822
823 p_ptr = port_lock(dref); 823 p_ptr = tipc_port_lock(dref);
824 if (!p_ptr) { 824 if (!p_ptr) {
825 /* Port deleted while msg in queue */ 825 /* Port deleted while msg in queue */
826 tipc_reject_msg(buf, TIPC_ERR_NO_PORT); 826 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
@@ -976,7 +976,7 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
976 msg_queue_tail = buf; 976 msg_queue_tail = buf;
977 } else { 977 } else {
978 msg_queue_tail = msg_queue_head = buf; 978 msg_queue_tail = msg_queue_head = buf;
979 k_signal((Handler)port_dispatcher_sigh, 0); 979 tipc_k_signal((Handler)port_dispatcher_sigh, 0);
980 } 980 }
981 spin_unlock_bh(&queue_lock); 981 spin_unlock_bh(&queue_lock);
982 return TIPC_OK; 982 return TIPC_OK;
@@ -994,14 +994,14 @@ static void port_wakeup_sh(unsigned long ref)
994 tipc_continue_event cb = 0; 994 tipc_continue_event cb = 0;
995 void *uh = 0; 995 void *uh = 0;
996 996
997 p_ptr = port_lock(ref); 997 p_ptr = tipc_port_lock(ref);
998 if (p_ptr) { 998 if (p_ptr) {
999 up_ptr = p_ptr->user_port; 999 up_ptr = p_ptr->user_port;
1000 if (up_ptr) { 1000 if (up_ptr) {
1001 cb = up_ptr->continue_event_cb; 1001 cb = up_ptr->continue_event_cb;
1002 uh = up_ptr->usr_handle; 1002 uh = up_ptr->usr_handle;
1003 } 1003 }
1004 port_unlock(p_ptr); 1004 tipc_port_unlock(p_ptr);
1005 } 1005 }
1006 if (cb) 1006 if (cb)
1007 cb(uh, ref); 1007 cb(uh, ref);
@@ -1010,7 +1010,7 @@ static void port_wakeup_sh(unsigned long ref)
1010 1010
1011static void port_wakeup(struct tipc_port *p_ptr) 1011static void port_wakeup(struct tipc_port *p_ptr)
1012{ 1012{
1013 k_signal((Handler)port_wakeup_sh, p_ptr->ref); 1013 tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
1014} 1014}
1015 1015
1016void tipc_acknowledge(u32 ref, u32 ack) 1016void tipc_acknowledge(u32 ref, u32 ack)
@@ -1018,7 +1018,7 @@ void tipc_acknowledge(u32 ref, u32 ack)
1018 struct port *p_ptr; 1018 struct port *p_ptr;
1019 struct sk_buff *buf = 0; 1019 struct sk_buff *buf = 0;
1020 1020
1021 p_ptr = port_lock(ref); 1021 p_ptr = tipc_port_lock(ref);
1022 if (!p_ptr) 1022 if (!p_ptr)
1023 return; 1023 return;
1024 if (p_ptr->publ.connected) { 1024 if (p_ptr->publ.connected) {
@@ -1033,8 +1033,8 @@ void tipc_acknowledge(u32 ref, u32 ack)
1033 port_out_seqno(p_ptr), 1033 port_out_seqno(p_ptr),
1034 ack); 1034 ack);
1035 } 1035 }
1036 port_unlock(p_ptr); 1036 tipc_port_unlock(p_ptr);
1037 net_route_msg(buf); 1037 tipc_net_route_msg(buf);
1038} 1038}
1039 1039
1040/* 1040/*
@@ -1063,7 +1063,7 @@ int tipc_createport(u32 user_ref,
1063 return -ENOMEM; 1063 return -ENOMEM;
1064 } 1064 }
1065 ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance); 1065 ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
1066 p_ptr = port_lock(ref); 1066 p_ptr = tipc_port_lock(ref);
1067 if (!p_ptr) { 1067 if (!p_ptr) {
1068 kfree(up_ptr); 1068 kfree(up_ptr);
1069 return -ENOMEM; 1069 return -ENOMEM;
@@ -1081,10 +1081,10 @@ int tipc_createport(u32 user_ref,
1081 up_ptr->conn_msg_cb = conn_msg_cb; 1081 up_ptr->conn_msg_cb = conn_msg_cb;
1082 up_ptr->continue_event_cb = continue_event_cb; 1082 up_ptr->continue_event_cb = continue_event_cb;
1083 INIT_LIST_HEAD(&up_ptr->uport_list); 1083 INIT_LIST_HEAD(&up_ptr->uport_list);
1084 reg_add_port(up_ptr); 1084 tipc_reg_add_port(up_ptr);
1085 *portref = p_ptr->publ.ref; 1085 *portref = p_ptr->publ.ref;
1086 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref); 1086 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
1087 port_unlock(p_ptr); 1087 tipc_port_unlock(p_ptr);
1088 return TIPC_OK; 1088 return TIPC_OK;
1089} 1089}
1090 1090
@@ -1099,7 +1099,7 @@ int tipc_portimportance(u32 ref, unsigned int *importance)
1099{ 1099{
1100 struct port *p_ptr; 1100 struct port *p_ptr;
1101 1101
1102 p_ptr = port_lock(ref); 1102 p_ptr = tipc_port_lock(ref);
1103 if (!p_ptr) 1103 if (!p_ptr)
1104 return -EINVAL; 1104 return -EINVAL;
1105 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr); 1105 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
@@ -1114,7 +1114,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
1114 if (imp > TIPC_CRITICAL_IMPORTANCE) 1114 if (imp > TIPC_CRITICAL_IMPORTANCE)
1115 return -EINVAL; 1115 return -EINVAL;
1116 1116
1117 p_ptr = port_lock(ref); 1117 p_ptr = tipc_port_lock(ref);
1118 if (!p_ptr) 1118 if (!p_ptr)
1119 return -EINVAL; 1119 return -EINVAL;
1120 msg_set_importance(&p_ptr->publ.phdr, (u32)imp); 1120 msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
@@ -1130,7 +1130,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1130 u32 key; 1130 u32 key;
1131 int res = -EINVAL; 1131 int res = -EINVAL;
1132 1132
1133 p_ptr = port_lock(ref); 1133 p_ptr = tipc_port_lock(ref);
1134 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, " 1134 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
1135 "lower = %u, upper = %u\n", 1135 "lower = %u, upper = %u\n",
1136 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper); 1136 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
@@ -1147,8 +1147,8 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1147 res = -EADDRINUSE; 1147 res = -EADDRINUSE;
1148 goto exit; 1148 goto exit;
1149 } 1149 }
1150 publ = nametbl_publish(seq->type, seq->lower, seq->upper, 1150 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
1151 scope, p_ptr->publ.ref, key); 1151 scope, p_ptr->publ.ref, key);
1152 if (publ) { 1152 if (publ) {
1153 list_add(&publ->pport_list, &p_ptr->publications); 1153 list_add(&publ->pport_list, &p_ptr->publications);
1154 p_ptr->pub_count++; 1154 p_ptr->pub_count++;
@@ -1156,7 +1156,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1156 res = TIPC_OK; 1156 res = TIPC_OK;
1157 } 1157 }
1158exit: 1158exit:
1159 port_unlock(p_ptr); 1159 tipc_port_unlock(p_ptr);
1160 return res; 1160 return res;
1161} 1161}
1162 1162
@@ -1167,7 +1167,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1167 struct publication *tpubl; 1167 struct publication *tpubl;
1168 int res = -EINVAL; 1168 int res = -EINVAL;
1169 1169
1170 p_ptr = port_lock(ref); 1170 p_ptr = tipc_port_lock(ref);
1171 if (!p_ptr) 1171 if (!p_ptr)
1172 return -EINVAL; 1172 return -EINVAL;
1173 if (!p_ptr->publ.published) 1173 if (!p_ptr->publ.published)
@@ -1175,8 +1175,8 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1175 if (!seq) { 1175 if (!seq) {
1176 list_for_each_entry_safe(publ, tpubl, 1176 list_for_each_entry_safe(publ, tpubl,
1177 &p_ptr->publications, pport_list) { 1177 &p_ptr->publications, pport_list) {
1178 nametbl_withdraw(publ->type, publ->lower, 1178 tipc_nametbl_withdraw(publ->type, publ->lower,
1179 publ->ref, publ->key); 1179 publ->ref, publ->key);
1180 } 1180 }
1181 res = TIPC_OK; 1181 res = TIPC_OK;
1182 } else { 1182 } else {
@@ -1190,8 +1190,8 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1190 continue; 1190 continue;
1191 if (publ->upper != seq->upper) 1191 if (publ->upper != seq->upper)
1192 break; 1192 break;
1193 nametbl_withdraw(publ->type, publ->lower, 1193 tipc_nametbl_withdraw(publ->type, publ->lower,
1194 publ->ref, publ->key); 1194 publ->ref, publ->key);
1195 res = TIPC_OK; 1195 res = TIPC_OK;
1196 break; 1196 break;
1197 } 1197 }
@@ -1199,7 +1199,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1199 if (list_empty(&p_ptr->publications)) 1199 if (list_empty(&p_ptr->publications))
1200 p_ptr->publ.published = 0; 1200 p_ptr->publ.published = 0;
1201exit: 1201exit:
1202 port_unlock(p_ptr); 1202 tipc_port_unlock(p_ptr);
1203 return res; 1203 return res;
1204} 1204}
1205 1205
@@ -1209,7 +1209,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1209 struct tipc_msg *msg; 1209 struct tipc_msg *msg;
1210 int res = -EINVAL; 1210 int res = -EINVAL;
1211 1211
1212 p_ptr = port_lock(ref); 1212 p_ptr = tipc_port_lock(ref);
1213 if (!p_ptr) 1213 if (!p_ptr)
1214 return -EINVAL; 1214 return -EINVAL;
1215 if (p_ptr->publ.published || p_ptr->publ.connected) 1215 if (p_ptr->publ.published || p_ptr->publ.connected)
@@ -1234,13 +1234,13 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1234 p_ptr->publ.connected = 1; 1234 p_ptr->publ.connected = 1;
1235 k_start_timer(&p_ptr->timer, p_ptr->probing_interval); 1235 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
1236 1236
1237 nodesub_subscribe(&p_ptr->subscription,peer->node, 1237 tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
1238 (void *)(unsigned long)ref, 1238 (void *)(unsigned long)ref,
1239 (net_ev_handler)port_handle_node_down); 1239 (net_ev_handler)port_handle_node_down);
1240 res = TIPC_OK; 1240 res = TIPC_OK;
1241exit: 1241exit:
1242 port_unlock(p_ptr); 1242 tipc_port_unlock(p_ptr);
1243 p_ptr->max_pkt = link_get_max_pkt(peer->node, ref); 1243 p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
1244 return res; 1244 return res;
1245} 1245}
1246 1246
@@ -1254,16 +1254,16 @@ int tipc_disconnect(u32 ref)
1254 struct port *p_ptr; 1254 struct port *p_ptr;
1255 int res = -ENOTCONN; 1255 int res = -ENOTCONN;
1256 1256
1257 p_ptr = port_lock(ref); 1257 p_ptr = tipc_port_lock(ref);
1258 if (!p_ptr) 1258 if (!p_ptr)
1259 return -EINVAL; 1259 return -EINVAL;
1260 if (p_ptr->publ.connected) { 1260 if (p_ptr->publ.connected) {
1261 p_ptr->publ.connected = 0; 1261 p_ptr->publ.connected = 0;
1262 /* let timer expire on it's own to avoid deadlock! */ 1262 /* let timer expire on it's own to avoid deadlock! */
1263 nodesub_unsubscribe(&p_ptr->subscription); 1263 tipc_nodesub_unsubscribe(&p_ptr->subscription);
1264 res = TIPC_OK; 1264 res = TIPC_OK;
1265 } 1265 }
1266 port_unlock(p_ptr); 1266 tipc_port_unlock(p_ptr);
1267 return res; 1267 return res;
1268} 1268}
1269 1269
@@ -1275,7 +1275,7 @@ int tipc_shutdown(u32 ref)
1275 struct port *p_ptr; 1275 struct port *p_ptr;
1276 struct sk_buff *buf = 0; 1276 struct sk_buff *buf = 0;
1277 1277
1278 p_ptr = port_lock(ref); 1278 p_ptr = tipc_port_lock(ref);
1279 if (!p_ptr) 1279 if (!p_ptr)
1280 return -EINVAL; 1280 return -EINVAL;
1281 1281
@@ -1293,8 +1293,8 @@ int tipc_shutdown(u32 ref)
1293 port_out_seqno(p_ptr), 1293 port_out_seqno(p_ptr),
1294 0); 1294 0);
1295 } 1295 }
1296 port_unlock(p_ptr); 1296 tipc_port_unlock(p_ptr);
1297 net_route_msg(buf); 1297 tipc_net_route_msg(buf);
1298 return tipc_disconnect(ref); 1298 return tipc_disconnect(ref);
1299} 1299}
1300 1300
@@ -1302,11 +1302,11 @@ int tipc_isconnected(u32 ref, int *isconnected)
1302{ 1302{
1303 struct port *p_ptr; 1303 struct port *p_ptr;
1304 1304
1305 p_ptr = port_lock(ref); 1305 p_ptr = tipc_port_lock(ref);
1306 if (!p_ptr) 1306 if (!p_ptr)
1307 return -EINVAL; 1307 return -EINVAL;
1308 *isconnected = p_ptr->publ.connected; 1308 *isconnected = p_ptr->publ.connected;
1309 port_unlock(p_ptr); 1309 tipc_port_unlock(p_ptr);
1310 return TIPC_OK; 1310 return TIPC_OK;
1311} 1311}
1312 1312
@@ -1315,7 +1315,7 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
1315 struct port *p_ptr; 1315 struct port *p_ptr;
1316 int res; 1316 int res;
1317 1317
1318 p_ptr = port_lock(ref); 1318 p_ptr = tipc_port_lock(ref);
1319 if (!p_ptr) 1319 if (!p_ptr)
1320 return -EINVAL; 1320 return -EINVAL;
1321 if (p_ptr->publ.connected) { 1321 if (p_ptr->publ.connected) {
@@ -1324,23 +1324,23 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
1324 res = TIPC_OK; 1324 res = TIPC_OK;
1325 } else 1325 } else
1326 res = -ENOTCONN; 1326 res = -ENOTCONN;
1327 port_unlock(p_ptr); 1327 tipc_port_unlock(p_ptr);
1328 return res; 1328 return res;
1329} 1329}
1330 1330
1331int tipc_ref_valid(u32 ref) 1331int tipc_ref_valid(u32 ref)
1332{ 1332{
1333 /* Works irrespective of type */ 1333 /* Works irrespective of type */
1334 return !!ref_deref(ref); 1334 return !!tipc_ref_deref(ref);
1335} 1335}
1336 1336
1337 1337
1338/* 1338/*
1339 * port_recv_sections(): Concatenate and deliver sectioned 1339 * tipc_port_recv_sections(): Concatenate and deliver sectioned
1340 * message for this node. 1340 * message for this node.
1341 */ 1341 */
1342 1342
1343int port_recv_sections(struct port *sender, unsigned int num_sect, 1343int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
1344 struct iovec const *msg_sect) 1344 struct iovec const *msg_sect)
1345{ 1345{
1346 struct sk_buff *buf; 1346 struct sk_buff *buf;
@@ -1349,7 +1349,7 @@ int port_recv_sections(struct port *sender, unsigned int num_sect,
1349 res = msg_build(&sender->publ.phdr, msg_sect, num_sect, 1349 res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
1350 MAX_MSG_SIZE, !sender->user_port, &buf); 1350 MAX_MSG_SIZE, !sender->user_port, &buf);
1351 if (likely(buf)) 1351 if (likely(buf))
1352 port_recv_msg(buf); 1352 tipc_port_recv_msg(buf);
1353 return res; 1353 return res;
1354} 1354}
1355 1355
@@ -1363,18 +1363,18 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1363 u32 destnode; 1363 u32 destnode;
1364 int res; 1364 int res;
1365 1365
1366 p_ptr = port_deref(ref); 1366 p_ptr = tipc_port_deref(ref);
1367 if (!p_ptr || !p_ptr->publ.connected) 1367 if (!p_ptr || !p_ptr->publ.connected)
1368 return -EINVAL; 1368 return -EINVAL;
1369 1369
1370 p_ptr->publ.congested = 1; 1370 p_ptr->publ.congested = 1;
1371 if (!port_congested(p_ptr)) { 1371 if (!tipc_port_congested(p_ptr)) {
1372 destnode = port_peernode(p_ptr); 1372 destnode = port_peernode(p_ptr);
1373 if (likely(destnode != tipc_own_addr)) 1373 if (likely(destnode != tipc_own_addr))
1374 res = link_send_sections_fast(p_ptr, msg_sect, num_sect, 1374 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1375 destnode); 1375 destnode);
1376 else 1376 else
1377 res = port_recv_sections(p_ptr, num_sect, msg_sect); 1377 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1378 1378
1379 if (likely(res != -ELINKCONG)) { 1379 if (likely(res != -ELINKCONG)) {
1380 port_incr_out_seqno(p_ptr); 1380 port_incr_out_seqno(p_ptr);
@@ -1404,7 +1404,7 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1404 u32 sz; 1404 u32 sz;
1405 u32 res; 1405 u32 res;
1406 1406
1407 p_ptr = port_deref(ref); 1407 p_ptr = tipc_port_deref(ref);
1408 if (!p_ptr || !p_ptr->publ.connected) 1408 if (!p_ptr || !p_ptr->publ.connected)
1409 return -EINVAL; 1409 return -EINVAL;
1410 1410
@@ -1419,11 +1419,11 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1419 memcpy(buf->data, (unchar *)msg, hsz); 1419 memcpy(buf->data, (unchar *)msg, hsz);
1420 destnode = msg_destnode(msg); 1420 destnode = msg_destnode(msg);
1421 p_ptr->publ.congested = 1; 1421 p_ptr->publ.congested = 1;
1422 if (!port_congested(p_ptr)) { 1422 if (!tipc_port_congested(p_ptr)) {
1423 if (likely(destnode != tipc_own_addr)) 1423 if (likely(destnode != tipc_own_addr))
1424 res = tipc_send_buf_fast(buf, destnode); 1424 res = tipc_send_buf_fast(buf, destnode);
1425 else { 1425 else {
1426 port_recv_msg(buf); 1426 tipc_port_recv_msg(buf);
1427 res = sz; 1427 res = sz;
1428 } 1428 }
1429 if (likely(res != -ELINKCONG)) { 1429 if (likely(res != -ELINKCONG)) {
@@ -1458,7 +1458,7 @@ int tipc_forward2name(u32 ref,
1458 u32 destport = 0; 1458 u32 destport = 0;
1459 int res; 1459 int res;
1460 1460
1461 p_ptr = port_deref(ref); 1461 p_ptr = tipc_port_deref(ref);
1462 if (!p_ptr || p_ptr->publ.connected) 1462 if (!p_ptr || p_ptr->publ.connected)
1463 return -EINVAL; 1463 return -EINVAL;
1464 1464
@@ -1472,16 +1472,16 @@ int tipc_forward2name(u32 ref,
1472 msg_set_lookup_scope(msg, addr_scope(domain)); 1472 msg_set_lookup_scope(msg, addr_scope(domain));
1473 if (importance <= TIPC_CRITICAL_IMPORTANCE) 1473 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1474 msg_set_importance(msg,importance); 1474 msg_set_importance(msg,importance);
1475 destport = nametbl_translate(name->type, name->instance, &destnode); 1475 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1476 msg_set_destnode(msg, destnode); 1476 msg_set_destnode(msg, destnode);
1477 msg_set_destport(msg, destport); 1477 msg_set_destport(msg, destport);
1478 1478
1479 if (likely(destport || destnode)) { 1479 if (likely(destport || destnode)) {
1480 p_ptr->sent++; 1480 p_ptr->sent++;
1481 if (likely(destnode == tipc_own_addr)) 1481 if (likely(destnode == tipc_own_addr))
1482 return port_recv_sections(p_ptr, num_sect, msg_sect); 1482 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1483 res = link_send_sections_fast(p_ptr, msg_sect, num_sect, 1483 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1484 destnode); 1484 destnode);
1485 if (likely(res != -ELINKCONG)) 1485 if (likely(res != -ELINKCONG))
1486 return res; 1486 return res;
1487 if (port_unreliable(p_ptr)) { 1487 if (port_unreliable(p_ptr)) {
@@ -1490,8 +1490,8 @@ int tipc_forward2name(u32 ref,
1490 } 1490 }
1491 return -ELINKCONG; 1491 return -ELINKCONG;
1492 } 1492 }
1493 return port_reject_sections(p_ptr, msg, msg_sect, num_sect, 1493 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1494 TIPC_ERR_NO_NAME); 1494 TIPC_ERR_NO_NAME);
1495} 1495}
1496 1496
1497/** 1497/**
@@ -1530,7 +1530,7 @@ int tipc_forward_buf2name(u32 ref,
1530 u32 destport = 0; 1530 u32 destport = 0;
1531 int res; 1531 int res;
1532 1532
1533 p_ptr = (struct port *)ref_deref(ref); 1533 p_ptr = (struct port *)tipc_ref_deref(ref);
1534 if (!p_ptr || p_ptr->publ.connected) 1534 if (!p_ptr || p_ptr->publ.connected)
1535 return -EINVAL; 1535 return -EINVAL;
1536 1536
@@ -1545,7 +1545,7 @@ int tipc_forward_buf2name(u32 ref,
1545 msg_set_lookup_scope(msg, addr_scope(domain)); 1545 msg_set_lookup_scope(msg, addr_scope(domain));
1546 msg_set_hdr_sz(msg, LONG_H_SIZE); 1546 msg_set_hdr_sz(msg, LONG_H_SIZE);
1547 msg_set_size(msg, LONG_H_SIZE + dsz); 1547 msg_set_size(msg, LONG_H_SIZE + dsz);
1548 destport = nametbl_translate(name->type, name->instance, &destnode); 1548 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1549 msg_set_destnode(msg, destnode); 1549 msg_set_destnode(msg, destnode);
1550 msg_set_destport(msg, destport); 1550 msg_set_destport(msg, destport);
1551 msg_dbg(msg, "forw2name ==> "); 1551 msg_dbg(msg, "forw2name ==> ");
@@ -1557,7 +1557,7 @@ int tipc_forward_buf2name(u32 ref,
1557 if (likely(destport || destnode)) { 1557 if (likely(destport || destnode)) {
1558 p_ptr->sent++; 1558 p_ptr->sent++;
1559 if (destnode == tipc_own_addr) 1559 if (destnode == tipc_own_addr)
1560 return port_recv_msg(buf); 1560 return tipc_port_recv_msg(buf);
1561 res = tipc_send_buf_fast(buf, destnode); 1561 res = tipc_send_buf_fast(buf, destnode);
1562 if (likely(res != -ELINKCONG)) 1562 if (likely(res != -ELINKCONG))
1563 return res; 1563 return res;
@@ -1601,7 +1601,7 @@ int tipc_forward2port(u32 ref,
1601 struct tipc_msg *msg; 1601 struct tipc_msg *msg;
1602 int res; 1602 int res;
1603 1603
1604 p_ptr = port_deref(ref); 1604 p_ptr = tipc_port_deref(ref);
1605 if (!p_ptr || p_ptr->publ.connected) 1605 if (!p_ptr || p_ptr->publ.connected)
1606 return -EINVAL; 1606 return -EINVAL;
1607 1607
@@ -1616,8 +1616,8 @@ int tipc_forward2port(u32 ref,
1616 msg_set_importance(msg, importance); 1616 msg_set_importance(msg, importance);
1617 p_ptr->sent++; 1617 p_ptr->sent++;
1618 if (dest->node == tipc_own_addr) 1618 if (dest->node == tipc_own_addr)
1619 return port_recv_sections(p_ptr, num_sect, msg_sect); 1619 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1620 res = link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node); 1620 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
1621 if (likely(res != -ELINKCONG)) 1621 if (likely(res != -ELINKCONG))
1622 return res; 1622 return res;
1623 if (port_unreliable(p_ptr)) { 1623 if (port_unreliable(p_ptr)) {
@@ -1658,7 +1658,7 @@ int tipc_forward_buf2port(u32 ref,
1658 struct tipc_msg *msg; 1658 struct tipc_msg *msg;
1659 int res; 1659 int res;
1660 1660
1661 p_ptr = (struct port *)ref_deref(ref); 1661 p_ptr = (struct port *)tipc_ref_deref(ref);
1662 if (!p_ptr || p_ptr->publ.connected) 1662 if (!p_ptr || p_ptr->publ.connected)
1663 return -EINVAL; 1663 return -EINVAL;
1664 1664
@@ -1680,7 +1680,7 @@ int tipc_forward_buf2port(u32 ref,
1680 msg_dbg(msg, "buf2port: "); 1680 msg_dbg(msg, "buf2port: ");
1681 p_ptr->sent++; 1681 p_ptr->sent++;
1682 if (dest->node == tipc_own_addr) 1682 if (dest->node == tipc_own_addr)
1683 return port_recv_msg(buf); 1683 return tipc_port_recv_msg(buf);
1684 res = tipc_send_buf_fast(buf, dest->node); 1684 res = tipc_send_buf_fast(buf, dest->node);
1685 if (likely(res != -ELINKCONG)) 1685 if (likely(res != -ELINKCONG))
1686 return res; 1686 return res;
diff --git a/net/tipc/port.h b/net/tipc/port.h
index e829a99d3b7..839f100da64 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -37,7 +37,7 @@
37#ifndef _TIPC_PORT_H 37#ifndef _TIPC_PORT_H
38#define _TIPC_PORT_H 38#define _TIPC_PORT_H
39 39
40#include <net/tipc/tipc_port.h> 40#include "core.h"
41#include "ref.h" 41#include "ref.h"
42#include "net.h" 42#include "net.h"
43#include "msg.h" 43#include "msg.h"
@@ -110,65 +110,65 @@ struct port {
110 struct node_subscr subscription; 110 struct node_subscr subscription;
111}; 111};
112 112
113extern spinlock_t port_list_lock; 113extern spinlock_t tipc_port_list_lock;
114struct port_list; 114struct port_list;
115 115
116int port_recv_sections(struct port *p_ptr, u32 num_sect, 116int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect,
117 struct iovec const *msg_sect); 117 struct iovec const *msg_sect);
118int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, 118int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
119 struct iovec const *msg_sect, u32 num_sect, 119 struct iovec const *msg_sect, u32 num_sect,
120 int err); 120 int err);
121struct sk_buff *port_get_ports(void); 121struct sk_buff *tipc_port_get_ports(void);
122struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space); 122struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
123void port_recv_proto_msg(struct sk_buff *buf); 123void tipc_port_recv_proto_msg(struct sk_buff *buf);
124void port_recv_mcast(struct sk_buff *buf, struct port_list *dp); 124void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
125void port_reinit(void); 125void tipc_port_reinit(void);
126 126
127/** 127/**
128 * port_lock - lock port instance referred to and return its pointer 128 * tipc_port_lock - lock port instance referred to and return its pointer
129 */ 129 */
130 130
131static inline struct port *port_lock(u32 ref) 131static inline struct port *tipc_port_lock(u32 ref)
132{ 132{
133 return (struct port *)ref_lock(ref); 133 return (struct port *)tipc_ref_lock(ref);
134} 134}
135 135
136/** 136/**
137 * port_unlock - unlock a port instance 137 * tipc_port_unlock - unlock a port instance
138 * 138 *
139 * Can use pointer instead of ref_unlock() since port is already locked. 139 * Can use pointer instead of tipc_ref_unlock() since port is already locked.
140 */ 140 */
141 141
142static inline void port_unlock(struct port *p_ptr) 142static inline void tipc_port_unlock(struct port *p_ptr)
143{ 143{
144 spin_unlock_bh(p_ptr->publ.lock); 144 spin_unlock_bh(p_ptr->publ.lock);
145} 145}
146 146
147static inline struct port* port_deref(u32 ref) 147static inline struct port* tipc_port_deref(u32 ref)
148{ 148{
149 return (struct port *)ref_deref(ref); 149 return (struct port *)tipc_ref_deref(ref);
150} 150}
151 151
152static inline u32 peer_port(struct port *p_ptr) 152static inline u32 tipc_peer_port(struct port *p_ptr)
153{ 153{
154 return msg_destport(&p_ptr->publ.phdr); 154 return msg_destport(&p_ptr->publ.phdr);
155} 155}
156 156
157static inline u32 peer_node(struct port *p_ptr) 157static inline u32 tipc_peer_node(struct port *p_ptr)
158{ 158{
159 return msg_destnode(&p_ptr->publ.phdr); 159 return msg_destnode(&p_ptr->publ.phdr);
160} 160}
161 161
162static inline int port_congested(struct port *p_ptr) 162static inline int tipc_port_congested(struct port *p_ptr)
163{ 163{
164 return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2)); 164 return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
165} 165}
166 166
167/** 167/**
168 * port_recv_msg - receive message from lower layer and deliver to port user 168 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
169 */ 169 */
170 170
171static inline int port_recv_msg(struct sk_buff *buf) 171static inline int tipc_port_recv_msg(struct sk_buff *buf)
172{ 172{
173 struct port *p_ptr; 173 struct port *p_ptr;
174 struct tipc_msg *msg = buf_msg(buf); 174 struct tipc_msg *msg = buf_msg(buf);
@@ -178,24 +178,24 @@ static inline int port_recv_msg(struct sk_buff *buf)
178 178
179 /* forward unresolved named message */ 179 /* forward unresolved named message */
180 if (unlikely(!destport)) { 180 if (unlikely(!destport)) {
181 net_route_msg(buf); 181 tipc_net_route_msg(buf);
182 return dsz; 182 return dsz;
183 } 183 }
184 184
185 /* validate destination & pass to port, otherwise reject message */ 185 /* validate destination & pass to port, otherwise reject message */
186 p_ptr = port_lock(destport); 186 p_ptr = tipc_port_lock(destport);
187 if (likely(p_ptr)) { 187 if (likely(p_ptr)) {
188 if (likely(p_ptr->publ.connected)) { 188 if (likely(p_ptr->publ.connected)) {
189 if ((unlikely(msg_origport(msg) != peer_port(p_ptr))) || 189 if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
190 (unlikely(msg_orignode(msg) != peer_node(p_ptr))) || 190 (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
191 (unlikely(!msg_connected(msg)))) { 191 (unlikely(!msg_connected(msg)))) {
192 err = TIPC_ERR_NO_PORT; 192 err = TIPC_ERR_NO_PORT;
193 port_unlock(p_ptr); 193 tipc_port_unlock(p_ptr);
194 goto reject; 194 goto reject;
195 } 195 }
196 } 196 }
197 err = p_ptr->dispatcher(&p_ptr->publ, buf); 197 err = p_ptr->dispatcher(&p_ptr->publ, buf);
198 port_unlock(p_ptr); 198 tipc_port_unlock(p_ptr);
199 if (likely(!err)) 199 if (likely(!err))
200 return dsz; 200 return dsz;
201 } else { 201 } else {
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 944093fe246..5a13c2defe4 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -61,15 +61,15 @@
61 * because entry 0's reference field has the form XXXX|1--1. 61 * because entry 0's reference field has the form XXXX|1--1.
62 */ 62 */
63 63
64struct ref_table ref_table = { 0 }; 64struct ref_table tipc_ref_table = { 0 };
65 65
66rwlock_t reftbl_lock = RW_LOCK_UNLOCKED; 66static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
67 67
68/** 68/**
69 * ref_table_init - create reference table for objects 69 * tipc_ref_table_init - create reference table for objects
70 */ 70 */
71 71
72int ref_table_init(u32 requested_size, u32 start) 72int tipc_ref_table_init(u32 requested_size, u32 start)
73{ 73{
74 struct reference *table; 74 struct reference *table;
75 u32 sz = 1 << 4; 75 u32 sz = 1 << 4;
@@ -83,43 +83,43 @@ int ref_table_init(u32 requested_size, u32 start)
83 if (table == NULL) 83 if (table == NULL)
84 return -ENOMEM; 84 return -ENOMEM;
85 85
86 write_lock_bh(&reftbl_lock); 86 write_lock_bh(&ref_table_lock);
87 index_mask = sz - 1; 87 index_mask = sz - 1;
88 for (i = sz - 1; i >= 0; i--) { 88 for (i = sz - 1; i >= 0; i--) {
89 table[i].object = 0; 89 table[i].object = 0;
90 table[i].lock = SPIN_LOCK_UNLOCKED; 90 table[i].lock = SPIN_LOCK_UNLOCKED;
91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1; 91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
92 } 92 }
93 ref_table.entries = table; 93 tipc_ref_table.entries = table;
94 ref_table.index_mask = index_mask; 94 tipc_ref_table.index_mask = index_mask;
95 ref_table.first_free = sz - 1; 95 tipc_ref_table.first_free = sz - 1;
96 ref_table.last_free = 1; 96 tipc_ref_table.last_free = 1;
97 write_unlock_bh(&reftbl_lock); 97 write_unlock_bh(&ref_table_lock);
98 return TIPC_OK; 98 return TIPC_OK;
99} 99}
100 100
101/** 101/**
102 * ref_table_stop - destroy reference table for objects 102 * tipc_ref_table_stop - destroy reference table for objects
103 */ 103 */
104 104
105void ref_table_stop(void) 105void tipc_ref_table_stop(void)
106{ 106{
107 if (!ref_table.entries) 107 if (!tipc_ref_table.entries)
108 return; 108 return;
109 109
110 vfree(ref_table.entries); 110 vfree(tipc_ref_table.entries);
111 ref_table.entries = 0; 111 tipc_ref_table.entries = 0;
112} 112}
113 113
114/** 114/**
115 * ref_acquire - create reference to an object 115 * tipc_ref_acquire - create reference to an object
116 * 116 *
117 * Return a unique reference value which can be translated back to the pointer 117 * Return a unique reference value which can be translated back to the pointer
118 * 'object' at a later time. Also, pass back a pointer to the lock protecting 118 * 'object' at a later time. Also, pass back a pointer to the lock protecting
119 * the object, but without locking it. 119 * the object, but without locking it.
120 */ 120 */
121 121
122u32 ref_acquire(void *object, spinlock_t **lock) 122u32 tipc_ref_acquire(void *object, spinlock_t **lock)
123{ 123{
124 struct reference *entry; 124 struct reference *entry;
125 u32 index; 125 u32 index;
@@ -127,17 +127,17 @@ u32 ref_acquire(void *object, spinlock_t **lock)
127 u32 next_plus_upper; 127 u32 next_plus_upper;
128 u32 reference = 0; 128 u32 reference = 0;
129 129
130 assert(ref_table.entries && object); 130 assert(tipc_ref_table.entries && object);
131 131
132 write_lock_bh(&reftbl_lock); 132 write_lock_bh(&ref_table_lock);
133 if (ref_table.first_free) { 133 if (tipc_ref_table.first_free) {
134 index = ref_table.first_free; 134 index = tipc_ref_table.first_free;
135 entry = &(ref_table.entries[index]); 135 entry = &(tipc_ref_table.entries[index]);
136 index_mask = ref_table.index_mask; 136 index_mask = tipc_ref_table.index_mask;
137 /* take lock in case a previous user of entry still holds it */ 137 /* take lock in case a previous user of entry still holds it */
138 spin_lock_bh(&entry->lock); 138 spin_lock_bh(&entry->lock);
139 next_plus_upper = entry->data.next_plus_upper; 139 next_plus_upper = entry->data.next_plus_upper;
140 ref_table.first_free = next_plus_upper & index_mask; 140 tipc_ref_table.first_free = next_plus_upper & index_mask;
141 reference = (next_plus_upper & ~index_mask) + index; 141 reference = (next_plus_upper & ~index_mask) + index;
142 entry->data.reference = reference; 142 entry->data.reference = reference;
143 entry->object = object; 143 entry->object = object;
@@ -145,45 +145,45 @@ u32 ref_acquire(void *object, spinlock_t **lock)
145 *lock = &entry->lock; 145 *lock = &entry->lock;
146 spin_unlock_bh(&entry->lock); 146 spin_unlock_bh(&entry->lock);
147 } 147 }
148 write_unlock_bh(&reftbl_lock); 148 write_unlock_bh(&ref_table_lock);
149 return reference; 149 return reference;
150} 150}
151 151
152/** 152/**
153 * ref_discard - invalidate references to an object 153 * tipc_ref_discard - invalidate references to an object
154 * 154 *
155 * Disallow future references to an object and free up the entry for re-use. 155 * Disallow future references to an object and free up the entry for re-use.
156 * Note: The entry's spin_lock may still be busy after discard 156 * Note: The entry's spin_lock may still be busy after discard
157 */ 157 */
158 158
159void ref_discard(u32 ref) 159void tipc_ref_discard(u32 ref)
160{ 160{
161 struct reference *entry; 161 struct reference *entry;
162 u32 index; 162 u32 index;
163 u32 index_mask; 163 u32 index_mask;
164 164
165 assert(ref_table.entries); 165 assert(tipc_ref_table.entries);
166 assert(ref != 0); 166 assert(ref != 0);
167 167
168 write_lock_bh(&reftbl_lock); 168 write_lock_bh(&ref_table_lock);
169 index_mask = ref_table.index_mask; 169 index_mask = tipc_ref_table.index_mask;
170 index = ref & index_mask; 170 index = ref & index_mask;
171 entry = &(ref_table.entries[index]); 171 entry = &(tipc_ref_table.entries[index]);
172 assert(entry->object != 0); 172 assert(entry->object != 0);
173 assert(entry->data.reference == ref); 173 assert(entry->data.reference == ref);
174 174
175 /* mark entry as unused */ 175 /* mark entry as unused */
176 entry->object = 0; 176 entry->object = 0;
177 if (ref_table.first_free == 0) 177 if (tipc_ref_table.first_free == 0)
178 ref_table.first_free = index; 178 tipc_ref_table.first_free = index;
179 else 179 else
180 /* next_plus_upper is always XXXX|0--0 for last free entry */ 180 /* next_plus_upper is always XXXX|0--0 for last free entry */
181 ref_table.entries[ref_table.last_free].data.next_plus_upper 181 tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
182 |= index; 182 |= index;
183 ref_table.last_free = index; 183 tipc_ref_table.last_free = index;
184 184
185 /* increment upper bits of entry to invalidate subsequent references */ 185 /* increment upper bits of entry to invalidate subsequent references */
186 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1); 186 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
187 write_unlock_bh(&reftbl_lock); 187 write_unlock_bh(&ref_table_lock);
188} 188}
189 189
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
index 429cde57228..4f8f9f40dca 100644
--- a/net/tipc/ref.h
+++ b/net/tipc/ref.h
@@ -54,7 +54,7 @@ struct reference {
54}; 54};
55 55
56/** 56/**
57 * struct ref_table - table of TIPC object reference entries 57 * struct tipc_ref_table - table of TIPC object reference entries
58 * @entries: pointer to array of reference entries 58 * @entries: pointer to array of reference entries
59 * @index_mask: bitmask for array index portion of reference values 59 * @index_mask: bitmask for array index portion of reference values
60 * @first_free: array index of first unused object reference entry 60 * @first_free: array index of first unused object reference entry
@@ -68,24 +68,24 @@ struct ref_table {
68 u32 last_free; 68 u32 last_free;
69}; 69};
70 70
71extern struct ref_table ref_table; 71extern struct ref_table tipc_ref_table;
72 72
73int ref_table_init(u32 requested_size, u32 start); 73int tipc_ref_table_init(u32 requested_size, u32 start);
74void ref_table_stop(void); 74void tipc_ref_table_stop(void);
75 75
76u32 ref_acquire(void *object, spinlock_t **lock); 76u32 tipc_ref_acquire(void *object, spinlock_t **lock);
77void ref_discard(u32 ref); 77void tipc_ref_discard(u32 ref);
78 78
79 79
80/** 80/**
81 * ref_lock - lock referenced object and return pointer to it 81 * tipc_ref_lock - lock referenced object and return pointer to it
82 */ 82 */
83 83
84static inline void *ref_lock(u32 ref) 84static inline void *tipc_ref_lock(u32 ref)
85{ 85{
86 if (likely(ref_table.entries)) { 86 if (likely(tipc_ref_table.entries)) {
87 struct reference *r = 87 struct reference *r =
88 &ref_table.entries[ref & ref_table.index_mask]; 88 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
89 89
90 spin_lock_bh(&r->lock); 90 spin_lock_bh(&r->lock);
91 if (likely(r->data.reference == ref)) 91 if (likely(r->data.reference == ref))
@@ -96,31 +96,31 @@ static inline void *ref_lock(u32 ref)
96} 96}
97 97
98/** 98/**
99 * ref_unlock - unlock referenced object 99 * tipc_ref_unlock - unlock referenced object
100 */ 100 */
101 101
102static inline void ref_unlock(u32 ref) 102static inline void tipc_ref_unlock(u32 ref)
103{ 103{
104 if (likely(ref_table.entries)) { 104 if (likely(tipc_ref_table.entries)) {
105 struct reference *r = 105 struct reference *r =
106 &ref_table.entries[ref & ref_table.index_mask]; 106 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
107 107
108 if (likely(r->data.reference == ref)) 108 if (likely(r->data.reference == ref))
109 spin_unlock_bh(&r->lock); 109 spin_unlock_bh(&r->lock);
110 else 110 else
111 err("ref_unlock() invoked using obsolete reference\n"); 111 err("tipc_ref_unlock() invoked using obsolete reference\n");
112 } 112 }
113} 113}
114 114
115/** 115/**
116 * ref_deref - return pointer referenced object (without locking it) 116 * tipc_ref_deref - return pointer referenced object (without locking it)
117 */ 117 */
118 118
119static inline void *ref_deref(u32 ref) 119static inline void *tipc_ref_deref(u32 ref)
120{ 120{
121 if (likely(ref_table.entries)) { 121 if (likely(tipc_ref_table.entries)) {
122 struct reference *r = 122 struct reference *r =
123 &ref_table.entries[ref & ref_table.index_mask]; 123 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
124 124
125 if (likely(r->data.reference == ref)) 125 if (likely(r->data.reference == ref))
126 return r->object; 126 return r->object;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index d21f8c0cd25..67253bfcd70 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -42,9 +42,7 @@
42#include <linux/mm.h> 42#include <linux/mm.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/version.h>
46#include <linux/fcntl.h> 45#include <linux/fcntl.h>
47#include <linux/version.h>
48#include <asm/semaphore.h> 46#include <asm/semaphore.h>
49#include <asm/string.h> 47#include <asm/string.h>
50#include <asm/atomic.h> 48#include <asm/atomic.h>
@@ -1185,7 +1183,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1185 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { 1183 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1186 sock->state = SS_DISCONNECTING; 1184 sock->state = SS_DISCONNECTING;
1187 /* Note: Use signal since port lock is already taken! */ 1185 /* Note: Use signal since port lock is already taken! */
1188 k_signal((Handler)async_disconnect, tport->ref); 1186 tipc_k_signal((Handler)async_disconnect, tport->ref);
1189 } 1187 }
1190 1188
1191 /* Enqueue message (finally!) */ 1189 /* Enqueue message (finally!) */
@@ -1685,11 +1683,11 @@ static struct proto tipc_proto = {
1685}; 1683};
1686 1684
1687/** 1685/**
1688 * socket_init - initialize TIPC socket interface 1686 * tipc_socket_init - initialize TIPC socket interface
1689 * 1687 *
1690 * Returns 0 on success, errno otherwise 1688 * Returns 0 on success, errno otherwise
1691 */ 1689 */
1692int socket_init(void) 1690int tipc_socket_init(void)
1693{ 1691{
1694 int res; 1692 int res;
1695 1693
@@ -1712,9 +1710,9 @@ int socket_init(void)
1712} 1710}
1713 1711
1714/** 1712/**
1715 * sock_stop - stop TIPC socket interface 1713 * tipc_socket_stop - stop TIPC socket interface
1716 */ 1714 */
1717void socket_stop(void) 1715void tipc_socket_stop(void)
1718{ 1716{
1719 if (!sockets_enabled) 1717 if (!sockets_enabled)
1720 return; 1718 return;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 80e219ba527..5ff38b9f319 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -118,14 +118,14 @@ static void subscr_send_event(struct subscription *sub,
118} 118}
119 119
120/** 120/**
121 * subscr_overlap - test for subscription overlap with the given values 121 * tipc_subscr_overlap - test for subscription overlap with the given values
122 * 122 *
123 * Returns 1 if there is overlap, otherwise 0. 123 * Returns 1 if there is overlap, otherwise 0.
124 */ 124 */
125 125
126int subscr_overlap(struct subscription *sub, 126int tipc_subscr_overlap(struct subscription *sub,
127 u32 found_lower, 127 u32 found_lower,
128 u32 found_upper) 128 u32 found_upper)
129 129
130{ 130{
131 if (found_lower < sub->seq.lower) 131 if (found_lower < sub->seq.lower)
@@ -138,22 +138,22 @@ int subscr_overlap(struct subscription *sub,
138} 138}
139 139
140/** 140/**
141 * subscr_report_overlap - issue event if there is subscription overlap 141 * tipc_subscr_report_overlap - issue event if there is subscription overlap
142 * 142 *
143 * Protected by nameseq.lock in name_table.c 143 * Protected by nameseq.lock in name_table.c
144 */ 144 */
145 145
146void subscr_report_overlap(struct subscription *sub, 146void tipc_subscr_report_overlap(struct subscription *sub,
147 u32 found_lower, 147 u32 found_lower,
148 u32 found_upper, 148 u32 found_upper,
149 u32 event, 149 u32 event,
150 u32 port_ref, 150 u32 port_ref,
151 u32 node, 151 u32 node,
152 int must) 152 int must)
153{ 153{
154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower, 154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
155 sub->seq.upper, found_lower, found_upper); 155 sub->seq.upper, found_lower, found_upper);
156 if (!subscr_overlap(sub, found_lower, found_upper)) 156 if (!tipc_subscr_overlap(sub, found_lower, found_upper))
157 return; 157 return;
158 if (!must && (sub->filter != TIPC_SUB_PORTS)) 158 if (!must && (sub->filter != TIPC_SUB_PORTS))
159 return; 159 return;
@@ -172,13 +172,13 @@ static void subscr_timeout(struct subscription *sub)
172 /* Validate subscriber reference (in case subscriber is terminating) */ 172 /* Validate subscriber reference (in case subscriber is terminating) */
173 173
174 subscriber_ref = sub->owner->ref; 174 subscriber_ref = sub->owner->ref;
175 subscriber = (struct subscriber *)ref_lock(subscriber_ref); 175 subscriber = (struct subscriber *)tipc_ref_lock(subscriber_ref);
176 if (subscriber == NULL) 176 if (subscriber == NULL)
177 return; 177 return;
178 178
179 /* Unlink subscription from name table */ 179 /* Unlink subscription from name table */
180 180
181 nametbl_unsubscribe(sub); 181 tipc_nametbl_unsubscribe(sub);
182 182
183 /* Notify subscriber of timeout, then unlink subscription */ 183 /* Notify subscriber of timeout, then unlink subscription */
184 184
@@ -192,7 +192,7 @@ static void subscr_timeout(struct subscription *sub)
192 192
193 /* Now destroy subscription */ 193 /* Now destroy subscription */
194 194
195 ref_unlock(subscriber_ref); 195 tipc_ref_unlock(subscriber_ref);
196 k_term_timer(&sub->timer); 196 k_term_timer(&sub->timer);
197 kfree(sub); 197 kfree(sub);
198 atomic_dec(&topsrv.subscription_count); 198 atomic_dec(&topsrv.subscription_count);
@@ -216,7 +216,7 @@ static void subscr_terminate(struct subscriber *subscriber)
216 216
217 /* Invalidate subscriber reference */ 217 /* Invalidate subscriber reference */
218 218
219 ref_discard(subscriber->ref); 219 tipc_ref_discard(subscriber->ref);
220 spin_unlock_bh(subscriber->lock); 220 spin_unlock_bh(subscriber->lock);
221 221
222 /* Destroy any existing subscriptions for subscriber */ 222 /* Destroy any existing subscriptions for subscriber */
@@ -227,7 +227,7 @@ static void subscr_terminate(struct subscriber *subscriber)
227 k_cancel_timer(&sub->timer); 227 k_cancel_timer(&sub->timer);
228 k_term_timer(&sub->timer); 228 k_term_timer(&sub->timer);
229 } 229 }
230 nametbl_unsubscribe(sub); 230 tipc_nametbl_unsubscribe(sub);
231 list_del(&sub->subscription_list); 231 list_del(&sub->subscription_list);
232 dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n", 232 dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n",
233 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); 233 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
@@ -315,7 +315,7 @@ static void subscr_subscribe(struct tipc_subscr *s,
315 k_start_timer(&sub->timer, sub->timeout); 315 k_start_timer(&sub->timer, sub->timeout);
316 } 316 }
317 sub->owner = subscriber; 317 sub->owner = subscriber;
318 nametbl_subscribe(sub); 318 tipc_nametbl_subscribe(sub);
319} 319}
320 320
321/** 321/**
@@ -332,7 +332,7 @@ static void subscr_conn_shutdown_event(void *usr_handle,
332 struct subscriber *subscriber; 332 struct subscriber *subscriber;
333 spinlock_t *subscriber_lock; 333 spinlock_t *subscriber_lock;
334 334
335 subscriber = ref_lock((u32)(unsigned long)usr_handle); 335 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle);
336 if (subscriber == NULL) 336 if (subscriber == NULL)
337 return; 337 return;
338 338
@@ -354,7 +354,7 @@ static void subscr_conn_msg_event(void *usr_handle,
354 struct subscriber *subscriber; 354 struct subscriber *subscriber;
355 spinlock_t *subscriber_lock; 355 spinlock_t *subscriber_lock;
356 356
357 subscriber = ref_lock((u32)(unsigned long)usr_handle); 357 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle);
358 if (subscriber == NULL) 358 if (subscriber == NULL)
359 return; 359 return;
360 360
@@ -401,7 +401,7 @@ static void subscr_named_msg_event(void *usr_handle,
401 memset(subscriber, 0, sizeof(struct subscriber)); 401 memset(subscriber, 0, sizeof(struct subscriber));
402 INIT_LIST_HEAD(&subscriber->subscription_list); 402 INIT_LIST_HEAD(&subscriber->subscription_list);
403 INIT_LIST_HEAD(&subscriber->subscriber_list); 403 INIT_LIST_HEAD(&subscriber->subscriber_list);
404 subscriber->ref = ref_acquire(subscriber, &subscriber->lock); 404 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
405 if (subscriber->ref == 0) { 405 if (subscriber->ref == 0) {
406 warn("Failed to acquire subscriber reference\n"); 406 warn("Failed to acquire subscriber reference\n");
407 kfree(subscriber); 407 kfree(subscriber);
@@ -423,7 +423,7 @@ static void subscr_named_msg_event(void *usr_handle,
423 &subscriber->port_ref); 423 &subscriber->port_ref);
424 if (subscriber->port_ref == 0) { 424 if (subscriber->port_ref == 0) {
425 warn("Memory squeeze; failed to create subscription port\n"); 425 warn("Memory squeeze; failed to create subscription port\n");
426 ref_discard(subscriber->ref); 426 tipc_ref_discard(subscriber->ref);
427 kfree(subscriber); 427 kfree(subscriber);
428 return; 428 return;
429 } 429 }
@@ -432,7 +432,7 @@ static void subscr_named_msg_event(void *usr_handle,
432 432
433 /* Add subscriber to topology server's subscriber list */ 433 /* Add subscriber to topology server's subscriber list */
434 434
435 ref_lock(subscriber->ref); 435 tipc_ref_lock(subscriber->ref);
436 spin_lock_bh(&topsrv.lock); 436 spin_lock_bh(&topsrv.lock);
437 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); 437 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
438 spin_unlock_bh(&topsrv.lock); 438 spin_unlock_bh(&topsrv.lock);
@@ -451,7 +451,7 @@ static void subscr_named_msg_event(void *usr_handle,
451 spin_unlock_bh(subscriber_lock); 451 spin_unlock_bh(subscriber_lock);
452} 452}
453 453
454int subscr_start(void) 454int tipc_subscr_start(void)
455{ 455{
456 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV}; 456 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
457 int res = -1; 457 int res = -1;
@@ -481,7 +481,7 @@ int subscr_start(void)
481 if (res) 481 if (res)
482 goto failed; 482 goto failed;
483 483
484 res = nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq); 484 res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
485 if (res) 485 if (res)
486 goto failed; 486 goto failed;
487 487
@@ -496,7 +496,7 @@ failed:
496 return res; 496 return res;
497} 497}
498 498
499void subscr_stop(void) 499void tipc_subscr_stop(void)
500{ 500{
501 struct subscriber *subscriber; 501 struct subscriber *subscriber;
502 struct subscriber *subscriber_temp; 502 struct subscriber *subscriber_temp;
@@ -507,7 +507,7 @@ void subscr_stop(void)
507 list_for_each_entry_safe(subscriber, subscriber_temp, 507 list_for_each_entry_safe(subscriber, subscriber_temp,
508 &topsrv.subscriber_list, 508 &topsrv.subscriber_list,
509 subscriber_list) { 509 subscriber_list) {
510 ref_lock(subscriber->ref); 510 tipc_ref_lock(subscriber->ref);
511 subscriber_lock = subscriber->lock; 511 subscriber_lock = subscriber->lock;
512 subscr_terminate(subscriber); 512 subscr_terminate(subscriber);
513 spin_unlock_bh(subscriber_lock); 513 spin_unlock_bh(subscriber_lock);
@@ -522,6 +522,6 @@ int tipc_ispublished(struct tipc_name const *name)
522{ 522{
523 u32 domain = 0; 523 u32 domain = 0;
524 524
525 return(nametbl_translate(name->type, name->instance,&domain) != 0); 525 return(tipc_nametbl_translate(name->type, name->instance,&domain) != 0);
526} 526}
527 527
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ccff4efcb75..1e5090465d2 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -60,21 +60,21 @@ struct subscription {
60 struct subscriber *owner; 60 struct subscriber *owner;
61}; 61};
62 62
63int subscr_overlap(struct subscription * sub, 63int tipc_subscr_overlap(struct subscription * sub,
64 u32 found_lower, 64 u32 found_lower,
65 u32 found_upper); 65 u32 found_upper);
66 66
67void subscr_report_overlap(struct subscription * sub, 67void tipc_subscr_report_overlap(struct subscription * sub,
68 u32 found_lower, 68 u32 found_lower,
69 u32 found_upper, 69 u32 found_upper,
70 u32 event, 70 u32 event,
71 u32 port_ref, 71 u32 port_ref,
72 u32 node, 72 u32 node,
73 int must_report); 73 int must_report);
74 74
75int subscr_start(void); 75int tipc_subscr_start(void);
76 76
77void subscr_stop(void); 77void tipc_subscr_stop(void);
78 78
79 79
80#endif 80#endif
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 35ec7dc8211..106200d7658 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -114,10 +114,10 @@ static void reg_callback(struct tipc_user *user_ptr)
114} 114}
115 115
116/** 116/**
117 * reg_start - activate TIPC user registry 117 * tipc_reg_start - activate TIPC user registry
118 */ 118 */
119 119
120int reg_start(void) 120int tipc_reg_start(void)
121{ 121{
122 u32 u; 122 u32 u;
123 int res; 123 int res;
@@ -127,17 +127,17 @@ int reg_start(void)
127 127
128 for (u = 1; u <= MAX_USERID; u++) { 128 for (u = 1; u <= MAX_USERID; u++) {
129 if (users[u].callback) 129 if (users[u].callback)
130 k_signal((Handler)reg_callback, 130 tipc_k_signal((Handler)reg_callback,
131 (unsigned long)&users[u]); 131 (unsigned long)&users[u]);
132 } 132 }
133 return TIPC_OK; 133 return TIPC_OK;
134} 134}
135 135
136/** 136/**
137 * reg_stop - shut down & delete TIPC user registry 137 * tipc_reg_stop - shut down & delete TIPC user registry
138 */ 138 */
139 139
140void reg_stop(void) 140void tipc_reg_stop(void)
141{ 141{
142 int id; 142 int id;
143 143
@@ -184,7 +184,7 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
184 atomic_inc(&tipc_user_count); 184 atomic_inc(&tipc_user_count);
185 185
186 if (cb && (tipc_mode != TIPC_NOT_RUNNING)) 186 if (cb && (tipc_mode != TIPC_NOT_RUNNING))
187 k_signal((Handler)reg_callback, (unsigned long)user_ptr); 187 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
188 return TIPC_OK; 188 return TIPC_OK;
189} 189}
190 190
@@ -223,10 +223,10 @@ void tipc_detach(u32 userid)
223} 223}
224 224
225/** 225/**
226 * reg_add_port - register a user's driver port 226 * tipc_reg_add_port - register a user's driver port
227 */ 227 */
228 228
229int reg_add_port(struct user_port *up_ptr) 229int tipc_reg_add_port(struct user_port *up_ptr)
230{ 230{
231 struct tipc_user *user_ptr; 231 struct tipc_user *user_ptr;
232 232
@@ -245,10 +245,10 @@ int reg_add_port(struct user_port *up_ptr)
245} 245}
246 246
247/** 247/**
248 * reg_remove_port - deregister a user's driver port 248 * tipc_reg_remove_port - deregister a user's driver port
249 */ 249 */
250 250
251int reg_remove_port(struct user_port *up_ptr) 251int tipc_reg_remove_port(struct user_port *up_ptr)
252{ 252{
253 if (up_ptr->user_ref == 0) 253 if (up_ptr->user_ref == 0)
254 return TIPC_OK; 254 return TIPC_OK;
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
index 122ca9be367..d0e88794ed1 100644
--- a/net/tipc/user_reg.h
+++ b/net/tipc/user_reg.h
@@ -39,10 +39,10 @@
39 39
40#include "port.h" 40#include "port.h"
41 41
42int reg_start(void); 42int tipc_reg_start(void);
43void reg_stop(void); 43void tipc_reg_stop(void);
44 44
45int reg_add_port(struct user_port *up_ptr); 45int tipc_reg_add_port(struct user_port *up_ptr);
46int reg_remove_port(struct user_port *up_ptr); 46int tipc_reg_remove_port(struct user_port *up_ptr);
47 47
48#endif 48#endif
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 4eaef662d56..7c11f7f83a2 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -42,12 +42,12 @@
42#include "cluster.h" 42#include "cluster.h"
43#include "node.h" 43#include "node.h"
44 44
45struct _zone *zone_create(u32 addr) 45struct _zone *tipc_zone_create(u32 addr)
46{ 46{
47 struct _zone *z_ptr = 0; 47 struct _zone *z_ptr = 0;
48 u32 z_num; 48 u32 z_num;
49 49
50 if (!addr_domain_valid(addr)) 50 if (!tipc_addr_domain_valid(addr))
51 return 0; 51 return 0;
52 52
53 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); 53 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
@@ -55,24 +55,24 @@ struct _zone *zone_create(u32 addr)
55 memset(z_ptr, 0, sizeof(*z_ptr)); 55 memset(z_ptr, 0, sizeof(*z_ptr));
56 z_num = tipc_zone(addr); 56 z_num = tipc_zone(addr);
57 z_ptr->addr = tipc_addr(z_num, 0, 0); 57 z_ptr->addr = tipc_addr(z_num, 0, 0);
58 net.zones[z_num] = z_ptr; 58 tipc_net.zones[z_num] = z_ptr;
59 } 59 }
60 return z_ptr; 60 return z_ptr;
61} 61}
62 62
63void zone_delete(struct _zone *z_ptr) 63void tipc_zone_delete(struct _zone *z_ptr)
64{ 64{
65 u32 c_num; 65 u32 c_num;
66 66
67 if (!z_ptr) 67 if (!z_ptr)
68 return; 68 return;
69 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) { 69 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
70 cluster_delete(z_ptr->clusters[c_num]); 70 tipc_cltr_delete(z_ptr->clusters[c_num]);
71 } 71 }
72 kfree(z_ptr); 72 kfree(z_ptr);
73} 73}
74 74
75void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr) 75void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
76{ 76{
77 u32 c_num = tipc_cluster(c_ptr->addr); 77 u32 c_num = tipc_cluster(c_ptr->addr);
78 78
@@ -82,19 +82,19 @@ void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
82 z_ptr->clusters[c_num] = c_ptr; 82 z_ptr->clusters[c_num] = c_ptr;
83} 83}
84 84
85void zone_remove_as_router(struct _zone *z_ptr, u32 router) 85void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router)
86{ 86{
87 u32 c_num; 87 u32 c_num;
88 88
89 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) { 89 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
90 if (z_ptr->clusters[c_num]) { 90 if (z_ptr->clusters[c_num]) {
91 cluster_remove_as_router(z_ptr->clusters[c_num], 91 tipc_cltr_remove_as_router(z_ptr->clusters[c_num],
92 router); 92 router);
93 } 93 }
94 } 94 }
95} 95}
96 96
97void zone_send_external_routes(struct _zone *z_ptr, u32 dest) 97void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest)
98{ 98{
99 u32 c_num; 99 u32 c_num;
100 100
@@ -102,12 +102,12 @@ void zone_send_external_routes(struct _zone *z_ptr, u32 dest)
102 if (z_ptr->clusters[c_num]) { 102 if (z_ptr->clusters[c_num]) {
103 if (in_own_cluster(z_ptr->addr)) 103 if (in_own_cluster(z_ptr->addr))
104 continue; 104 continue;
105 cluster_send_ext_routes(z_ptr->clusters[c_num], dest); 105 tipc_cltr_send_ext_routes(z_ptr->clusters[c_num], dest);
106 } 106 }
107 } 107 }
108} 108}
109 109
110struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref) 110struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
111{ 111{
112 struct cluster *c_ptr; 112 struct cluster *c_ptr;
113 struct node *n_ptr; 113 struct node *n_ptr;
@@ -118,7 +118,7 @@ struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
118 c_ptr = z_ptr->clusters[tipc_cluster(addr)]; 118 c_ptr = z_ptr->clusters[tipc_cluster(addr)];
119 if (!c_ptr) 119 if (!c_ptr)
120 return 0; 120 return 0;
121 n_ptr = cluster_select_node(c_ptr, ref); 121 n_ptr = tipc_cltr_select_node(c_ptr, ref);
122 if (n_ptr) 122 if (n_ptr)
123 return n_ptr; 123 return n_ptr;
124 124
@@ -127,14 +127,14 @@ struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
127 c_ptr = z_ptr->clusters[c_num]; 127 c_ptr = z_ptr->clusters[c_num];
128 if (!c_ptr) 128 if (!c_ptr)
129 return 0; 129 return 0;
130 n_ptr = cluster_select_node(c_ptr, ref); 130 n_ptr = tipc_cltr_select_node(c_ptr, ref);
131 if (n_ptr) 131 if (n_ptr)
132 return n_ptr; 132 return n_ptr;
133 } 133 }
134 return 0; 134 return 0;
135} 135}
136 136
137u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref) 137u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
138{ 138{
139 struct cluster *c_ptr; 139 struct cluster *c_ptr;
140 u32 c_num; 140 u32 c_num;
@@ -143,14 +143,14 @@ u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
143 if (!z_ptr) 143 if (!z_ptr)
144 return 0; 144 return 0;
145 c_ptr = z_ptr->clusters[tipc_cluster(addr)]; 145 c_ptr = z_ptr->clusters[tipc_cluster(addr)];
146 router = c_ptr ? cluster_select_router(c_ptr, ref) : 0; 146 router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
147 if (router) 147 if (router)
148 return router; 148 return router;
149 149
150 /* Links to any other clusters within the zone? */ 150 /* Links to any other clusters within the zone? */
151 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) { 151 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
152 c_ptr = z_ptr->clusters[c_num]; 152 c_ptr = z_ptr->clusters[c_num];
153 router = c_ptr ? cluster_select_router(c_ptr, ref) : 0; 153 router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
154 if (router) 154 if (router)
155 return router; 155 return router;
156 } 156 }
@@ -158,12 +158,12 @@ u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
158} 158}
159 159
160 160
161u32 zone_next_node(u32 addr) 161u32 tipc_zone_next_node(u32 addr)
162{ 162{
163 struct cluster *c_ptr = cluster_find(addr); 163 struct cluster *c_ptr = tipc_cltr_find(addr);
164 164
165 if (c_ptr) 165 if (c_ptr)
166 return cluster_next_node(c_ptr, addr); 166 return tipc_cltr_next_node(c_ptr, addr);
167 return 0; 167 return 0;
168} 168}
169 169
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
index 4326f78d829..267999c5a24 100644
--- a/net/tipc/zone.h
+++ b/net/tipc/zone.h
@@ -54,18 +54,18 @@ struct _zone {
54 u32 links; 54 u32 links;
55}; 55};
56 56
57struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref); 57struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
58u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref); 58u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
59void zone_remove_as_router(struct _zone *z_ptr, u32 router); 59void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router);
60void zone_send_external_routes(struct _zone *z_ptr, u32 dest); 60void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest);
61struct _zone *zone_create(u32 addr); 61struct _zone *tipc_zone_create(u32 addr);
62void zone_delete(struct _zone *z_ptr); 62void tipc_zone_delete(struct _zone *z_ptr);
63void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr); 63void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
64u32 zone_next_node(u32 addr); 64u32 tipc_zone_next_node(u32 addr);
65 65
66static inline struct _zone *zone_find(u32 addr) 66static inline struct _zone *tipc_zone_find(u32 addr)
67{ 67{
68 return net.zones[tipc_zone(addr)]; 68 return tipc_net.zones[tipc_zone(addr)];
69} 69}
70 70
71#endif 71#endif
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
index 5f0ad6bb43b..a21c663e7e1 100644
--- a/sound/oss/trident.c
+++ b/sound/oss/trident.c
@@ -278,16 +278,14 @@ static char *card_names[] = {
278}; 278};
279 279
280static struct pci_device_id trident_pci_tbl[] = { 280static struct pci_device_id trident_pci_tbl[] = {
281 {PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX, 281 {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX),
282 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TRIDENT_4D_DX}, 282 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, TRIDENT_4D_DX},
283 {PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX, 283 {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX),
284 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TRIDENT_4D_NX}, 284 0, 0, TRIDENT_4D_NX},
285 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018, 285 {PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018), 0, 0, SIS_7018},
286 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_7018}, 286 {PCI_DEVICE(PCI_VENDOR_ID_ALI, PCI_DEVICE_ID_ALI_5451), 0, 0, ALI_5451},
287 {PCI_VENDOR_ID_ALI, PCI_DEVICE_ID_ALI_5451, 287 {PCI_DEVICE(PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_5050),
288 PCI_ANY_ID, PCI_ANY_ID, 0, 0, ALI_5451}, 288 0, 0, CYBER5050},
289 {PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_5050,
290 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CYBER5050},
291 {0,} 289 {0,}
292}; 290};
293 291