aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--CREDITS2
-rw-r--r--Documentation/cputopology.txt41
-rw-r--r--Documentation/driver-model/overview.txt57
-rw-r--r--Documentation/filesystems/configfs/configfs_example.c2
-rw-r--r--Documentation/filesystems/ocfs2.txt1
-rw-r--r--Documentation/networking/ip-sysctl.txt17
-rw-r--r--Documentation/parport-lowlevel.txt8
-rw-r--r--Documentation/pci-error-recovery.txt472
-rw-r--r--Documentation/x86_64/boot-options.txt12
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile2
-rw-r--r--arch/arm/configs/at91rm9200dk_defconfig1
-rw-r--r--arch/arm/configs/at91rm9200ek_defconfig1
-rw-r--r--arch/arm/configs/csb337_defconfig1
-rw-r--r--arch/arm/configs/csb637_defconfig1
-rw-r--r--arch/arm/mach-pxa/pxa27x.c2
-rw-r--r--arch/arm/mach-s3c2410/Makefile5
-rw-r--r--arch/arm/mach-s3c2410/cpu.c18
-rw-r--r--arch/arm/mach-s3c2410/gpio.c72
-rw-r--r--arch/arm/mach-s3c2410/s3c2400-gpio.c45
-rw-r--r--arch/arm/mach-s3c2410/s3c2410-gpio.c93
-rw-r--r--arch/arm/mach-s3c2410/sleep.S2
-rw-r--r--arch/arm/mm/cache-v6.S18
-rw-r--r--arch/arm/mm/proc-xscale.S16
-rw-r--r--arch/arm/oprofile/common.c5
-rw-r--r--arch/i386/Kconfig1
-rw-r--r--arch/i386/kernel/acpi/boot.c13
-rw-r--r--arch/i386/kernel/apic.c5
-rw-r--r--arch/i386/kernel/cpu/amd.c8
-rw-r--r--arch/i386/kernel/cpu/centaur.c8
-rw-r--r--arch/i386/kernel/cpu/common.c11
-rw-r--r--arch/i386/kernel/cpu/cyrix.c18
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c1
-rw-r--r--arch/i386/kernel/cpu/nexgen.c8
-rw-r--r--arch/i386/kernel/cpu/rise.c8
-rw-r--r--arch/i386/kernel/cpu/transmeta.c10
-rw-r--r--arch/i386/kernel/cpu/umc.c8
-rw-r--r--arch/i386/kernel/nmi.c2
-rw-r--r--arch/i386/kernel/process.c6
-rw-r--r--arch/i386/kernel/traps.c9
-rw-r--r--arch/i386/oprofile/backtrace.c19
-rw-r--r--arch/ia64/kernel/topology.c18
-rw-r--r--arch/parisc/Kconfig20
-rw-r--r--arch/parisc/Kconfig.debug10
-rw-r--r--arch/parisc/configs/b180_defconfig182
-rw-r--r--arch/parisc/hpux/entry_hpux.S3
-rw-r--r--arch/parisc/kernel/drivers.c3
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c3
-rw-r--r--arch/parisc/kernel/pci.c13
-rw-r--r--arch/parisc/kernel/perf.c38
-rw-r--r--arch/parisc/kernel/perf_images.h4
-rw-r--r--arch/parisc/kernel/process.c35
-rw-r--r--arch/parisc/kernel/ptrace.c8
-rw-r--r--arch/parisc/kernel/signal.c2
-rw-r--r--arch/parisc/kernel/signal32.c102
-rw-r--r--arch/parisc/kernel/signal32.h127
-rw-r--r--arch/parisc/kernel/syscall.S2
-rw-r--r--arch/parisc/kernel/syscall_table.S19
-rw-r--r--arch/parisc/kernel/traps.c3
-rw-r--r--arch/parisc/math-emu/decode_exc.c1
-rw-r--r--arch/parisc/mm/init.c35
-rw-r--r--arch/s390/kernel/compat_wrapper.S6
-rw-r--r--arch/sparc64/boot/.gitignore4
-rw-r--r--arch/sparc64/defconfig21
-rw-r--r--arch/v850/kernel/simcons.c25
-rw-r--r--arch/x86_64/Kconfig.debug7
-rw-r--r--arch/x86_64/defconfig21
-rw-r--r--arch/x86_64/kernel/apic.c89
-rw-r--r--arch/x86_64/kernel/entry.S7
-rw-r--r--arch/x86_64/kernel/io_apic.c8
-rw-r--r--arch/x86_64/kernel/mce.c2
-rw-r--r--arch/x86_64/kernel/nmi.c7
-rw-r--r--arch/x86_64/kernel/pci-dma.c3
-rw-r--r--arch/x86_64/kernel/pci-gart.c19
-rw-r--r--arch/x86_64/kernel/pci-nommu.c7
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86_64/kernel/pmtimer.c25
-rw-r--r--arch/x86_64/kernel/process.c4
-rw-r--r--arch/x86_64/kernel/setup.c8
-rw-r--r--arch/x86_64/kernel/time.c59
-rw-r--r--arch/x86_64/kernel/traps.c21
-rw-r--r--arch/x86_64/kernel/vmlinux.lds.S10
-rw-r--r--arch/x86_64/lib/clear_page.S38
-rw-r--r--arch/x86_64/lib/copy_page.S87
-rw-r--r--arch/x86_64/lib/copy_user.S247
-rw-r--r--arch/x86_64/lib/memcpy.S93
-rw-r--r--arch/x86_64/lib/memset.S94
-rw-r--r--arch/x86_64/mm/fault.c3
-rw-r--r--arch/x86_64/mm/srat.c45
-rw-r--r--arch/x86_64/pci/mmconfig.c2
-rw-r--r--arch/xtensa/platform-iss/console.c4
-rw-r--r--block/elevator.c3
-rw-r--r--block/ll_rw_blk.c40
-rw-r--r--drivers/acpi/processor_idle.c6
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/topology.c148
-rw-r--r--drivers/block/Kconfig8
-rw-r--r--drivers/block/pktcdvd.c58
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/char/cyclades.c6
-rw-r--r--drivers/char/drm/ati_pcigart.c17
-rw-r--r--drivers/char/drm/drmP.h5
-rw-r--r--drivers/char/drm/drm_auth.c20
-rw-r--r--drivers/char/drm/drm_bufs.c80
-rw-r--r--drivers/char/drm/drm_context.c52
-rw-r--r--drivers/char/drm/drm_drv.c4
-rw-r--r--drivers/char/drm/drm_fops.c12
-rw-r--r--drivers/char/drm/drm_ioctl.c18
-rw-r--r--drivers/char/drm/drm_irq.c16
-rw-r--r--drivers/char/drm/drm_pciids.h2
-rw-r--r--drivers/char/drm/drm_proc.c28
-rw-r--r--drivers/char/drm/drm_stub.c4
-rw-r--r--drivers/char/drm/drm_vm.c12
-rw-r--r--drivers/char/drm/i810_dma.c2
-rw-r--r--drivers/char/drm/i810_drv.h2
-rw-r--r--drivers/char/drm/i830_dma.c2
-rw-r--r--drivers/char/drm/i830_drv.h3
-rw-r--r--drivers/char/drm/i915_dma.c42
-rw-r--r--drivers/char/drm/i915_drm.h33
-rw-r--r--drivers/char/drm/i915_drv.h6
-rw-r--r--drivers/char/drm/i915_mem.c31
-rw-r--r--drivers/char/drm/radeon_cp.c2
-rw-r--r--drivers/char/drm/savage_bci.c4
-rw-r--r--drivers/char/drm/savage_drv.h1
-rw-r--r--drivers/char/drm/via_dma.c10
-rw-r--r--drivers/char/drm/via_dmablit.c6
-rw-r--r--drivers/char/drm/via_drv.h7
-rw-r--r--drivers/char/drm/via_irq.c2
-rw-r--r--drivers/char/esp.c4
-rw-r--r--drivers/char/ip2/i2cmd.c1
-rw-r--r--drivers/char/ip2main.c67
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c24
-rw-r--r--drivers/char/rio/cirrus.h142
-rw-r--r--drivers/char/rio/defaults.h7
-rw-r--r--drivers/char/rio/link.h34
-rw-r--r--drivers/char/rio/list.h140
-rw-r--r--drivers/char/rio/parmmap.h5
-rw-r--r--drivers/char/rio/phb.h133
-rw-r--r--drivers/char/rio/pkt.h27
-rw-r--r--drivers/char/rio/qbuf.h4
-rw-r--r--drivers/char/rio/riotypes.h66
-rw-r--r--drivers/char/rio/rup.h5
-rw-r--r--drivers/char/rio/sam.h4
-rw-r--r--drivers/char/rocket.c2
-rw-r--r--drivers/char/sx.c6
-rw-r--r--drivers/char/tty_io.c77
-rw-r--r--drivers/char/watchdog/sbc_epx_c3.c13
-rw-r--r--drivers/edac/Kconfig3
-rw-r--r--drivers/edac/e752x_edac.c8
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/ide/Kconfig25
-rw-r--r--drivers/ide/ide-disk.c8
-rw-r--r--drivers/ide/ide-io.c5
-rw-r--r--drivers/ide/ide-iops.c1
-rw-r--r--drivers/ide/ide-probe.c51
-rw-r--r--drivers/ide/ide.c1
-rw-r--r--drivers/ide/pci/aec62xx.c15
-rw-r--r--drivers/ide/pci/hpt366.c4
-rw-r--r--drivers/ide/pci/it821x.c2
-rw-r--r--drivers/ide/pci/pdc202xx_new.c6
-rw-r--r--drivers/ide/pci/pdc202xx_old.c15
-rw-r--r--drivers/ide/pci/piix.c4
-rw-r--r--drivers/isdn/hisax/hisax.h2
-rw-r--r--drivers/isdn/sc/ioctl.c4
-rw-r--r--drivers/md/dm-log.c3
-rw-r--r--drivers/md/md.c48
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/md/raid5.c3
-rw-r--r--drivers/md/raid6main.c152
-rw-r--r--drivers/message/i2o/core.h3
-rw-r--r--drivers/message/i2o/i2o_scsi.c2
-rw-r--r--drivers/message/i2o/pci.c47
-rw-r--r--drivers/mmc/au1xmmc.c59
-rw-r--r--drivers/mmc/mmc.c28
-rw-r--r--drivers/mmc/mmc_block.c8
-rw-r--r--drivers/mmc/mmci.c11
-rw-r--r--drivers/mmc/pxamci.c9
-rw-r--r--drivers/mmc/wbsd.c8
-rw-r--r--drivers/mtd/maps/dc21285.c9
-rw-r--r--drivers/net/3c59x.c33
-rw-r--r--drivers/net/ppp_generic.c3
-rw-r--r--drivers/net/tg3.c30
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/parisc/ccio-dma.c7
-rw-r--r--drivers/parisc/dino.c4
-rw-r--r--drivers/parisc/hppb.c3
-rw-r--r--drivers/parisc/iosapic.c8
-rw-r--r--drivers/parisc/lasi.c5
-rw-r--r--drivers/parisc/lba_pci.c6
-rw-r--r--drivers/parisc/pdc_stable.c356
-rw-r--r--drivers/parisc/sba_iommu.c3
-rw-r--r--drivers/parisc/superio.c41
-rw-r--r--drivers/parisc/wax.c2
-rw-r--r--drivers/parport/Kconfig9
-rw-r--r--drivers/parport/Makefile1
-rw-r--r--drivers/parport/ieee1284.c10
-rw-r--r--drivers/parport/parport_gsc.c2
-rw-r--r--drivers/parport/parport_ip32.c2253
-rw-r--r--drivers/parport/parport_serial.c4
-rw-r--r--drivers/parport/probe.c4
-rw-r--r--drivers/s390/block/Kconfig14
-rw-r--r--drivers/s390/block/Makefile2
-rw-r--r--drivers/s390/block/dasd.c76
-rw-r--r--drivers/s390/block/dasd_3990_erp.c3
-rw-r--r--drivers/s390/block/dasd_eckd.h1
-rw-r--r--drivers/s390/block/dasd_eer.c1090
-rw-r--r--drivers/s390/block/dasd_int.h37
-rw-r--r--drivers/s390/cio/chsc.h2
-rw-r--r--drivers/scsi/scsi.c2
-rw-r--r--drivers/serial/8250_pci.c25
-rw-r--r--drivers/serial/Kconfig29
-rw-r--r--drivers/serial/jsm/jsm.h1
-rw-r--r--drivers/serial/jsm/jsm_driver.c3
-rw-r--r--drivers/serial/jsm/jsm_tty.c209
-rw-r--r--drivers/serial/mcfserial.c3
-rw-r--r--drivers/serial/serial_core.c2
-rw-r--r--drivers/telephony/ixj.c14
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/video/console/sticore.c45
-rw-r--r--fs/9p/conv.c28
-rw-r--r--fs/9p/mux.c15
-rw-r--r--fs/9p/vfs_inode.c6
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/buffer.c4
-rw-r--r--fs/configfs/configfs_internal.h11
-rw-r--r--fs/configfs/dir.c36
-rw-r--r--fs/configfs/file.c19
-rw-r--r--fs/configfs/inode.c120
-rw-r--r--fs/configfs/mount.c28
-rw-r--r--fs/configfs/symlink.c4
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/direct-io.c9
-rw-r--r--fs/ext2/acl.c2
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/super.c5
-rw-r--r--fs/ext3/acl.c2
-rw-r--r--fs/fat/file.c50
-rw-r--r--fs/fat/misc.c14
-rw-r--r--fs/fcntl.c7
-rw-r--r--fs/file.c3
-rw-r--r--fs/fuse/dev.c40
-rw-r--r--fs/jbd/transaction.c10
-rw-r--r--fs/jffs/intrep.c2
-rw-r--r--fs/libfs.c1
-rw-r--r--fs/lockd/clntproc.c11
-rw-r--r--fs/namei.c37
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/ocfs2/buffer_head_io.c10
-rw-r--r--fs/ocfs2/cluster/heartbeat.c5
-rw-r--r--fs/ocfs2/cluster/tcp.c16
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h1
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c18
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c24
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c250
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c13
-rw-r--r--fs/ocfs2/dlm/userdlm.c2
-rw-r--r--fs/ocfs2/extent_map.c12
-rw-r--r--fs/ocfs2/file.c10
-rw-r--r--fs/ocfs2/inode.c6
-rw-r--r--fs/ocfs2/inode.h4
-rw-r--r--fs/ocfs2/journal.c32
-rw-r--r--fs/ocfs2/ocfs2.h3
-rw-r--r--fs/ocfs2/super.c11
-rw-r--r--fs/ocfs2/sysfile.c6
-rw-r--r--fs/ocfs2/uptodate.c12
-rw-r--r--fs/ocfs2/uptodate.h2
-rw-r--r--fs/proc/proc_misc.c2
-rw-r--r--fs/quota_v2.c2
-rw-r--r--fs/reiserfs/super.c2
-rw-r--r--fs/udf/balloc.c7
-rw-r--r--fs/udf/namei.c4
-rw-r--r--fs/ufs/inode.c2
-rw-r--r--fs/ufs/super.c10
-rw-r--r--fs/ufs/truncate.c72
-rw-r--r--include/asm-arm/arch-s3c2410/hardware.h7
-rw-r--r--include/asm-arm/arch-s3c2410/regs-gpio.h22
-rw-r--r--include/asm-arm/checksum.h2
-rw-r--r--include/asm-cris/bitops.h2
-rw-r--r--include/asm-frv/bitops.h2
-rw-r--r--include/asm-h8300/bitops.h2
-rw-r--r--include/asm-i386/system.h2
-rw-r--r--include/asm-i386/topology.h9
-rw-r--r--include/asm-ia64/ide.h8
-rw-r--r--include/asm-ia64/topology.h7
-rw-r--r--include/asm-parisc/atomic.h84
-rw-r--r--include/asm-parisc/cacheflush.h6
-rw-r--r--include/asm-parisc/compat_ucontext.h3
-rw-r--r--include/asm-parisc/grfioctl.h2
-rw-r--r--include/asm-parisc/pci.h17
-rw-r--r--include/asm-parisc/pgalloc.h1
-rw-r--r--include/asm-parisc/pgtable.h2
-rw-r--r--include/asm-parisc/rt_sigframe.h4
-rw-r--r--include/asm-parisc/unistd.h21
-rw-r--r--include/asm-s390/dasd.h13
-rw-r--r--include/asm-s390/io.h6
-rw-r--r--include/asm-s390/timer.h8
-rw-r--r--include/asm-v850/bitops.h2
-rw-r--r--include/asm-x86_64/apic.h1
-rw-r--r--include/asm-x86_64/cpufeature.h2
-rw-r--r--include/asm-x86_64/hardirq.h21
-rw-r--r--include/asm-x86_64/kexec.h3
-rw-r--r--include/asm-x86_64/proto.h11
-rw-r--r--include/asm-x86_64/system.h2
-rw-r--r--include/asm-x86_64/topology.h9
-rw-r--r--include/linux/bitops.h2
-rw-r--r--include/linux/configfs.h2
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/elfcore.h1
-rw-r--r--include/linux/i2o.h6
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/jbd.h4
-rw-r--r--include/linux/kbd_kern.h5
-rw-r--r--include/linux/list.h14
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/mmc/mmc.h35
-rw-r--r--include/linux/mmc/protocol.h2
-rw-r--r--include/linux/netfilter_ipv4/ipt_connbytes.h4
-rw-r--r--include/linux/netfilter_ipv4/ipt_policy.h22
-rw-r--r--include/linux/netfilter_ipv6/ip6t_policy.h22
-rw-r--r--include/linux/parport.h6
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/pktcdvd.h8
-rw-r--r--include/linux/quotaops.h1
-rw-r--r--include/linux/rcupdate.h5
-rw-r--r--include/linux/reiserfs_acl.h6
-rw-r--r--include/linux/security.h4
-rw-r--r--include/linux/sunrpc/auth.h10
-rw-r--r--include/linux/suspend.h4
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_flip.h7
-rw-r--r--include/linux/ufs_fs.h14
-rw-r--r--include/linux/ufs_fs_sb.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h15
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/sock.h8
-rw-r--r--init/Kconfig11
-rw-r--r--kernel/cpuset.c2
-rw-r--r--kernel/intermodule.c3
-rw-r--r--kernel/kprobes.c36
-rw-r--r--kernel/module.c3
-rw-r--r--kernel/sched.c16
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/time.c13
-rw-r--r--lib/int_sqrt.c2
-rw-r--r--lib/ts_bm.c40
-rw-r--r--mm/hugetlb.c9
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/slab.c176
-rw-r--r--net/802/psnap.c2
-rw-r--r--net/Kconfig7
-rw-r--r--net/bridge/netfilter/ebt_ulog.c10
-rw-r--r--net/bridge/netfilter/ebtables.c7
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/utils.c4
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/multipath_wrandom.c8
-rw-r--r--net/ipv4/netfilter/arp_tables.c7
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netlink.c3
-rw-r--r--net/ipv4/netfilter/ip_conntrack_tftp.c1
-rw-r--r--net/ipv4/netfilter/ip_nat_standalone.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c7
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c26
-rw-r--r--net/ipv4/netfilter/ipt_policy.c11
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv6/addrconf.c6
-rw-r--r--net/ipv6/af_inet6.c6
-rw-r--r--net/ipv6/netfilter/ip6_tables.c7
-rw-r--r--net/ipv6/netfilter/ip6t_policy.c7
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_conntrack_ftp.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c3
-rw-r--r--net/netfilter/nfnetlink_log.c20
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sctp/outqueue.c12
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth.c25
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c40
-rw-r--r--net/sunrpc/auth_unix.c6
-rw-r--r--net/sunrpc/rpc_pipe.c102
-rw-r--r--scripts/kconfig/Makefile12
-rw-r--r--security/keys/keyctl.c15
-rw-r--r--security/selinux/Kconfig2
-rw-r--r--security/selinux/Makefile4
-rw-r--r--security/selinux/hooks.c21
-rw-r--r--sound/arm/aaci.c14
390 files changed, 8381 insertions, 2867 deletions
diff --git a/CREDITS b/CREDITS
index 8e577ce4abeb..6957ef4efab3 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3101,7 +3101,7 @@ S: Minto, NSW, 2566
3101S: Australia 3101S: Australia
3102 3102
3103N: Stephen Smalley 3103N: Stephen Smalley
3104E: sds@epoch.ncsc.mil 3104E: sds@tycho.nsa.gov
3105D: portions of the Linux Security Module (LSM) framework and security modules 3105D: portions of the Linux Security Module (LSM) framework and security modules
3106 3106
3107N: Chris Smith 3107N: Chris Smith
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
new file mode 100644
index 000000000000..ff280e2e1613
--- /dev/null
+++ b/Documentation/cputopology.txt
@@ -0,0 +1,41 @@
1
2Export cpu topology info by sysfs. Items (attributes) are similar
3to /proc/cpuinfo.
4
51) /sys/devices/system/cpu/cpuX/topology/physical_package_id:
6represent the physical package id of cpu X;
72) /sys/devices/system/cpu/cpuX/topology/core_id:
8represent the cpu core id to cpu X;
93) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
10represent the thread siblings to cpu X in the same core;
114) /sys/devices/system/cpu/cpuX/topology/core_siblings:
12represent the thread siblings to cpu X in the same physical package;
13
14To implement it in an architecture-neutral way, a new source file,
15driver/base/topology.c, is to export the 5 attributes.
16
17If one architecture wants to support this feature, it just needs to
18implement 4 defines, typically in file include/asm-XXX/topology.h.
19The 4 defines are:
20#define topology_physical_package_id(cpu)
21#define topology_core_id(cpu)
22#define topology_thread_siblings(cpu)
23#define topology_core_siblings(cpu)
24
25The type of **_id is int.
26The type of siblings is cpumask_t.
27
28To be consistent on all architectures, the 4 attributes should have
29deafult values if their values are unavailable. Below is the rule.
301) physical_package_id: If cpu has no physical package id, -1 is the
31default value.
322) core_id: If cpu doesn't support multi-core, its core id is 0.
333) thread_siblings: Just include itself, if the cpu doesn't support
34HT/multi-thread.
354) core_siblings: Just include itself, if the cpu doesn't support
36multi-core and HT/Multi-thread.
37
38So be careful when declaring the 4 defines in include/asm-XXX/topology.h.
39
40If an attribute isn't defined on an architecture, it won't be exported.
41
diff --git a/Documentation/driver-model/overview.txt b/Documentation/driver-model/overview.txt
index 44662735cf81..ac4a7a737e43 100644
--- a/Documentation/driver-model/overview.txt
+++ b/Documentation/driver-model/overview.txt
@@ -1,50 +1,43 @@
1The Linux Kernel Device Model 1The Linux Kernel Device Model
2 2
3Patrick Mochel <mochel@osdl.org> 3Patrick Mochel <mochel@digitalimplant.org>
4 4
526 August 2002 5Drafted 26 August 2002
6Updated 31 January 2006
6 7
7 8
8Overview 9Overview
9~~~~~~~~ 10~~~~~~~~
10 11
11This driver model is a unification of all the current, disparate driver models 12The Linux Kernel Driver Model is a unification of all the disparate driver
12that are currently in the kernel. It is intended to augment the 13models that were previously used in the kernel. It is intended to augment the
13bus-specific drivers for bridges and devices by consolidating a set of data 14bus-specific drivers for bridges and devices by consolidating a set of data
14and operations into globally accessible data structures. 15and operations into globally accessible data structures.
15 16
16Current driver models implement some sort of tree-like structure (sometimes 17Traditional driver models implemented some sort of tree-like structure
17just a list) for the devices they control. But, there is no linkage between 18(sometimes just a list) for the devices they control. There wasn't any
18the different bus types. 19uniformity across the different bus types.
19 20
20A common data structure can provide this linkage with little overhead: when a 21The current driver model provides a comon, uniform data model for describing
21bus driver discovers a particular device, it can insert it into the global 22a bus and the devices that can appear under the bus. The unified bus
22tree as well as its local tree. In fact, the local tree becomes just a subset 23model includes a set of common attributes which all busses carry, and a set
23of the global tree. 24of common callbacks, such as device discovery during bus probing, bus
24 25shutdown, bus power management, etc.
25Common data fields can also be moved out of the local bus models into the
26global model. Some of the manipulations of these fields can also be
27consolidated. Most likely, manipulation functions will become a set
28of helper functions, which the bus drivers wrap around to include any
29bus-specific items.
30
31The common device and bridge interface currently reflects the goals of the
32modern PC: namely the ability to do seamless Plug and Play, power management,
33and hot plug. (The model dictated by Intel and Microsoft (read: ACPI) ensures
34us that any device in the system may fit any of these criteria.)
35
36In reality, not every bus will be able to support such operations. But, most
37buses will support a majority of those operations, and all future buses will.
38In other words, a bus that doesn't support an operation is the exception,
39instead of the other way around.
40 26
27The common device and bridge interface reflects the goals of the modern
28computer: namely the ability to do seamless device "plug and play", power
29management, and hot plug. In particular, the model dictated by Intel and
30Microsoft (namely ACPI) ensures that almost every device on almost any bus
31on an x86-compatible system can work within this paradigm. Of course,
32not every bus is able to support all such operations, although most
33buses support a most of those operations.
41 34
42 35
43Downstream Access 36Downstream Access
44~~~~~~~~~~~~~~~~~ 37~~~~~~~~~~~~~~~~~
45 38
46Common data fields have been moved out of individual bus layers into a common 39Common data fields have been moved out of individual bus layers into a common
47data structure. But, these fields must still be accessed by the bus layers, 40data structure. These fields must still be accessed by the bus layers,
48and sometimes by the device-specific drivers. 41and sometimes by the device-specific drivers.
49 42
50Other bus layers are encouraged to do what has been done for the PCI layer. 43Other bus layers are encouraged to do what has been done for the PCI layer.
@@ -53,7 +46,7 @@ struct pci_dev now looks like this:
53struct pci_dev { 46struct pci_dev {
54 ... 47 ...
55 48
56 struct device device; 49 struct device dev;
57}; 50};
58 51
59Note first that it is statically allocated. This means only one allocation on 52Note first that it is statically allocated. This means only one allocation on
@@ -64,9 +57,9 @@ the two.
64 57
65The PCI bus layer freely accesses the fields of struct device. It knows about 58The PCI bus layer freely accesses the fields of struct device. It knows about
66the structure of struct pci_dev, and it should know the structure of struct 59the structure of struct pci_dev, and it should know the structure of struct
67device. PCI devices that have been converted generally do not touch the fields 60device. Individual PCI device drivers that have been converted the the current
68of struct device. More precisely, device-specific drivers should not touch 61driver model generally do not and should not touch the fields of struct device,
69fields of struct device unless there is a strong compelling reason to do so. 62unless there is a strong compelling reason to do so.
70 63
71This abstraction is prevention of unnecessary pain during transitional phases. 64This abstraction is prevention of unnecessary pain during transitional phases.
72If the name of the field changes or is removed, then every downstream driver 65If the name of the field changes or is removed, then every downstream driver
diff --git a/Documentation/filesystems/configfs/configfs_example.c b/Documentation/filesystems/configfs/configfs_example.c
index f3c6e4946f98..3d4713a6c207 100644
--- a/Documentation/filesystems/configfs/configfs_example.c
+++ b/Documentation/filesystems/configfs/configfs_example.c
@@ -320,6 +320,7 @@ static struct config_item_type simple_children_type = {
320 .ct_item_ops = &simple_children_item_ops, 320 .ct_item_ops = &simple_children_item_ops,
321 .ct_group_ops = &simple_children_group_ops, 321 .ct_group_ops = &simple_children_group_ops,
322 .ct_attrs = simple_children_attrs, 322 .ct_attrs = simple_children_attrs,
323 .ct_owner = THIS_MODULE,
323}; 324};
324 325
325static struct configfs_subsystem simple_children_subsys = { 326static struct configfs_subsystem simple_children_subsys = {
@@ -403,6 +404,7 @@ static struct config_item_type group_children_type = {
403 .ct_item_ops = &group_children_item_ops, 404 .ct_item_ops = &group_children_item_ops,
404 .ct_group_ops = &group_children_group_ops, 405 .ct_group_ops = &group_children_group_ops,
405 .ct_attrs = group_children_attrs, 406 .ct_attrs = group_children_attrs,
407 .ct_owner = THIS_MODULE,
406}; 408};
407 409
408static struct configfs_subsystem group_children_subsys = { 410static struct configfs_subsystem group_children_subsys = {
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index f2595caf052e..4389c684a80a 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -35,6 +35,7 @@ Features which OCFS2 does not support yet:
35 be cluster coherent. 35 be cluster coherent.
36 - quotas 36 - quotas
37 - cluster aware flock 37 - cluster aware flock
38 - cluster aware lockf
38 - Directory change notification (F_NOTIFY) 39 - Directory change notification (F_NOTIFY)
39 - Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease) 40 - Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease)
40 - POSIX ACLs 41 - POSIX ACLs
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 2b7cf19a06ad..26364d06ae92 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -427,6 +427,23 @@ icmp_ignore_bogus_error_responses - BOOLEAN
427 will avoid log file clutter. 427 will avoid log file clutter.
428 Default: FALSE 428 Default: FALSE
429 429
430icmp_errors_use_inbound_ifaddr - BOOLEAN
431
432 If zero, icmp error messages are sent with the primary address of
433 the exiting interface.
434
435 If non-zero, the message will be sent with the primary address of
436 the interface that received the packet that caused the icmp error.
437 This is the behaviour network many administrators will expect from
438 a router. And it can make debugging complicated network layouts
439 much easier.
440
441 Note that if no primary address exists for the interface selected,
442 then the primary address of the first non-loopback interface that
443 has one will be used regarldess of this setting.
444
445 Default: 0
446
430igmp_max_memberships - INTEGER 447igmp_max_memberships - INTEGER
431 Change the maximum number of multicast groups we can subscribe to. 448 Change the maximum number of multicast groups we can subscribe to.
432 Default: 20 449 Default: 20
diff --git a/Documentation/parport-lowlevel.txt b/Documentation/parport-lowlevel.txt
index 1d40008a1926..8f2302415eff 100644
--- a/Documentation/parport-lowlevel.txt
+++ b/Documentation/parport-lowlevel.txt
@@ -1068,7 +1068,7 @@ SYNOPSIS
1068 1068
1069struct parport_operations { 1069struct parport_operations {
1070 ... 1070 ...
1071 void (*write_status) (struct parport *port, unsigned char s); 1071 void (*write_control) (struct parport *port, unsigned char s);
1072 ... 1072 ...
1073}; 1073};
1074 1074
@@ -1097,9 +1097,9 @@ SYNOPSIS
1097 1097
1098struct parport_operations { 1098struct parport_operations {
1099 ... 1099 ...
1100 void (*frob_control) (struct parport *port, 1100 unsigned char (*frob_control) (struct parport *port,
1101 unsigned char mask, 1101 unsigned char mask,
1102 unsigned char val); 1102 unsigned char val);
1103 ... 1103 ...
1104}; 1104};
1105 1105
diff --git a/Documentation/pci-error-recovery.txt b/Documentation/pci-error-recovery.txt
index d089967e4948..634d3e5b5756 100644
--- a/Documentation/pci-error-recovery.txt
+++ b/Documentation/pci-error-recovery.txt
@@ -1,246 +1,396 @@
1 1
2 PCI Error Recovery 2 PCI Error Recovery
3 ------------------ 3 ------------------
4 May 31, 2005 4 February 2, 2006
5 5
6 Current document maintainer: 6 Current document maintainer:
7 Linas Vepstas <linas@austin.ibm.com> 7 Linas Vepstas <linas@austin.ibm.com>
8 8
9 9
10Some PCI bus controllers are able to detect certain "hard" PCI errors 10Many PCI bus controllers are able to detect a variety of hardware
11on the bus, such as parity errors on the data and address busses, as 11PCI errors on the bus, such as parity errors on the data and address
12well as SERR and PERR errors. These chipsets are then able to disable 12busses, as well as SERR and PERR errors. Some of the more advanced
13I/O to/from the affected device, so that, for example, a bad DMA 13chipsets are able to deal with these errors; these include PCI-E chipsets,
14address doesn't end up corrupting system memory. These same chipsets 14and the PCI-host bridges found on IBM Power4 and Power5-based pSeries
15are also able to reset the affected PCI device, and return it to 15boxes. A typical action taken is to disconnect the affected device,
16working condition. This document describes a generic API form 16halting all I/O to it. The goal of a disconnection is to avoid system
17performing error recovery. 17corruption; for example, to halt system memory corruption due to DMA's
18 18to "wild" addresses. Typically, a reconnection mechanism is also
19The core idea is that after a PCI error has been detected, there must 19offered, so that the affected PCI device(s) are reset and put back
20be a way for the kernel to coordinate with all affected device drivers 20into working condition. The reset phase requires coordination
21so that the pci card can be made operational again, possibly after 21between the affected device drivers and the PCI controller chip.
22performing a full electrical #RST of the PCI card. The API below 22This document describes a generic API for notifying device drivers
23provides a generic API for device drivers to be notified of PCI 23of a bus disconnection, and then performing error recovery.
24errors, and to be notified of, and respond to, a reset sequence. 24This API is currently implemented in the 2.6.16 and later kernels.
25 25
26Preliminary sketch of API, cut-n-pasted-n-modified email from 26Reporting and recovery is performed in several steps. First, when
27Ben Herrenschmidt, circa 5 april 2005 27a PCI hardware error has resulted in a bus disconnect, that event
28is reported as soon as possible to all affected device drivers,
29including multiple instances of a device driver on multi-function
30cards. This allows device drivers to avoid deadlocking in spinloops,
31waiting for some i/o-space register to change, when it never will.
32It also gives the drivers a chance to defer incoming I/O as
33needed.
34
35Next, recovery is performed in several stages. Most of the complexity
36is forced by the need to handle multi-function devices, that is,
37devices that have multiple device drivers associated with them.
38In the first stage, each driver is allowed to indicate what type
39of reset it desires, the choices being a simple re-enabling of I/O
40or requesting a hard reset (a full electrical #RST of the PCI card).
41If any driver requests a full reset, that is what will be done.
42
43After a full reset and/or a re-enabling of I/O, all drivers are
44again notified, so that they may then perform any device setup/config
45that may be required. After these have all completed, a final
46"resume normal operations" event is sent out.
47
48The biggest reason for choosing a kernel-based implementation rather
49than a user-space implementation was the need to deal with bus
50disconnects of PCI devices attached to storage media, and, in particular,
51disconnects from devices holding the root file system. If the root
52file system is disconnected, a user-space mechanism would have to go
53through a large number of contortions to complete recovery. Almost all
54of the current Linux file systems are not tolerant of disconnection
55from/reconnection to their underlying block device. By contrast,
56bus errors are easy to manage in the device driver. Indeed, most
57device drivers already handle very similar recovery procedures;
58for example, the SCSI-generic layer already provides significant
59mechanisms for dealing with SCSI bus errors and SCSI bus resets.
60
61
62Detailed Design
63---------------
64Design and implementation details below, based on a chain of
65public email discussions with Ben Herrenschmidt, circa 5 April 2005.
28 66
29The error recovery API support is exposed to the driver in the form of 67The error recovery API support is exposed to the driver in the form of
30a structure of function pointers pointed to by a new field in struct 68a structure of function pointers pointed to by a new field in struct
31pci_driver. The absence of this pointer in pci_driver denotes an 69pci_driver. A driver that fails to provide the structure is "non-aware",
32"non-aware" driver, behaviour on these is platform dependant. 70and the actual recovery steps taken are platform dependent. The
33Platforms like ppc64 can try to simulate pci hotplug remove/add. 71arch/powerpc implementation will simulate a PCI hotplug remove/add.
34
35The definition of "pci_error_token" is not covered here. It is based on
36Seto's work on the synchronous error detection. We still need to define
37functions for extracting infos out of an opaque error token. This is
38separate from this API.
39 72
40This structure has the form: 73This structure has the form:
41
42struct pci_error_handlers 74struct pci_error_handlers
43{ 75{
44 int (*error_detected)(struct pci_dev *dev, pci_error_token error); 76 int (*error_detected)(struct pci_dev *dev, enum pci_channel_state);
45 int (*mmio_enabled)(struct pci_dev *dev); 77 int (*mmio_enabled)(struct pci_dev *dev);
46 int (*resume)(struct pci_dev *dev);
47 int (*link_reset)(struct pci_dev *dev); 78 int (*link_reset)(struct pci_dev *dev);
48 int (*slot_reset)(struct pci_dev *dev); 79 int (*slot_reset)(struct pci_dev *dev);
80 void (*resume)(struct pci_dev *dev);
49}; 81};
50 82
51A driver doesn't have to implement all of these callbacks. The 83The possible channel states are:
52only mandatory one is error_detected(). If a callback is not 84enum pci_channel_state {
53implemented, the corresponding feature is considered unsupported. 85 pci_channel_io_normal, /* I/O channel is in normal state */
54For example, if mmio_enabled() and resume() aren't there, then the 86 pci_channel_io_frozen, /* I/O to channel is blocked */
55driver is assumed as not doing any direct recovery and requires 87 pci_channel_io_perm_failure, /* PCI card is dead */
88};
89
90Possible return values are:
91enum pci_ers_result {
92 PCI_ERS_RESULT_NONE, /* no result/none/not supported in device driver */
93 PCI_ERS_RESULT_CAN_RECOVER, /* Device driver can recover without slot reset */
94 PCI_ERS_RESULT_NEED_RESET, /* Device driver wants slot to be reset. */
95 PCI_ERS_RESULT_DISCONNECT, /* Device has completely failed, is unrecoverable */
96 PCI_ERS_RESULT_RECOVERED, /* Device driver is fully recovered and operational */
97};
98
99A driver does not have to implement all of these callbacks; however,
100if it implements any, it must implement error_detected(). If a callback
101is not implemented, the corresponding feature is considered unsupported.
102For example, if mmio_enabled() and resume() aren't there, then it
103is assumed that the driver is not doing any direct recovery and requires
56a reset. If link_reset() is not implemented, the card is assumed as 104a reset. If link_reset() is not implemented, the card is assumed as
57 not caring about link resets, in which case, if recover is supported, 105 not caring about link resets. Typically a driver will want to know about
58the core can try recover (but not slot_reset() unless it really did 106a slot_reset().
59reset the slot). If slot_reset() is not supported, link_reset() can 107
60be called instead on a slot reset. 108The actual steps taken by a platform to recover from a PCI error
61 109event will be platform-dependent, but will follow the general
62At first, the call will always be : 110sequence described below.
63 111
64 1) error_detected() 112STEP 0: Error Event
65 113-------------------
66 Error detected. This is sent once after an error has been detected. At 114PCI bus error is detected by the PCI hardware. On powerpc, the slot
67this point, the device might not be accessible anymore depending on the 115is isolated, in that all I/O is blocked: all reads return 0xffffffff,
68platform (the slot will be isolated on ppc64). The driver may already 116all writes are ignored.
69have "noticed" the error because of a failing IO, but this is the proper 117
70"synchronisation point", that is, it gives a chance to the driver to 118
71cleanup, waiting for pending stuff (timers, whatever, etc...) to 119STEP 1: Notification
72complete; it can take semaphores, schedule, etc... everything but touch 120--------------------
73the device. Within this function and after it returns, the driver 121Platform calls the error_detected() callback on every instance of
122every driver affected by the error.
123
124At this point, the device might not be accessible anymore, depending on
125the platform (the slot will be isolated on powerpc). The driver may
126already have "noticed" the error because of a failing I/O, but this
127is the proper "synchronization point", that is, it gives the driver
128a chance to cleanup, waiting for pending stuff (timers, whatever, etc...)
129to complete; it can take semaphores, schedule, etc... everything but
130touch the device. Within this function and after it returns, the driver
74shouldn't do any new IOs. Called in task context. This is sort of a 131shouldn't do any new IOs. Called in task context. This is sort of a
75"quiesce" point. See note about interrupts at the end of this doc. 132"quiesce" point. See note about interrupts at the end of this doc.
76 133
77 Result codes: 134All drivers participating in this system must implement this call.
78 - PCIERR_RESULT_CAN_RECOVER: 135The driver must return one of the following result codes:
79 Driever returns this if it thinks it might be able to recover 136 - PCI_ERS_RESULT_CAN_RECOVER:
137 Driver returns this if it thinks it might be able to recover
80 the HW by just banging IOs or if it wants to be given 138 the HW by just banging IOs or if it wants to be given
81 a chance to extract some diagnostic informations (see 139 a chance to extract some diagnostic information (see
82 below). 140 mmio_enable, below).
83 - PCIERR_RESULT_NEED_RESET: 141 - PCI_ERS_RESULT_NEED_RESET:
84 Driver returns this if it thinks it can't recover unless the 142 Driver returns this if it can't recover without a hard
85 slot is reset. 143 slot reset.
86 - PCIERR_RESULT_DISCONNECT: 144 - PCI_ERS_RESULT_DISCONNECT:
87 Return this if driver thinks it won't recover at all, 145 Driver returns this if it doesn't want to recover at all.
88 (this will detach the driver ? or just leave it 146
89 dangling ? to be decided) 147The next step taken will depend on the result codes returned by the
90 148drivers.
91So at this point, we have called error_detected() for all drivers 149
92on the segment that had the error. On ppc64, the slot is isolated. What 150If all drivers on the segment/slot return PCI_ERS_RESULT_CAN_RECOVER,
93happens now typically depends on the result from the drivers. If all 151then the platform should re-enable IOs on the slot (or do nothing in
94drivers on the segment/slot return PCIERR_RESULT_CAN_RECOVER, we would 152particular, if the platform doesn't isolate slots), and recovery
95re-enable IOs on the slot (or do nothing special if the platform doesn't 153proceeds to STEP 2 (MMIO Enable).
96isolate slots) and call 2). If not and we can reset slots, we go to 4), 154
97if neither, we have a dead slot. If it's an hotplug slot, we might 155If any driver requested a slot reset (by returning PCI_ERS_RESULT_NEED_RESET),
98"simulate" reset by triggering HW unplug/replug though. 156then recovery proceeds to STEP 4 (Slot Reset).
99 157
100>>> Current ppc64 implementation assumes that a device driver will 158If the platform is unable to recover the slot, the next step
101>>> *not* schedule or semaphore in this routine; the current ppc64 159is STEP 6 (Permanent Failure).
160
161>>> The current powerpc implementation assumes that a device driver will
162>>> *not* schedule or semaphore in this routine; the current powerpc
102>>> implementation uses one kernel thread to notify all devices; 163>>> implementation uses one kernel thread to notify all devices;
103>>> thus, of one device sleeps/schedules, all devices are affected. 164>>> thus, if one device sleeps/schedules, all devices are affected.
104>>> Doing better requires complex multi-threaded logic in the error 165>>> Doing better requires complex multi-threaded logic in the error
105>>> recovery implementation (e.g. waiting for all notification threads 166>>> recovery implementation (e.g. waiting for all notification threads
106>>> to "join" before proceeding with recovery.) This seems excessively 167>>> to "join" before proceeding with recovery.) This seems excessively
107>>> complex and not worth implementing. 168>>> complex and not worth implementing.
108 169
109>>> The current ppc64 implementation doesn't much care if the device 170>>> The current powerpc implementation doesn't much care if the device
110>>> attempts i/o at this point, or not. I/O's will fail, returning 171>>> attempts I/O at this point, or not. I/O's will fail, returning
111>>> a value of 0xff on read, and writes will be dropped. If the device 172>>> a value of 0xff on read, and writes will be dropped. If the device
112>>> driver attempts more than 10K I/O's to a frozen adapter, it will 173>>> driver attempts more than 10K I/O's to a frozen adapter, it will
113>>> assume that the device driver has gone into an infinite loop, and 174>>> assume that the device driver has gone into an infinite loop, and
114 it will panic the the kernel. 175 it will panic the kernel. There doesn't seem to be any other
176>>> way of stopping a device driver that insists on spinning on I/O.
115 177
116 2) mmio_enabled() 178STEP 2: MMIO Enabled
179-------------------
180The platform re-enables MMIO to the device (but typically not the
181DMA), and then calls the mmio_enabled() callback on all affected
182device drivers.
117 183
118 This is the "early recovery" call. IOs are allowed again, but DMA is 184This is the "early recovery" call. IOs are allowed again, but DMA is
119not (hrm... to be discussed, I prefer not), with some restrictions. This 185not (hrm... to be discussed, I prefer not), with some restrictions. This
120is NOT a callback for the driver to start operations again, only to 186is NOT a callback for the driver to start operations again, only to
121peek/poke at the device, extract diagnostic information, if any, and 187peek/poke at the device, extract diagnostic information, if any, and
122eventually do things like trigger a device local reset or some such, 188eventually do things like trigger a device local reset or some such,
123but not restart operations. This is sent if all drivers on a segment 189but not restart operations. This is callback is made if all drivers on
124agree that they can try to recover and no automatic link reset was 190a segment agree that they can try to recover and if no automatic link reset
125performed by the HW. If the platform can't just re-enable IOs without 191was performed by the HW. If the platform can't just re-enable IOs without
126a slot reset or a link reset, it doesn't call this callback and goes 192a slot reset or a link reset, it won't call this callback, and instead
127directly to 3) or 4). All IOs should be done _synchronously_ from 193will have gone directly to STEP 3 (Link Reset) or STEP 4 (Slot Reset)
128within this callback, errors triggered by them will be returned via 194
129the normal pci_check_whatever() api, no new error_detected() callback 195>>> The following is proposed; no platform implements this yet:
130will be issued due to an error happening here. However, such an error 196>>> Proposal: All I/O's should be done _synchronously_ from within
131might cause IOs to be re-blocked for the whole segment, and thus 197>>> this callback, errors triggered by them will be returned via
132invalidate the recovery that other devices on the same segment might 198>>> the normal pci_check_whatever() API, no new error_detected()
133have done, forcing the whole segment into one of the next states, 199>>> callback will be issued due to an error happening here. However,
134that is link reset or slot reset. 200>>> such an error might cause IOs to be re-blocked for the whole
135 201>>> segment, and thus invalidate the recovery that other devices
136 Result codes: 202>>> on the same segment might have done, forcing the whole segment
137 - PCIERR_RESULT_RECOVERED 203>>> into one of the next states, that is, link reset or slot reset.
204
205The driver should return one of the following result codes:
206 - PCI_ERS_RESULT_RECOVERED
138 Driver returns this if it thinks the device is fully 207 Driver returns this if it thinks the device is fully
139 functionnal and thinks it is ready to start 208 functional and thinks it is ready to start
140 normal driver operations again. There is no 209 normal driver operations again. There is no
141 guarantee that the driver will actually be 210 guarantee that the driver will actually be
142 allowed to proceed, as another driver on the 211 allowed to proceed, as another driver on the
143 same segment might have failed and thus triggered a 212 same segment might have failed and thus triggered a
144 slot reset on platforms that support it. 213 slot reset on platforms that support it.
145 214
146 - PCIERR_RESULT_NEED_RESET 215 - PCI_ERS_RESULT_NEED_RESET
147 Driver returns this if it thinks the device is not 216 Driver returns this if it thinks the device is not
148 recoverable in it's current state and it needs a slot 217 recoverable in it's current state and it needs a slot
149 reset to proceed. 218 reset to proceed.
150 219
151 - PCIERR_RESULT_DISCONNECT 220 - PCI_ERS_RESULT_DISCONNECT
152 Same as above. Total failure, no recovery even after 221 Same as above. Total failure, no recovery even after
153 reset driver dead. (To be defined more precisely) 222 reset driver dead. (To be defined more precisely)
154 223
155>>> The current ppc64 implementation does not implement this callback. 224The next step taken depends on the results returned by the drivers.
225If all drivers returned PCI_ERS_RESULT_RECOVERED, then the platform
226proceeds to either STEP 3 (Link Reset) or to STEP 5 (Resume Operations).
227
228If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform
229proceeds to STEP 4 (Slot Reset)
156 230
157 3) link_reset() 231>>> The current powerpc implementation does not implement this callback.
158 232
159 This is called after the link has been reset. This is typically 233
160a PCI Express specific state at this point and is done whenever a 234STEP 3: Link Reset
161non-fatal error has been detected that can be "solved" by resetting 235------------------
162the link. This call informs the driver of the reset and the driver 236The platform resets the link, and then calls the link_reset() callback
163should check if the device appears to be in working condition. 237on all affected device drivers. This is a PCI-Express specific state
164This function acts a bit like 2) mmio_enabled(), in that the driver 238and is done whenever a non-fatal error has been detected that can be
165is not supposed to restart normal driver I/O operations right away. 239"solved" by resetting the link. This call informs the driver of the
166Instead, it should just "probe" the device to check it's recoverability 240reset and the driver should check to see if the device appears to be
167status. If all is right, then the core will call resume() once all 241in working condition.
168drivers have ack'd link_reset(). 242
243The driver is not supposed to restart normal driver I/O operations
244at this point. It should limit itself to "probing" the device to
245check it's recoverability status. If all is right, then the platform
246will call resume() once all drivers have ack'd link_reset().
169 247
170 Result codes: 248 Result codes:
171 (identical to mmio_enabled) 249 (identical to STEP 2 (MMIO Enabled))
250
251The platform then proceeds to either STEP 4 (Slot Reset) or STEP 5
252(Resume Operations).
253
254>>> The current powerpc implementation does not implement this callback.
255
256
257STEP 4: Slot Reset
258------------------
259The platform performs a soft or hard reset of the device, and then
260calls the slot_reset() callback.
261
262A soft reset consists of asserting the adapter #RST line and then
263restoring the PCI BAR's and PCI configuration header to a state
264that is equivalent to what it would be after a fresh system
265power-on followed by power-on BIOS/system firmware initialization.
266If the platform supports PCI hotplug, then the reset might be
267performed by toggling the slot electrical power off/on.
172 268
173>>> The current ppc64 implementation does not implement this callback. 269It is important for the platform to restore the PCI config space
270to the "fresh poweron" state, rather than the "last state". After
271a slot reset, the device driver will almost always use its standard
272device initialization routines, and an unusual config space setup
273may result in hung devices, kernel panics, or silent data corruption.
174 274
175 4) slot_reset() 275This call gives drivers the chance to re-initialize the hardware
276(re-download firmware, etc.). At this point, the driver may assume
277that the card is in a fresh state and is fully functional. In
278particular, interrupt generation should work normally.
176 279
177 This is called after the slot has been soft or hard reset by the 280Drivers should not yet restart normal I/O processing operations
178platform. A soft reset consists of asserting the adapter #RST line 281at this point. If all device drivers report success on this
179and then restoring the PCI BARs and PCI configuration header. If the 282callback, the platform will call resume() to complete the sequence,
180platform supports PCI hotplug, then it might instead perform a hard 283and let the driver restart normal I/O processing.
181reset by toggling power on the slot off/on. This call gives drivers
182the chance to re-initialize the hardware (re-download firmware, etc.),
183but drivers shouldn't restart normal I/O processing operations at
184this point. (See note about interrupts; interrupts aren't guaranteed
185to be delivered until the resume() callback has been called). If all
186device drivers report success on this callback, the patform will call
187resume() to complete the error handling and let the driver restart
188normal I/O processing.
189 284
190A driver can still return a critical failure for this function if 285A driver can still return a critical failure for this function if
191it can't get the device operational after reset. If the platform 286it can't get the device operational after reset. If the platform
192previously tried a soft reset, it migh now try a hard reset (power 287previously tried a soft reset, it might now try a hard reset (power
193cycle) and then call slot_reset() again. It the device still can't 288cycle) and then call slot_reset() again. If the device still can't
194be recovered, there is nothing more that can be done; the platform 289be recovered, there is nothing more that can be done; the platform
195will typically report a "permanent failure" in such a case. The 290will typically report a "permanent failure" in such a case. The
196device will be considered "dead" in this case. 291device will be considered "dead" in this case.
197 292
198 Result codes: 293Drivers for multi-function cards will need to coordinate among
199 - PCIERR_RESULT_DISCONNECT 294themselves as to which driver instance will perform any "one-shot"
200 Same as above. 295or global device initialization. For example, the Symbios sym53cxx2
296driver performs device init only from PCI function 0:
201 297
202>>> The current ppc64 implementation does not try a power-cycle reset 298+ if (PCI_FUNC(pdev->devfn) == 0)
203>>> if the driver returned PCIERR_RESULT_DISCONNECT. However, it should. 299+ sym_reset_scsi_bus(np, 0);
204 300
205 5) resume() 301 Result codes:
206 302 - PCI_ERS_RESULT_DISCONNECT
207 This is called if all drivers on the segment have returned 303 Same as above.
208PCIERR_RESULT_RECOVERED from one of the 3 prevous callbacks.
209That basically tells the driver to restart activity, tht everything
210is back and running. No result code is taken into account here. If
211a new error happens, it will restart a new error handling process.
212 304
213That's it. I think this covers all the possibilities. The way those 305Platform proceeds either to STEP 5 (Resume Operations) or STEP 6 (Permanent
214callbacks are called is platform policy. A platform with no slot reset 306Failure).
215capability for example may want to just "ignore" drivers that can't 307
308>>> The current powerpc implementation does not currently try a
309>>> power-cycle reset if the driver returned PCI_ERS_RESULT_DISCONNECT.
310>>> However, it probably should.
311
312
313STEP 5: Resume Operations
314-------------------------
315The platform will call the resume() callback on all affected device
316drivers if all drivers on the segment have returned
317PCI_ERS_RESULT_RECOVERED from one of the 3 previous callbacks.
318The goal of this callback is to tell the driver to restart activity,
319that everything is back and running. This callback does not return
320a result code.
321
322At this point, if a new error happens, the platform will restart
323a new error recovery sequence.
324
325STEP 6: Permanent Failure
326-------------------------
327A "permanent failure" has occurred, and the platform cannot recover
328the device. The platform will call error_detected() with a
329pci_channel_state value of pci_channel_io_perm_failure.
330
331The device driver should, at this point, assume the worst. It should
332cancel all pending I/O, refuse all new I/O, returning -EIO to
333higher layers. The device driver should then clean up all of its
334memory and remove itself from kernel operations, much as it would
335during system shutdown.
336
337The platform will typically notify the system operator of the
338permanent failure in some way. If the device is hotplug-capable,
339the operator will probably want to remove and replace the device.
340Note, however, not all failures are truly "permanent". Some are
341caused by over-heating, some by a poorly seated card. Many
342PCI error events are caused by software bugs, e.g. DMA's to
343wild addresses or bogus split transactions due to programming
344errors. See the discussion in powerpc/eeh-pci-error-recovery.txt
345for additional detail on real-life experience of the causes of
346software errors.
347
348
349Conclusion; General Remarks
350---------------------------
351The way those callbacks are called is platform policy. A platform with
352no slot reset capability may want to just "ignore" drivers that can't
216recover (disconnect them) and try to let other cards on the same segment 353recover (disconnect them) and try to let other cards on the same segment
217recover. Keep in mind that in most real life cases, though, there will 354recover. Keep in mind that in most real life cases, though, there will
218be only one driver per segment. 355be only one driver per segment.
219 356
220Now, there is a note about interrupts. If you get an interrupt and your 357Now, a note about interrupts. If you get an interrupt and your
221device is dead or has been isolated, there is a problem :) 358device is dead or has been isolated, there is a problem :)
222 359The current policy is to turn this into a platform policy.
223After much thinking, I decided to leave that to the platform. That is, 360That is, the recovery API only requires that:
224the recovery API only precies that:
225 361
226 - There is no guarantee that interrupt delivery can proceed from any 362 - There is no guarantee that interrupt delivery can proceed from any
227device on the segment starting from the error detection and until the 363device on the segment starting from the error detection and until the
228restart callback is sent, at which point interrupts are expected to be 364resume callback is sent, at which point interrupts are expected to be
229fully operational. 365fully operational.
230 366
231 - There is no guarantee that interrupt delivery is stopped, that is, ad 367 - There is no guarantee that interrupt delivery is stopped, that is,
232river that gets an interrupts after detecting an error, or that detects 368a driver that gets an interrupt after detecting an error, or that detects
233and error within the interrupt handler such that it prevents proper 369an error within the interrupt handler such that it prevents proper
234ack'ing of the interrupt (and thus removal of the source) should just 370ack'ing of the interrupt (and thus removal of the source) should just
235return IRQ_NOTHANDLED. It's up to the platform to deal with taht 371return IRQ_NOTHANDLED. It's up to the platform to deal with that
236condition, typically by masking the irq source during the duration of 372condition, typically by masking the IRQ source during the duration of
237the error handling. It is expected that the platform "knows" which 373the error handling. It is expected that the platform "knows" which
238interrupts are routed to error-management capable slots and can deal 374interrupts are routed to error-management capable slots and can deal
239with temporarily disabling that irq number during error processing (this 375with temporarily disabling that IRQ number during error processing (this
240isn't terribly complex). That means some IRQ latency for other devices 376isn't terribly complex). That means some IRQ latency for other devices
241sharing the interrupt, but there is simply no other way. High end 377sharing the interrupt, but there is simply no other way. High end
242platforms aren't supposed to share interrupts between many devices 378platforms aren't supposed to share interrupts between many devices
243anyway :) 379anyway :)
244 380
245 381>>> Implementation details for the powerpc platform are discussed in
246Revised: 31 May 2005 Linas Vepstas <linas@austin.ibm.com> 382>>> the file Documentation/powerpc/eeh-pci-error-recovery.txt
383
384>>> As of this writing, there are six device drivers with patches
385>>> implementing error recovery. Not all of these patches are in
386>>> mainline yet. These may be used as "examples":
387>>>
388>>> drivers/scsi/ipr.c
389>>> drivers/scsi/sym53cxx_2
390>>> drivers/net/e100.c
391>>> drivers/net/e1000
392>>> drivers/net/ixgb
393>>> drivers/net/s2io.c
394
395The End
396-------
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 9c5fc15d03d1..153740f460a6 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -40,6 +40,18 @@ APICs
40 no_timer_check Don't check the IO-APIC timer. This can work around 40 no_timer_check Don't check the IO-APIC timer. This can work around
41 problems with incorrect timer initialization on some boards. 41 problems with incorrect timer initialization on some boards.
42 42
43 apicmaintimer Run time keeping from the local APIC timer instead
44 of using the PIT/HPET interrupt for this. This is useful
45 when the PIT/HPET interrupts are unreliable.
46
47 noapicmaintimer Don't do time keeping using the APIC timer.
48 Useful when this option was auto selected, but doesn't work.
49
50 apicpmtimer
51 Do APIC timer calibration using the pmtimer. Implies
52 apicmaintimer. Useful when your PIT timer is totally
53 broken.
54
43Early Console 55Early Console
44 56
45 syntax: earlyprintk=vga 57 syntax: earlyprintk=vga
diff --git a/MAINTAINERS b/MAINTAINERS
index 42955fe1ffa0..11d44daa6025 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -557,7 +557,8 @@ S: Supported
557 557
558CONFIGFS 558CONFIGFS
559P: Joel Becker 559P: Joel Becker
560M: Joel Becker <joel.becker@oracle.com> 560M: joel.becker@oracle.com
561L: linux-kernel@vger.kernel.org
561S: Supported 562S: Supported
562 563
563CIRRUS LOGIC GENERIC FBDEV DRIVER 564CIRRUS LOGIC GENERIC FBDEV DRIVER
@@ -1984,7 +1985,6 @@ M: philb@gnu.org
1984P: Tim Waugh 1985P: Tim Waugh
1985M: tim@cyberelk.net 1986M: tim@cyberelk.net
1986P: David Campbell 1987P: David Campbell
1987M: campbell@torque.net
1988P: Andrea Arcangeli 1988P: Andrea Arcangeli
1989M: andrea@suse.de 1989M: andrea@suse.de
1990L: linux-parport@lists.infradead.org 1990L: linux-parport@lists.infradead.org
@@ -2298,7 +2298,7 @@ S: Supported
2298 2298
2299SELINUX SECURITY MODULE 2299SELINUX SECURITY MODULE
2300P: Stephen Smalley 2300P: Stephen Smalley
2301M: sds@epoch.ncsc.mil 2301M: sds@tycho.nsa.gov
2302P: James Morris 2302P: James Morris
2303M: jmorris@namei.org 2303M: jmorris@namei.org
2304L: linux-kernel@vger.kernel.org (kernel issues) 2304L: linux-kernel@vger.kernel.org (kernel issues)
diff --git a/Makefile b/Makefile
index 252a659896f3..cd5b619db9d8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 16 3SUBLEVEL = 16
4EXTRAVERSION =-rc1 4EXTRAVERSION =-rc2
5NAME=Sliding Snow Leopard 5NAME=Sliding Snow Leopard
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/configs/at91rm9200dk_defconfig b/arch/arm/configs/at91rm9200dk_defconfig
index 5cdd13acf8ff..1fe73d198888 100644
--- a/arch/arm/configs/at91rm9200dk_defconfig
+++ b/arch/arm/configs/at91rm9200dk_defconfig
@@ -85,7 +85,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
85# CONFIG_ARCH_CLPS711X is not set 85# CONFIG_ARCH_CLPS711X is not set
86# CONFIG_ARCH_CO285 is not set 86# CONFIG_ARCH_CO285 is not set
87# CONFIG_ARCH_EBSA110 is not set 87# CONFIG_ARCH_EBSA110 is not set
88# CONFIG_ARCH_CAMELOT is not set
89# CONFIG_ARCH_FOOTBRIDGE is not set 88# CONFIG_ARCH_FOOTBRIDGE is not set
90# CONFIG_ARCH_INTEGRATOR is not set 89# CONFIG_ARCH_INTEGRATOR is not set
91# CONFIG_ARCH_IOP3XX is not set 90# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/at91rm9200ek_defconfig b/arch/arm/configs/at91rm9200ek_defconfig
index 20838ccf1da7..b7d934cdb1b7 100644
--- a/arch/arm/configs/at91rm9200ek_defconfig
+++ b/arch/arm/configs/at91rm9200ek_defconfig
@@ -85,7 +85,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
85# CONFIG_ARCH_CLPS711X is not set 85# CONFIG_ARCH_CLPS711X is not set
86# CONFIG_ARCH_CO285 is not set 86# CONFIG_ARCH_CO285 is not set
87# CONFIG_ARCH_EBSA110 is not set 87# CONFIG_ARCH_EBSA110 is not set
88# CONFIG_ARCH_CAMELOT is not set
89# CONFIG_ARCH_FOOTBRIDGE is not set 88# CONFIG_ARCH_FOOTBRIDGE is not set
90# CONFIG_ARCH_INTEGRATOR is not set 89# CONFIG_ARCH_INTEGRATOR is not set
91# CONFIG_ARCH_IOP3XX is not set 90# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/csb337_defconfig b/arch/arm/configs/csb337_defconfig
index 885a3184830a..94bd9932a402 100644
--- a/arch/arm/configs/csb337_defconfig
+++ b/arch/arm/configs/csb337_defconfig
@@ -85,7 +85,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
85# CONFIG_ARCH_CLPS711X is not set 85# CONFIG_ARCH_CLPS711X is not set
86# CONFIG_ARCH_CO285 is not set 86# CONFIG_ARCH_CO285 is not set
87# CONFIG_ARCH_EBSA110 is not set 87# CONFIG_ARCH_EBSA110 is not set
88# CONFIG_ARCH_CAMELOT is not set
89# CONFIG_ARCH_FOOTBRIDGE is not set 88# CONFIG_ARCH_FOOTBRIDGE is not set
90# CONFIG_ARCH_INTEGRATOR is not set 89# CONFIG_ARCH_INTEGRATOR is not set
91# CONFIG_ARCH_IOP3XX is not set 90# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/csb637_defconfig b/arch/arm/configs/csb637_defconfig
index 95a96a5462a0..1519124c5501 100644
--- a/arch/arm/configs/csb637_defconfig
+++ b/arch/arm/configs/csb637_defconfig
@@ -85,7 +85,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
85# CONFIG_ARCH_CLPS711X is not set 85# CONFIG_ARCH_CLPS711X is not set
86# CONFIG_ARCH_CO285 is not set 86# CONFIG_ARCH_CO285 is not set
87# CONFIG_ARCH_EBSA110 is not set 87# CONFIG_ARCH_EBSA110 is not set
88# CONFIG_ARCH_CAMELOT is not set
89# CONFIG_ARCH_FOOTBRIDGE is not set 88# CONFIG_ARCH_FOOTBRIDGE is not set
90# CONFIG_ARCH_INTEGRATOR is not set 89# CONFIG_ARCH_INTEGRATOR is not set
91# CONFIG_ARCH_IOP3XX is not set 90# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index b41b1efaa2cf..3baa70819f24 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -44,7 +44,7 @@ unsigned int get_clk_frequency_khz( int info)
44 44
45 /* Read clkcfg register: it has turbo, b, half-turbo (and f) */ 45 /* Read clkcfg register: it has turbo, b, half-turbo (and f) */
46 asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg) ); 46 asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg) );
47 t = clkcfg & (1 << 1); 47 t = clkcfg & (1 << 0);
48 ht = clkcfg & (1 << 2); 48 ht = clkcfg & (1 << 2);
49 b = clkcfg & (1 << 3); 49 b = clkcfg & (1 << 3);
50 50
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index b4f1e051c768..1217bf00309c 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -10,9 +10,13 @@ obj-m :=
10obj-n := 10obj-n :=
11obj- := 11obj- :=
12 12
13# S3C2400 support files
14obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o
15
13# S3C2410 support files 16# S3C2410 support files
14 17
15obj-$(CONFIG_CPU_S3C2410) += s3c2410.o 18obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
19obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o
16obj-$(CONFIG_S3C2410_DMA) += dma.o 20obj-$(CONFIG_S3C2410_DMA) += dma.o
17 21
18# Power Management support 22# Power Management support
@@ -25,6 +29,7 @@ obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
25obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o 29obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
26obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o 30obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o
27obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o 31obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o
32obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o
28 33
29# bast extras 34# bast extras
30 35
diff --git a/arch/arm/mach-s3c2410/cpu.c b/arch/arm/mach-s3c2410/cpu.c
index 687fe371369d..00a379334b60 100644
--- a/arch/arm/mach-s3c2410/cpu.c
+++ b/arch/arm/mach-s3c2410/cpu.c
@@ -40,6 +40,7 @@
40 40
41#include "cpu.h" 41#include "cpu.h"
42#include "clock.h" 42#include "clock.h"
43#include "s3c2400.h"
43#include "s3c2410.h" 44#include "s3c2410.h"
44#include "s3c2440.h" 45#include "s3c2440.h"
45 46
@@ -55,6 +56,7 @@ struct cpu_table {
55 56
56/* table of supported CPUs */ 57/* table of supported CPUs */
57 58
59static const char name_s3c2400[] = "S3C2400";
58static const char name_s3c2410[] = "S3C2410"; 60static const char name_s3c2410[] = "S3C2410";
59static const char name_s3c2440[] = "S3C2440"; 61static const char name_s3c2440[] = "S3C2440";
60static const char name_s3c2410a[] = "S3C2410A"; 62static const char name_s3c2410a[] = "S3C2410A";
@@ -96,7 +98,16 @@ static struct cpu_table cpu_ids[] __initdata = {
96 .init_uarts = s3c2440_init_uarts, 98 .init_uarts = s3c2440_init_uarts,
97 .init = s3c2440_init, 99 .init = s3c2440_init,
98 .name = name_s3c2440a 100 .name = name_s3c2440a
99 } 101 },
102 {
103 .idcode = 0x0, /* S3C2400 doesn't have an idcode */
104 .idmask = 0xffffffff,
105 .map_io = s3c2400_map_io,
106 .init_clocks = s3c2400_init_clocks,
107 .init_uarts = s3c2400_init_uarts,
108 .init = s3c2400_init,
109 .name = name_s3c2400
110 },
100}; 111};
101 112
102/* minimal IO mapping */ 113/* minimal IO mapping */
@@ -148,12 +159,15 @@ static struct cpu_table *cpu;
148 159
149void __init s3c24xx_init_io(struct map_desc *mach_desc, int size) 160void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
150{ 161{
151 unsigned long idcode; 162 unsigned long idcode = 0x0;
152 163
153 /* initialise the io descriptors we need for initialisation */ 164 /* initialise the io descriptors we need for initialisation */
154 iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc)); 165 iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
155 166
167#ifndef CONFIG_CPU_S3C2400
156 idcode = __raw_readl(S3C2410_GSTATUS1); 168 idcode = __raw_readl(S3C2410_GSTATUS1);
169#endif
170
157 cpu = s3c_lookup_cpu(idcode); 171 cpu = s3c_lookup_cpu(idcode);
158 172
159 if (cpu == NULL) { 173 if (cpu == NULL) {
diff --git a/arch/arm/mach-s3c2410/gpio.c b/arch/arm/mach-s3c2410/gpio.c
index 23ea3d5fa09c..cd39e8684584 100644
--- a/arch/arm/mach-s3c2410/gpio.c
+++ b/arch/arm/mach-s3c2410/gpio.c
@@ -31,6 +31,7 @@
31 * 05-Nov-2004 BJD EXPORT_SYMBOL() added for all code 31 * 05-Nov-2004 BJD EXPORT_SYMBOL() added for all code
32 * 13-Mar-2005 BJD Updates for __iomem 32 * 13-Mar-2005 BJD Updates for __iomem
33 * 26-Oct-2005 BJD Added generic configuration types 33 * 26-Oct-2005 BJD Added generic configuration types
34 * 15-Jan-2006 LCVR Added support for the S3C2400
34 */ 35 */
35 36
36 37
@@ -48,7 +49,7 @@
48 49
49void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function) 50void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function)
50{ 51{
51 void __iomem *base = S3C2410_GPIO_BASE(pin); 52 void __iomem *base = S3C24XX_GPIO_BASE(pin);
52 unsigned long mask; 53 unsigned long mask;
53 unsigned long con; 54 unsigned long con;
54 unsigned long flags; 55 unsigned long flags;
@@ -95,7 +96,7 @@ EXPORT_SYMBOL(s3c2410_gpio_cfgpin);
95 96
96unsigned int s3c2410_gpio_getcfg(unsigned int pin) 97unsigned int s3c2410_gpio_getcfg(unsigned int pin)
97{ 98{
98 void __iomem *base = S3C2410_GPIO_BASE(pin); 99 void __iomem *base = S3C24XX_GPIO_BASE(pin);
99 unsigned long mask; 100 unsigned long mask;
100 101
101 if (pin < S3C2410_GPIO_BANKB) { 102 if (pin < S3C2410_GPIO_BANKB) {
@@ -111,7 +112,7 @@ EXPORT_SYMBOL(s3c2410_gpio_getcfg);
111 112
112void s3c2410_gpio_pullup(unsigned int pin, unsigned int to) 113void s3c2410_gpio_pullup(unsigned int pin, unsigned int to)
113{ 114{
114 void __iomem *base = S3C2410_GPIO_BASE(pin); 115 void __iomem *base = S3C24XX_GPIO_BASE(pin);
115 unsigned long offs = S3C2410_GPIO_OFFSET(pin); 116 unsigned long offs = S3C2410_GPIO_OFFSET(pin);
116 unsigned long flags; 117 unsigned long flags;
117 unsigned long up; 118 unsigned long up;
@@ -133,7 +134,7 @@ EXPORT_SYMBOL(s3c2410_gpio_pullup);
133 134
134void s3c2410_gpio_setpin(unsigned int pin, unsigned int to) 135void s3c2410_gpio_setpin(unsigned int pin, unsigned int to)
135{ 136{
136 void __iomem *base = S3C2410_GPIO_BASE(pin); 137 void __iomem *base = S3C24XX_GPIO_BASE(pin);
137 unsigned long offs = S3C2410_GPIO_OFFSET(pin); 138 unsigned long offs = S3C2410_GPIO_OFFSET(pin);
138 unsigned long flags; 139 unsigned long flags;
139 unsigned long dat; 140 unsigned long dat;
@@ -152,7 +153,7 @@ EXPORT_SYMBOL(s3c2410_gpio_setpin);
152 153
153unsigned int s3c2410_gpio_getpin(unsigned int pin) 154unsigned int s3c2410_gpio_getpin(unsigned int pin)
154{ 155{
155 void __iomem *base = S3C2410_GPIO_BASE(pin); 156 void __iomem *base = S3C24XX_GPIO_BASE(pin);
156 unsigned long offs = S3C2410_GPIO_OFFSET(pin); 157 unsigned long offs = S3C2410_GPIO_OFFSET(pin);
157 158
158 return __raw_readl(base + 0x04) & (1<< offs); 159 return __raw_readl(base + 0x04) & (1<< offs);
@@ -166,70 +167,13 @@ unsigned int s3c2410_modify_misccr(unsigned int clear, unsigned int change)
166 unsigned long misccr; 167 unsigned long misccr;
167 168
168 local_irq_save(flags); 169 local_irq_save(flags);
169 misccr = __raw_readl(S3C2410_MISCCR); 170 misccr = __raw_readl(S3C24XX_MISCCR);
170 misccr &= ~clear; 171 misccr &= ~clear;
171 misccr ^= change; 172 misccr ^= change;
172 __raw_writel(misccr, S3C2410_MISCCR); 173 __raw_writel(misccr, S3C24XX_MISCCR);
173 local_irq_restore(flags); 174 local_irq_restore(flags);
174 175
175 return misccr; 176 return misccr;
176} 177}
177 178
178EXPORT_SYMBOL(s3c2410_modify_misccr); 179EXPORT_SYMBOL(s3c2410_modify_misccr);
179
180int s3c2410_gpio_getirq(unsigned int pin)
181{
182 if (pin < S3C2410_GPF0 || pin > S3C2410_GPG15_EINT23)
183 return -1; /* not valid interrupts */
184
185 if (pin < S3C2410_GPG0 && pin > S3C2410_GPF7)
186 return -1; /* not valid pin */
187
188 if (pin < S3C2410_GPF4)
189 return (pin - S3C2410_GPF0) + IRQ_EINT0;
190
191 if (pin < S3C2410_GPG0)
192 return (pin - S3C2410_GPF4) + IRQ_EINT4;
193
194 return (pin - S3C2410_GPG0) + IRQ_EINT8;
195}
196
197EXPORT_SYMBOL(s3c2410_gpio_getirq);
198
199int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on,
200 unsigned int config)
201{
202 void __iomem *reg = S3C2410_EINFLT0;
203 unsigned long flags;
204 unsigned long val;
205
206 if (pin < S3C2410_GPG8 || pin > S3C2410_GPG15)
207 return -1;
208
209 config &= 0xff;
210
211 pin -= S3C2410_GPG8_EINT16;
212 reg += pin & ~3;
213
214 local_irq_save(flags);
215
216 /* update filter width and clock source */
217
218 val = __raw_readl(reg);
219 val &= ~(0xff << ((pin & 3) * 8));
220 val |= config << ((pin & 3) * 8);
221 __raw_writel(val, reg);
222
223 /* update filter enable */
224
225 val = __raw_readl(S3C2410_EXTINT2);
226 val &= ~(1 << ((pin * 4) + 3));
227 val |= on << ((pin * 4) + 3);
228 __raw_writel(val, S3C2410_EXTINT2);
229
230 local_irq_restore(flags);
231
232 return 0;
233}
234
235EXPORT_SYMBOL(s3c2410_gpio_irqfilter);
diff --git a/arch/arm/mach-s3c2410/s3c2400-gpio.c b/arch/arm/mach-s3c2410/s3c2400-gpio.c
new file mode 100644
index 000000000000..5127f39fa9bf
--- /dev/null
+++ b/arch/arm/mach-s3c2410/s3c2400-gpio.c
@@ -0,0 +1,45 @@
1/* linux/arch/arm/mach-s3c2410/gpio.c
2 *
3 * Copyright (c) 2006 Lucas Correia Villa Real <lucasvr@gobolinux.org>
4 *
5 * S3C2400 GPIO support
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Changelog
22 * 15-Jan-2006 LCVR Splitted from gpio.c, adding support for the S3C2400
23 */
24
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/interrupt.h>
29#include <linux/ioport.h>
30
31#include <asm/hardware.h>
32#include <asm/irq.h>
33#include <asm/io.h>
34
35#include <asm/arch/regs-gpio.h>
36
37int s3c2400_gpio_getirq(unsigned int pin)
38{
39 if (pin < S3C2410_GPE0 || pin > S3C2400_GPE7_EINT7)
40 return -1; /* not valid interrupts */
41
42 return (pin - S3C2410_GPE0) + IRQ_EINT0;
43}
44
45EXPORT_SYMBOL(s3c2400_gpio_getirq);
diff --git a/arch/arm/mach-s3c2410/s3c2410-gpio.c b/arch/arm/mach-s3c2410/s3c2410-gpio.c
new file mode 100644
index 000000000000..d5e1caea1d23
--- /dev/null
+++ b/arch/arm/mach-s3c2410/s3c2410-gpio.c
@@ -0,0 +1,93 @@
1/* linux/arch/arm/mach-s3c2410/gpio.c
2 *
3 * Copyright (c) 2004-2006 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C2410 GPIO support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * Changelog
23 * 15-Jan-2006 LCVR Splitted from gpio.c
24 */
25
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/interrupt.h>
30#include <linux/ioport.h>
31
32#include <asm/hardware.h>
33#include <asm/irq.h>
34#include <asm/io.h>
35
36#include <asm/arch/regs-gpio.h>
37
38int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on,
39 unsigned int config)
40{
41 void __iomem *reg = S3C2410_EINFLT0;
42 unsigned long flags;
43 unsigned long val;
44
45 if (pin < S3C2410_GPG8 || pin > S3C2410_GPG15)
46 return -1;
47
48 config &= 0xff;
49
50 pin -= S3C2410_GPG8_EINT16;
51 reg += pin & ~3;
52
53 local_irq_save(flags);
54
55 /* update filter width and clock source */
56
57 val = __raw_readl(reg);
58 val &= ~(0xff << ((pin & 3) * 8));
59 val |= config << ((pin & 3) * 8);
60 __raw_writel(val, reg);
61
62 /* update filter enable */
63
64 val = __raw_readl(S3C2410_EXTINT2);
65 val &= ~(1 << ((pin * 4) + 3));
66 val |= on << ((pin * 4) + 3);
67 __raw_writel(val, S3C2410_EXTINT2);
68
69 local_irq_restore(flags);
70
71 return 0;
72}
73
74EXPORT_SYMBOL(s3c2410_gpio_irqfilter);
75
76int s3c2410_gpio_getirq(unsigned int pin)
77{
78 if (pin < S3C2410_GPF0 || pin > S3C2410_GPG15_EINT23)
79 return -1; /* not valid interrupts */
80
81 if (pin < S3C2410_GPG0 && pin > S3C2410_GPF7)
82 return -1; /* not valid pin */
83
84 if (pin < S3C2410_GPF4)
85 return (pin - S3C2410_GPF0) + IRQ_EINT0;
86
87 if (pin < S3C2410_GPG0)
88 return (pin - S3C2410_GPF4) + IRQ_EINT4;
89
90 return (pin - S3C2410_GPG0) + IRQ_EINT8;
91}
92
93EXPORT_SYMBOL(s3c2410_gpio_getirq);
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
index e9a055b779b7..832fb86a03b4 100644
--- a/arch/arm/mach-s3c2410/sleep.S
+++ b/arch/arm/mach-s3c2410/sleep.S
@@ -72,7 +72,7 @@ ENTRY(s3c2410_cpu_suspend)
72 @@ prepare cpu to sleep 72 @@ prepare cpu to sleep
73 73
74 ldr r4, =S3C2410_REFRESH 74 ldr r4, =S3C2410_REFRESH
75 ldr r5, =S3C2410_MISCCR 75 ldr r5, =S3C24XX_MISCCR
76 ldr r6, =S3C2410_CLKCON 76 ldr r6, =S3C2410_CLKCON
77 ldr r7, [ r4 ] @ get REFRESH (and ensure in TLB) 77 ldr r7, [ r4 ] @ get REFRESH (and ensure in TLB)
78 ldr r8, [ r5 ] @ get MISCCR (and ensure in TLB) 78 ldr r8, [ r5 ] @ get MISCCR (and ensure in TLB)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 72966d90e956..d921c1024ae0 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -92,22 +92,16 @@ ENTRY(v6_coherent_kern_range)
92 * - the Icache does not read data from the write buffer 92 * - the Icache does not read data from the write buffer
93 */ 93 */
94ENTRY(v6_coherent_user_range) 94ENTRY(v6_coherent_user_range)
95 bic r0, r0, #CACHE_LINE_SIZE - 1 95
961:
97#ifdef HARVARD_CACHE 96#ifdef HARVARD_CACHE
98 mcr p15, 0, r0, c7, c10, 1 @ clean D line 97 bic r0, r0, #CACHE_LINE_SIZE - 1
981: mcr p15, 0, r0, c7, c10, 1 @ clean D line
99 mcr p15, 0, r0, c7, c5, 1 @ invalidate I line 99 mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
100#endif 100 add r0, r0, #CACHE_LINE_SIZE
101 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
102 add r0, r0, #BTB_FLUSH_SIZE
103 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
104 add r0, r0, #BTB_FLUSH_SIZE
105 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
106 add r0, r0, #BTB_FLUSH_SIZE
107 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
108 add r0, r0, #BTB_FLUSH_SIZE
109 cmp r0, r1 101 cmp r0, r1
110 blo 1b 102 blo 1b
103#endif
104 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
111#ifdef HARVARD_CACHE 105#ifdef HARVARD_CACHE
112 mov r0, #0 106 mov r0, #0
113 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 107 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 861b35947280..2d3823ec3153 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -241,7 +241,15 @@ ENTRY(xscale_flush_user_cache_range)
241 * it also trashes the mini I-cache used by JTAG debuggers. 241 * it also trashes the mini I-cache used by JTAG debuggers.
242 */ 242 */
243ENTRY(xscale_coherent_kern_range) 243ENTRY(xscale_coherent_kern_range)
244 /* FALLTHROUGH */ 244 bic r0, r0, #CACHELINESIZE - 1
2451: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
246 add r0, r0, #CACHELINESIZE
247 cmp r0, r1
248 blo 1b
249 mov r0, #0
250 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
251 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
252 mov pc, lr
245 253
246/* 254/*
247 * coherent_user_range(start, end) 255 * coherent_user_range(start, end)
@@ -252,18 +260,16 @@ ENTRY(xscale_coherent_kern_range)
252 * 260 *
253 * - start - virtual start address 261 * - start - virtual start address
254 * - end - virtual end address 262 * - end - virtual end address
255 *
256 * Note: single I-cache line invalidation isn't used here since
257 * it also trashes the mini I-cache used by JTAG debuggers.
258 */ 263 */
259ENTRY(xscale_coherent_user_range) 264ENTRY(xscale_coherent_user_range)
260 bic r0, r0, #CACHELINESIZE - 1 265 bic r0, r0, #CACHELINESIZE - 1
2611: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 2661: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
267 mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache entry
262 add r0, r0, #CACHELINESIZE 268 add r0, r0, #CACHELINESIZE
263 cmp r0, r1 269 cmp r0, r1
264 blo 1b 270 blo 1b
265 mov r0, #0 271 mov r0, #0
266 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 272 mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
267 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 273 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
268 mov pc, lr 274 mov pc, lr
269 275
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 1415930ceee1..6f8bc1f0e6a1 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -137,8 +137,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
137 if (spec) { 137 if (spec) {
138 init_MUTEX(&op_arm_sem); 138 init_MUTEX(&op_arm_sem);
139 139
140 if (spec->init() < 0) 140 ret = spec->init();
141 return -ENODEV; 141 if (ret < 0)
142 return ret;
142 143
143 op_arm_model = spec; 144 op_arm_model = spec;
144 init_driverfs(); 145 init_driverfs();
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d86c865a7cd2..0afec8566e7b 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -442,6 +442,7 @@ config HIGHMEM4G
442 442
443config HIGHMEM64G 443config HIGHMEM64G
444 bool "64GB" 444 bool "64GB"
445 depends on X86_CMPXCHG64
445 help 446 help
446 Select this if you have a 32-bit processor and more than 4 447 Select this if you have a 32-bit processor and more than 4
447 gigabytes of physical RAM. 448 gigabytes of physical RAM.
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index f21fa0d4482f..79577f0ace98 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -248,10 +248,17 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
248 248
249 acpi_table_print_madt_entry(header); 249 acpi_table_print_madt_entry(header);
250 250
251 /* Register even disabled CPUs for cpu hotplug */ 251 /* Record local apic id only when enabled */
252 252 if (processor->flags.enabled)
253 x86_acpiid_to_apicid[processor->acpi_id] = processor->id; 253 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
254 254
255 /*
256 * We need to register disabled CPU as well to permit
257 * counting disabled CPUs. This allows us to size
258 * cpus_possible_map more accurately, to permit
259 * to not preallocating memory for all NR_CPUS
260 * when we use CPU hotplug.
261 */
255 mp_register_lapic(processor->id, /* APIC ID */ 262 mp_register_lapic(processor->id, /* APIC ID */
256 processor->flags.enabled); /* Enabled? */ 263 processor->flags.enabled); /* Enabled? */
257 264
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index acd3f1e34ca6..98a5c23cf3df 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -75,8 +75,10 @@ void ack_bad_irq(unsigned int irq)
75 * holds up an irq slot - in excessive cases (when multiple 75 * holds up an irq slot - in excessive cases (when multiple
76 * unexpected vectors occur) that might lock up the APIC 76 * unexpected vectors occur) that might lock up the APIC
77 * completely. 77 * completely.
78 * But only ack when the APIC is enabled -AK
78 */ 79 */
79 ack_APIC_irq(); 80 if (!cpu_has_apic)
81 ack_APIC_irq();
80} 82}
81 83
82void __init apic_intr_init(void) 84void __init apic_intr_init(void)
@@ -1303,6 +1305,7 @@ int __init APIC_init_uniprocessor (void)
1303 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { 1305 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1304 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", 1306 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1305 boot_cpu_physical_apicid); 1307 boot_cpu_physical_apicid);
1308 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1306 return -1; 1309 return -1;
1307 } 1310 }
1308 1311
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 333578a4e91a..0810f81f2a05 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -282,3 +282,11 @@ int __init amd_init_cpu(void)
282} 282}
283 283
284//early_arch_initcall(amd_init_cpu); 284//early_arch_initcall(amd_init_cpu);
285
286static int __init amd_exit_cpu(void)
287{
288 cpu_devs[X86_VENDOR_AMD] = NULL;
289 return 0;
290}
291
292late_initcall(amd_exit_cpu);
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index 0dd92a23d622..f52669ecb93f 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -470,3 +470,11 @@ int __init centaur_init_cpu(void)
470} 470}
471 471
472//early_arch_initcall(centaur_init_cpu); 472//early_arch_initcall(centaur_init_cpu);
473
474static int __init centaur_exit_cpu(void)
475{
476 cpu_devs[X86_VENDOR_CENTAUR] = NULL;
477 return 0;
478}
479
480late_initcall(centaur_exit_cpu);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 15aee26ec2b6..7eb9213734a3 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -44,6 +44,7 @@ static void default_init(struct cpuinfo_x86 * c)
44 44
45static struct cpu_dev default_cpu = { 45static struct cpu_dev default_cpu = {
46 .c_init = default_init, 46 .c_init = default_init,
47 .c_vendor = "Unknown",
47}; 48};
48static struct cpu_dev * this_cpu = &default_cpu; 49static struct cpu_dev * this_cpu = &default_cpu;
49 50
@@ -150,6 +151,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
150{ 151{
151 char *v = c->x86_vendor_id; 152 char *v = c->x86_vendor_id;
152 int i; 153 int i;
154 static int printed;
153 155
154 for (i = 0; i < X86_VENDOR_NUM; i++) { 156 for (i = 0; i < X86_VENDOR_NUM; i++) {
155 if (cpu_devs[i]) { 157 if (cpu_devs[i]) {
@@ -159,10 +161,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
159 c->x86_vendor = i; 161 c->x86_vendor = i;
160 if (!early) 162 if (!early)
161 this_cpu = cpu_devs[i]; 163 this_cpu = cpu_devs[i];
162 break; 164 return;
163 } 165 }
164 } 166 }
165 } 167 }
168 if (!printed) {
169 printed++;
170 printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
171 printk(KERN_ERR "CPU: Your system may be unstable.\n");
172 }
173 c->x86_vendor = X86_VENDOR_UNKNOWN;
174 this_cpu = &default_cpu;
166} 175}
167 176
168 177
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 75015975d038..00f2e058797c 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -345,7 +345,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
345/* 345/*
346 * Handle National Semiconductor branded processors 346 * Handle National Semiconductor branded processors
347 */ 347 */
348static void __devinit init_nsc(struct cpuinfo_x86 *c) 348static void __init init_nsc(struct cpuinfo_x86 *c)
349{ 349{
350 /* There may be GX1 processors in the wild that are branded 350 /* There may be GX1 processors in the wild that are branded
351 * NSC and not Cyrix. 351 * NSC and not Cyrix.
@@ -444,6 +444,14 @@ int __init cyrix_init_cpu(void)
444 444
445//early_arch_initcall(cyrix_init_cpu); 445//early_arch_initcall(cyrix_init_cpu);
446 446
447static int __init cyrix_exit_cpu(void)
448{
449 cpu_devs[X86_VENDOR_CYRIX] = NULL;
450 return 0;
451}
452
453late_initcall(cyrix_exit_cpu);
454
447static struct cpu_dev nsc_cpu_dev __initdata = { 455static struct cpu_dev nsc_cpu_dev __initdata = {
448 .c_vendor = "NSC", 456 .c_vendor = "NSC",
449 .c_ident = { "Geode by NSC" }, 457 .c_ident = { "Geode by NSC" },
@@ -458,3 +466,11 @@ int __init nsc_init_cpu(void)
458} 466}
459 467
460//early_arch_initcall(nsc_init_cpu); 468//early_arch_initcall(nsc_init_cpu);
469
470static int __init nsc_exit_cpu(void)
471{
472 cpu_devs[X86_VENDOR_NSC] = NULL;
473 return 0;
474}
475
476late_initcall(nsc_exit_cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index af591c73345f..ffe58cee0c48 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -152,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
152 return 0; 152 return 0;
153} 153}
154 154
155/* will only be called once; __init is safe here */
155static int __init find_num_cache_leaves(void) 156static int __init find_num_cache_leaves(void)
156{ 157{
157 unsigned int eax, ebx, ecx, edx; 158 unsigned int eax, ebx, ecx, edx;
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
index 30898a260a5c..ad87fa58058d 100644
--- a/arch/i386/kernel/cpu/nexgen.c
+++ b/arch/i386/kernel/cpu/nexgen.c
@@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void)
61} 61}
62 62
63//early_arch_initcall(nexgen_init_cpu); 63//early_arch_initcall(nexgen_init_cpu);
64
65static int __init nexgen_exit_cpu(void)
66{
67 cpu_devs[X86_VENDOR_NEXGEN] = NULL;
68 return 0;
69}
70
71late_initcall(nexgen_exit_cpu);
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
index 8602425628ca..d08d5a2811c8 100644
--- a/arch/i386/kernel/cpu/rise.c
+++ b/arch/i386/kernel/cpu/rise.c
@@ -51,3 +51,11 @@ int __init rise_init_cpu(void)
51} 51}
52 52
53//early_arch_initcall(rise_init_cpu); 53//early_arch_initcall(rise_init_cpu);
54
55static int __init rise_exit_cpu(void)
56{
57 cpu_devs[X86_VENDOR_RISE] = NULL;
58 return 0;
59}
60
61late_initcall(rise_exit_cpu);
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index fc426380366b..bdbeb77f4e22 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -84,7 +84,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
84#endif 84#endif
85} 85}
86 86
87static void transmeta_identify(struct cpuinfo_x86 * c) 87static void __init transmeta_identify(struct cpuinfo_x86 * c)
88{ 88{
89 u32 xlvl; 89 u32 xlvl;
90 generic_identify(c); 90 generic_identify(c);
@@ -111,3 +111,11 @@ int __init transmeta_init_cpu(void)
111} 111}
112 112
113//early_arch_initcall(transmeta_init_cpu); 113//early_arch_initcall(transmeta_init_cpu);
114
115static int __init transmeta_exit_cpu(void)
116{
117 cpu_devs[X86_VENDOR_TRANSMETA] = NULL;
118 return 0;
119}
120
121late_initcall(transmeta_exit_cpu);
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
index 264fcad559d5..2cd988f6dc55 100644
--- a/arch/i386/kernel/cpu/umc.c
+++ b/arch/i386/kernel/cpu/umc.c
@@ -31,3 +31,11 @@ int __init umc_init_cpu(void)
31} 31}
32 32
33//early_arch_initcall(umc_init_cpu); 33//early_arch_initcall(umc_init_cpu);
34
35static int __init umc_exit_cpu(void)
36{
37 cpu_devs[X86_VENDOR_UMC] = NULL;
38 return 0;
39}
40
41late_initcall(umc_exit_cpu);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d661703ac1cb..63f39a7e2c96 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
138 if (nmi_watchdog == NMI_LOCAL_APIC) 138 if (nmi_watchdog == NMI_LOCAL_APIC)
139 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); 139 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
140 140
141 for (cpu = 0; cpu < NR_CPUS; cpu++) 141 for_each_cpu(cpu)
142 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; 142 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
143 local_irq_enable(); 143 local_irq_enable();
144 mdelay((10*1000)/nmi_hz); // wait 10 ticks 144 mdelay((10*1000)/nmi_hz); // wait 10 ticks
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 2185377fdde1..0480454ebffa 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -297,8 +297,10 @@ void show_regs(struct pt_regs * regs)
297 297
298 if (user_mode(regs)) 298 if (user_mode(regs))
299 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); 299 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
300 printk(" EFLAGS: %08lx %s (%s)\n", 300 printk(" EFLAGS: %08lx %s (%s %.*s)\n",
301 regs->eflags, print_tainted(), system_utsname.release); 301 regs->eflags, print_tainted(), system_utsname.release,
302 (int)strcspn(system_utsname.version, " "),
303 system_utsname.version);
302 printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", 304 printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
303 regs->eax,regs->ebx,regs->ecx,regs->edx); 305 regs->eax,regs->ebx,regs->ecx,regs->edx);
304 printk("ESI: %08lx EDI: %08lx EBP: %08lx", 306 printk("ESI: %08lx EDI: %08lx EBP: %08lx",
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0aaebf3e1cfa..b814dbdcc91e 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -166,7 +166,8 @@ static void show_trace_log_lvl(struct task_struct *task,
166 stack = (unsigned long*)context->previous_esp; 166 stack = (unsigned long*)context->previous_esp;
167 if (!stack) 167 if (!stack)
168 break; 168 break;
169 printk(KERN_EMERG " =======================\n"); 169 printk(log_lvl);
170 printk(" =======================\n");
170 } 171 }
171} 172}
172 173
@@ -239,9 +240,11 @@ void show_registers(struct pt_regs *regs)
239 } 240 }
240 print_modules(); 241 print_modules();
241 printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n" 242 printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
242 "EFLAGS: %08lx (%s) \n", 243 "EFLAGS: %08lx (%s %.*s) \n",
243 smp_processor_id(), 0xffff & regs->xcs, regs->eip, 244 smp_processor_id(), 0xffff & regs->xcs, regs->eip,
244 print_tainted(), regs->eflags, system_utsname.release); 245 print_tainted(), regs->eflags, system_utsname.release,
246 (int)strcspn(system_utsname.version, " "),
247 system_utsname.version);
245 print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip); 248 print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
246 printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", 249 printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
247 regs->eax, regs->ebx, regs->ecx, regs->edx); 250 regs->eax, regs->ebx, regs->ecx, regs->edx);
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 21654be3f73f..acc18138fb22 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -49,7 +49,9 @@ dump_backtrace(struct frame_head * head)
49 * | stack | 49 * | stack |
50 * --------------- saved regs->ebp value if valid (frame_head address) 50 * --------------- saved regs->ebp value if valid (frame_head address)
51 * . . 51 * . .
52 * --------------- struct pt_regs stored on stack (struct pt_regs *) 52 * --------------- saved regs->rsp value if x86_64
53 * | |
54 * --------------- struct pt_regs * stored on stack if 32-bit
53 * | | 55 * | |
54 * . . 56 * . .
55 * | | 57 * | |
@@ -57,13 +59,26 @@ dump_backtrace(struct frame_head * head)
57 * | | 59 * | |
58 * | | \/ Lower addresses 60 * | | \/ Lower addresses
59 * 61 *
60 * Thus, &pt_regs <-> stack base restricts the valid(ish) ebp values 62 * Thus, regs (or regs->rsp for x86_64) <-> stack base restricts the
63 * valid(ish) ebp values. Note: (1) for x86_64, NMI and several other
64 * exceptions use special stacks, maintained by the interrupt stack table
65 * (IST). These stacks are set up in trap_init() in
66 * arch/x86_64/kernel/traps.c. Thus, for x86_64, regs now does not point
67 * to the kernel stack; instead, it points to some location on the NMI
68 * stack. On the other hand, regs->rsp is the stack pointer saved when the
69 * NMI occurred. (2) For 32-bit, regs->esp is not valid because the
70 * processor does not save %esp on the kernel stack when interrupts occur
71 * in the kernel mode.
61 */ 72 */
62#ifdef CONFIG_FRAME_POINTER 73#ifdef CONFIG_FRAME_POINTER
63static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs) 74static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
64{ 75{
65 unsigned long headaddr = (unsigned long)head; 76 unsigned long headaddr = (unsigned long)head;
77#ifdef CONFIG_X86_64
78 unsigned long stack = (unsigned long)regs->rsp;
79#else
66 unsigned long stack = (unsigned long)regs; 80 unsigned long stack = (unsigned long)regs;
81#endif
67 unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE; 82 unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;
68 83
69 return headaddr > stack && headaddr < stack_base; 84 return headaddr > stack && headaddr < stack_base;
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 706b7734e191..6e5eea19fa67 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -71,31 +71,33 @@ static int __init topology_init(void)
71 int i, err = 0; 71 int i, err = 0;
72 72
73#ifdef CONFIG_NUMA 73#ifdef CONFIG_NUMA
74 sysfs_nodes = kmalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL); 74 sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
75 if (!sysfs_nodes) { 75 if (!sysfs_nodes) {
76 err = -ENOMEM; 76 err = -ENOMEM;
77 goto out; 77 goto out;
78 } 78 }
79 memset(sysfs_nodes, 0, sizeof(struct node) * MAX_NUMNODES);
80 79
81 /* MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes? */ 80 /*
82 for_each_online_node(i) 81 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
82 */
83 for_each_online_node(i) {
83 if ((err = register_node(&sysfs_nodes[i], i, 0))) 84 if ((err = register_node(&sysfs_nodes[i], i, 0)))
84 goto out; 85 goto out;
86 }
85#endif 87#endif
86 88
87 sysfs_cpus = kmalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL); 89 sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
88 if (!sysfs_cpus) { 90 if (!sysfs_cpus) {
89 err = -ENOMEM; 91 err = -ENOMEM;
90 goto out; 92 goto out;
91 } 93 }
92 memset(sysfs_cpus, 0, sizeof(struct ia64_cpu) * NR_CPUS);
93 94
94 for_each_present_cpu(i) 95 for_each_present_cpu(i) {
95 if((err = arch_register_cpu(i))) 96 if((err = arch_register_cpu(i)))
96 goto out; 97 goto out;
98 }
97out: 99out:
98 return err; 100 return err;
99} 101}
100 102
101__initcall(topology_init); 103subsys_initcall(topology_init);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index e77a06e9621e..7c914a4c67c3 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -149,14 +149,20 @@ config HOTPLUG_CPU
149 default y if SMP 149 default y if SMP
150 select HOTPLUG 150 select HOTPLUG
151 151
152config ARCH_SELECT_MEMORY_MODEL
153 def_bool y
154 depends on 64BIT
155
152config ARCH_DISCONTIGMEM_ENABLE 156config ARCH_DISCONTIGMEM_ENABLE
153 bool "Discontiguous memory support (EXPERIMENTAL)" 157 def_bool y
154 depends on 64BIT && EXPERIMENTAL 158 depends on 64BIT
155 help 159
156 Say Y to support efficient handling of discontiguous physical memory, 160config ARCH_FLATMEM_ENABLE
157 for architectures which are either NUMA (Non-Uniform Memory Access) 161 def_bool y
158 or have huge holes in the physical address space for other reasons. 162
159 See <file:Documentation/vm/numa> for more. 163config ARCH_DISCONTIGMEM_DEFAULT
164 def_bool y
165 depends on ARCH_DISCONTIGMEM_ENABLE
160 166
161source "kernel/Kconfig.hz" 167source "kernel/Kconfig.hz"
162source "mm/Kconfig" 168source "mm/Kconfig"
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index 8caaed187a1f..9166bd117267 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -11,4 +11,14 @@ config DEBUG_RWLOCK
11 too many attempts. If you suspect a rwlock problem or a kernel 11 too many attempts. If you suspect a rwlock problem or a kernel
12 hacker asks for this option then say Y. Otherwise say N. 12 hacker asks for this option then say Y. Otherwise say N.
13 13
14config DEBUG_RODATA
15 bool "Write protect kernel read-only data structures"
16 depends on DEBUG_KERNEL
17 help
18 Mark the kernel read-only data as write-protected in the pagetables,
19 in order to catch accidental (and incorrect) writes to such const
20 data. This option may have a slight performance impact because a
21 portion of the kernel code won't be covered by a TLB anymore.
22 If in doubt, say "N".
23
14endmenu 24endmenu
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
index 8819e7e6ae3f..37e98241ce4b 100644
--- a/arch/parisc/configs/b180_defconfig
+++ b/arch/parisc/configs/b180_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.14-rc5-pa1 3# Linux kernel version: 2.6.16-rc1-pa0
4# Fri Oct 21 23:06:10 2005 4# Tue Jan 17 08:21:01 2006
5# 5#
6CONFIG_PARISC=y 6CONFIG_PARISC=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -29,8 +29,6 @@ CONFIG_SYSVIPC=y
29# CONFIG_BSD_PROCESS_ACCT is not set 29# CONFIG_BSD_PROCESS_ACCT is not set
30CONFIG_SYSCTL=y 30CONFIG_SYSCTL=y
31# CONFIG_AUDIT is not set 31# CONFIG_AUDIT is not set
32# CONFIG_HOTPLUG is not set
33CONFIG_KOBJECT_UEVENT=y
34CONFIG_IKCONFIG=y 32CONFIG_IKCONFIG=y
35CONFIG_IKCONFIG_PROC=y 33CONFIG_IKCONFIG_PROC=y
36CONFIG_INITRAMFS_SOURCE="" 34CONFIG_INITRAMFS_SOURCE=""
@@ -38,8 +36,10 @@ CONFIG_INITRAMFS_SOURCE=""
38CONFIG_KALLSYMS=y 36CONFIG_KALLSYMS=y
39# CONFIG_KALLSYMS_ALL is not set 37# CONFIG_KALLSYMS_ALL is not set
40# CONFIG_KALLSYMS_EXTRA_PASS is not set 38# CONFIG_KALLSYMS_EXTRA_PASS is not set
39CONFIG_HOTPLUG=y
41CONFIG_PRINTK=y 40CONFIG_PRINTK=y
42CONFIG_BUG=y 41CONFIG_BUG=y
42CONFIG_ELF_CORE=y
43CONFIG_BASE_FULL=y 43CONFIG_BASE_FULL=y
44CONFIG_FUTEX=y 44CONFIG_FUTEX=y
45CONFIG_EPOLL=y 45CONFIG_EPOLL=y
@@ -48,8 +48,10 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
48CONFIG_CC_ALIGN_LABELS=0 48CONFIG_CC_ALIGN_LABELS=0
49CONFIG_CC_ALIGN_LOOPS=0 49CONFIG_CC_ALIGN_LOOPS=0
50CONFIG_CC_ALIGN_JUMPS=0 50CONFIG_CC_ALIGN_JUMPS=0
51CONFIG_SLAB=y
51# CONFIG_TINY_SHMEM is not set 52# CONFIG_TINY_SHMEM is not set
52CONFIG_BASE_SMALL=0 53CONFIG_BASE_SMALL=0
54# CONFIG_SLOB is not set
53 55
54# 56#
55# Loadable module support 57# Loadable module support
@@ -57,10 +59,28 @@ CONFIG_BASE_SMALL=0
57CONFIG_MODULES=y 59CONFIG_MODULES=y
58# CONFIG_MODULE_UNLOAD is not set 60# CONFIG_MODULE_UNLOAD is not set
59CONFIG_OBSOLETE_MODPARM=y 61CONFIG_OBSOLETE_MODPARM=y
62CONFIG_MODVERSIONS=y
60# CONFIG_MODULE_SRCVERSION_ALL is not set 63# CONFIG_MODULE_SRCVERSION_ALL is not set
61# CONFIG_KMOD is not set 64# CONFIG_KMOD is not set
62 65
63# 66#
67# Block layer
68#
69
70#
71# IO Schedulers
72#
73CONFIG_IOSCHED_NOOP=y
74CONFIG_IOSCHED_AS=y
75CONFIG_IOSCHED_DEADLINE=y
76CONFIG_IOSCHED_CFQ=y
77# CONFIG_DEFAULT_AS is not set
78# CONFIG_DEFAULT_DEADLINE is not set
79CONFIG_DEFAULT_CFQ=y
80# CONFIG_DEFAULT_NOOP is not set
81CONFIG_DEFAULT_IOSCHED="cfq"
82
83#
64# Processor type and features 84# Processor type and features
65# 85#
66# CONFIG_PA7000 is not set 86# CONFIG_PA7000 is not set
@@ -77,6 +97,7 @@ CONFIG_HZ=250
77CONFIG_FLATMEM=y 97CONFIG_FLATMEM=y
78CONFIG_FLAT_NODE_MEM_MAP=y 98CONFIG_FLAT_NODE_MEM_MAP=y
79# CONFIG_SPARSEMEM_STATIC is not set 99# CONFIG_SPARSEMEM_STATIC is not set
100CONFIG_SPLIT_PTLOCK_CPUS=4096
80# CONFIG_PREEMPT is not set 101# CONFIG_PREEMPT is not set
81# CONFIG_HPUX is not set 102# CONFIG_HPUX is not set
82 103
@@ -84,8 +105,8 @@ CONFIG_FLAT_NODE_MEM_MAP=y
84# Bus options (PCI, PCMCIA, EISA, GSC, ISA) 105# Bus options (PCI, PCMCIA, EISA, GSC, ISA)
85# 106#
86CONFIG_GSC=y 107CONFIG_GSC=y
87# CONFIG_HPPB is not set 108CONFIG_HPPB=y
88# CONFIG_IOMMU_CCIO is not set 109CONFIG_IOMMU_CCIO=y
89CONFIG_GSC_LASI=y 110CONFIG_GSC_LASI=y
90CONFIG_GSC_WAX=y 111CONFIG_GSC_WAX=y
91CONFIG_EISA=y 112CONFIG_EISA=y
@@ -165,8 +186,11 @@ CONFIG_IPV6=y
165# CONFIG_LLC2 is not set 186# CONFIG_LLC2 is not set
166# CONFIG_IPX is not set 187# CONFIG_IPX is not set
167# CONFIG_ATALK is not set 188# CONFIG_ATALK is not set
189
190#
191# QoS and/or fair queueing
192#
168# CONFIG_NET_SCHED is not set 193# CONFIG_NET_SCHED is not set
169# CONFIG_NET_CLS_ROUTE is not set
170 194
171# 195#
172# Network testing 196# Network testing
@@ -205,6 +229,7 @@ CONFIG_STANDALONE=y
205CONFIG_PARPORT=y 229CONFIG_PARPORT=y
206CONFIG_PARPORT_PC=y 230CONFIG_PARPORT_PC=y
207# CONFIG_PARPORT_SERIAL is not set 231# CONFIG_PARPORT_SERIAL is not set
232CONFIG_PARPORT_NOT_PC=y
208CONFIG_PARPORT_GSC=y 233CONFIG_PARPORT_GSC=y
209# CONFIG_PARPORT_1284 is not set 234# CONFIG_PARPORT_1284 is not set
210 235
@@ -230,14 +255,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
230CONFIG_CDROM_PKTCDVD=m 255CONFIG_CDROM_PKTCDVD=m
231CONFIG_CDROM_PKTCDVD_BUFFERS=8 256CONFIG_CDROM_PKTCDVD_BUFFERS=8
232# CONFIG_CDROM_PKTCDVD_WCACHE is not set 257# CONFIG_CDROM_PKTCDVD_WCACHE is not set
233
234#
235# IO Schedulers
236#
237CONFIG_IOSCHED_NOOP=y
238CONFIG_IOSCHED_AS=y
239CONFIG_IOSCHED_DEADLINE=y
240CONFIG_IOSCHED_CFQ=y
241CONFIG_ATA_OVER_ETH=y 258CONFIG_ATA_OVER_ETH=y
242 259
243# 260#
@@ -281,6 +298,7 @@ CONFIG_SCSI_SPI_ATTRS=y
281# 298#
282# SCSI low-level drivers 299# SCSI low-level drivers
283# 300#
301# CONFIG_ISCSI_TCP is not set
284# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 302# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
285# CONFIG_SCSI_3W_9XXX is not set 303# CONFIG_SCSI_3W_9XXX is not set
286# CONFIG_SCSI_ACARD is not set 304# CONFIG_SCSI_ACARD is not set
@@ -313,21 +331,19 @@ CONFIG_SCSI_SYM53C8XX_2=y
313CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 331CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
314CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 332CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
315CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 333CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
316# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set 334CONFIG_SCSI_SYM53C8XX_MMIO=y
317# CONFIG_SCSI_IPR is not set 335# CONFIG_SCSI_IPR is not set
318# CONFIG_SCSI_ZALON is not set 336CONFIG_SCSI_ZALON=y
337CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
338CONFIG_SCSI_NCR53C8XX_MAX_TAGS=32
339CONFIG_SCSI_NCR53C8XX_SYNC=40
340# CONFIG_SCSI_NCR53C8XX_PROFILE is not set
319# CONFIG_SCSI_PAS16 is not set 341# CONFIG_SCSI_PAS16 is not set
320# CONFIG_SCSI_PSI240I is not set 342# CONFIG_SCSI_PSI240I is not set
321# CONFIG_SCSI_QLOGIC_FAS is not set 343# CONFIG_SCSI_QLOGIC_FAS is not set
322# CONFIG_SCSI_QLOGIC_FC is not set 344# CONFIG_SCSI_QLOGIC_FC is not set
323# CONFIG_SCSI_QLOGIC_1280 is not set 345# CONFIG_SCSI_QLOGIC_1280 is not set
324CONFIG_SCSI_QLA2XXX=y 346# CONFIG_SCSI_QLA_FC is not set
325# CONFIG_SCSI_QLA21XX is not set
326# CONFIG_SCSI_QLA22XX is not set
327# CONFIG_SCSI_QLA2300 is not set
328# CONFIG_SCSI_QLA2322 is not set
329# CONFIG_SCSI_QLA6312 is not set
330# CONFIG_SCSI_QLA24XX is not set
331# CONFIG_SCSI_LPFC is not set 347# CONFIG_SCSI_LPFC is not set
332# CONFIG_SCSI_SIM710 is not set 348# CONFIG_SCSI_SIM710 is not set
333# CONFIG_SCSI_SYM53C416 is not set 349# CONFIG_SCSI_SYM53C416 is not set
@@ -397,7 +413,7 @@ CONFIG_NETDEVICES=y
397# 413#
398CONFIG_NET_ETHERNET=y 414CONFIG_NET_ETHERNET=y
399# CONFIG_MII is not set 415# CONFIG_MII is not set
400# CONFIG_LASI_82596 is not set 416CONFIG_LASI_82596=y
401# CONFIG_HAPPYMEAL is not set 417# CONFIG_HAPPYMEAL is not set
402# CONFIG_SUNGEM is not set 418# CONFIG_SUNGEM is not set
403# CONFIG_CASSINI is not set 419# CONFIG_CASSINI is not set
@@ -464,6 +480,7 @@ CONFIG_NET_RADIO=y
464# Wireless 802.11b ISA/PCI cards support 480# Wireless 802.11b ISA/PCI cards support
465# 481#
466# CONFIG_HERMES is not set 482# CONFIG_HERMES is not set
483# CONFIG_ATMEL is not set
467 484
468# 485#
469# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support 486# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
@@ -527,7 +544,7 @@ CONFIG_KEYBOARD_ATKBD_HP_KEYCODES=y
527# CONFIG_KEYBOARD_XTKBD is not set 544# CONFIG_KEYBOARD_XTKBD is not set
528# CONFIG_KEYBOARD_NEWTON is not set 545# CONFIG_KEYBOARD_NEWTON is not set
529# CONFIG_KEYBOARD_HIL_OLD is not set 546# CONFIG_KEYBOARD_HIL_OLD is not set
530# CONFIG_KEYBOARD_HIL is not set 547CONFIG_KEYBOARD_HIL=y
531CONFIG_INPUT_MOUSE=y 548CONFIG_INPUT_MOUSE=y
532CONFIG_MOUSE_PS2=y 549CONFIG_MOUSE_PS2=y
533# CONFIG_MOUSE_SERIAL is not set 550# CONFIG_MOUSE_SERIAL is not set
@@ -535,7 +552,7 @@ CONFIG_MOUSE_PS2=y
535# CONFIG_MOUSE_LOGIBM is not set 552# CONFIG_MOUSE_LOGIBM is not set
536# CONFIG_MOUSE_PC110PAD is not set 553# CONFIG_MOUSE_PC110PAD is not set
537# CONFIG_MOUSE_VSXXXAA is not set 554# CONFIG_MOUSE_VSXXXAA is not set
538# CONFIG_MOUSE_HIL is not set 555CONFIG_MOUSE_HIL=y
539# CONFIG_INPUT_JOYSTICK is not set 556# CONFIG_INPUT_JOYSTICK is not set
540# CONFIG_INPUT_TOUCHSCREEN is not set 557# CONFIG_INPUT_TOUCHSCREEN is not set
541CONFIG_INPUT_MISC=y 558CONFIG_INPUT_MISC=y
@@ -549,7 +566,8 @@ CONFIG_SERIO=y
549# CONFIG_SERIO_SERPORT is not set 566# CONFIG_SERIO_SERPORT is not set
550# CONFIG_SERIO_PARKBD is not set 567# CONFIG_SERIO_PARKBD is not set
551CONFIG_SERIO_GSCPS2=y 568CONFIG_SERIO_GSCPS2=y
552# CONFIG_HP_SDC is not set 569CONFIG_HP_SDC=y
570CONFIG_HIL_MLC=y
553# CONFIG_SERIO_PCIPS2 is not set 571# CONFIG_SERIO_PCIPS2 is not set
554CONFIG_SERIO_LIBPS2=y 572CONFIG_SERIO_LIBPS2=y
555# CONFIG_SERIO_RAW is not set 573# CONFIG_SERIO_RAW is not set
@@ -569,6 +587,7 @@ CONFIG_HW_CONSOLE=y
569CONFIG_SERIAL_8250=y 587CONFIG_SERIAL_8250=y
570CONFIG_SERIAL_8250_CONSOLE=y 588CONFIG_SERIAL_8250_CONSOLE=y
571CONFIG_SERIAL_8250_NR_UARTS=13 589CONFIG_SERIAL_8250_NR_UARTS=13
590CONFIG_SERIAL_8250_RUNTIME_UARTS=4
572CONFIG_SERIAL_8250_EXTENDED=y 591CONFIG_SERIAL_8250_EXTENDED=y
573CONFIG_SERIAL_8250_MANY_PORTS=y 592CONFIG_SERIAL_8250_MANY_PORTS=y
574CONFIG_SERIAL_8250_SHARE_IRQ=y 593CONFIG_SERIAL_8250_SHARE_IRQ=y
@@ -582,11 +601,10 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
582# 601#
583# Non-8250 serial port support 602# Non-8250 serial port support
584# 603#
585# CONFIG_SERIAL_MUX is not set 604CONFIG_SERIAL_MUX=y
586# CONFIG_PDC_CONSOLE is not set 605CONFIG_SERIAL_MUX_CONSOLE=y
587CONFIG_SERIAL_CORE=y 606CONFIG_SERIAL_CORE=y
588CONFIG_SERIAL_CORE_CONSOLE=y 607CONFIG_SERIAL_CORE_CONSOLE=y
589# CONFIG_SERIAL_JSM is not set
590CONFIG_UNIX98_PTYS=y 608CONFIG_UNIX98_PTYS=y
591CONFIG_LEGACY_PTYS=y 609CONFIG_LEGACY_PTYS=y
592CONFIG_LEGACY_PTY_COUNT=256 610CONFIG_LEGACY_PTY_COUNT=256
@@ -626,6 +644,12 @@ CONFIG_GEN_RTC=y
626# CONFIG_I2C is not set 644# CONFIG_I2C is not set
627 645
628# 646#
647# SPI support
648#
649# CONFIG_SPI is not set
650# CONFIG_SPI_MASTER is not set
651
652#
629# Dallas's 1-wire bus 653# Dallas's 1-wire bus
630# 654#
631# CONFIG_W1 is not set 655# CONFIG_W1 is not set
@@ -661,7 +685,6 @@ CONFIG_FB=y
661CONFIG_FB_CFB_FILLRECT=y 685CONFIG_FB_CFB_FILLRECT=y
662CONFIG_FB_CFB_COPYAREA=y 686CONFIG_FB_CFB_COPYAREA=y
663CONFIG_FB_CFB_IMAGEBLIT=y 687CONFIG_FB_CFB_IMAGEBLIT=y
664CONFIG_FB_SOFT_CURSOR=y
665# CONFIG_FB_MACMODES is not set 688# CONFIG_FB_MACMODES is not set
666# CONFIG_FB_MODE_HELPERS is not set 689# CONFIG_FB_MODE_HELPERS is not set
667# CONFIG_FB_TILEBLITTING is not set 690# CONFIG_FB_TILEBLITTING is not set
@@ -671,6 +694,7 @@ CONFIG_FB_SOFT_CURSOR=y
671# CONFIG_FB_ASILIANT is not set 694# CONFIG_FB_ASILIANT is not set
672# CONFIG_FB_IMSTT is not set 695# CONFIG_FB_IMSTT is not set
673CONFIG_FB_STI=y 696CONFIG_FB_STI=y
697# CONFIG_FB_S1D13XXX is not set
674# CONFIG_FB_NVIDIA is not set 698# CONFIG_FB_NVIDIA is not set
675# CONFIG_FB_RIVA is not set 699# CONFIG_FB_RIVA is not set
676# CONFIG_FB_MATROX is not set 700# CONFIG_FB_MATROX is not set
@@ -683,9 +707,7 @@ CONFIG_FB_STI=y
683# CONFIG_FB_KYRO is not set 707# CONFIG_FB_KYRO is not set
684# CONFIG_FB_3DFX is not set 708# CONFIG_FB_3DFX is not set
685# CONFIG_FB_VOODOO1 is not set 709# CONFIG_FB_VOODOO1 is not set
686# CONFIG_FB_CYBLA is not set
687# CONFIG_FB_TRIDENT is not set 710# CONFIG_FB_TRIDENT is not set
688# CONFIG_FB_S1D13XXX is not set
689# CONFIG_FB_VIRTUAL is not set 711# CONFIG_FB_VIRTUAL is not set
690 712
691# 713#
@@ -695,6 +717,7 @@ CONFIG_DUMMY_CONSOLE=y
695CONFIG_DUMMY_CONSOLE_COLUMNS=160 717CONFIG_DUMMY_CONSOLE_COLUMNS=160
696CONFIG_DUMMY_CONSOLE_ROWS=64 718CONFIG_DUMMY_CONSOLE_ROWS=64
697CONFIG_FRAMEBUFFER_CONSOLE=y 719CONFIG_FRAMEBUFFER_CONSOLE=y
720# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
698CONFIG_STI_CONSOLE=y 721CONFIG_STI_CONSOLE=y
699# CONFIG_FONTS is not set 722# CONFIG_FONTS is not set
700CONFIG_FONT_8x8=y 723CONFIG_FONT_8x8=y
@@ -713,7 +736,85 @@ CONFIG_LOGO_PARISC_CLUT224=y
713# 736#
714# Sound 737# Sound
715# 738#
716# CONFIG_SOUND is not set 739CONFIG_SOUND=y
740
741#
742# Advanced Linux Sound Architecture
743#
744CONFIG_SND=y
745CONFIG_SND_TIMER=y
746CONFIG_SND_PCM=y
747CONFIG_SND_SEQUENCER=y
748# CONFIG_SND_SEQ_DUMMY is not set
749CONFIG_SND_OSSEMUL=y
750CONFIG_SND_MIXER_OSS=y
751CONFIG_SND_PCM_OSS=y
752CONFIG_SND_SEQUENCER_OSS=y
753CONFIG_SND_SUPPORT_OLD_API=y
754# CONFIG_SND_VERBOSE_PRINTK is not set
755# CONFIG_SND_DEBUG is not set
756
757#
758# Generic devices
759#
760# CONFIG_SND_DUMMY is not set
761# CONFIG_SND_VIRMIDI is not set
762# CONFIG_SND_MTPAV is not set
763# CONFIG_SND_SERIAL_U16550 is not set
764# CONFIG_SND_MPU401 is not set
765
766#
767# PCI devices
768#
769# CONFIG_SND_AD1889 is not set
770# CONFIG_SND_ALI5451 is not set
771# CONFIG_SND_ATIIXP is not set
772# CONFIG_SND_ATIIXP_MODEM is not set
773# CONFIG_SND_AU8810 is not set
774# CONFIG_SND_AU8820 is not set
775# CONFIG_SND_AU8830 is not set
776# CONFIG_SND_BT87X is not set
777# CONFIG_SND_CA0106 is not set
778# CONFIG_SND_CMIPCI is not set
779# CONFIG_SND_CS4281 is not set
780# CONFIG_SND_CS46XX is not set
781# CONFIG_SND_EMU10K1 is not set
782# CONFIG_SND_EMU10K1X is not set
783# CONFIG_SND_ENS1370 is not set
784# CONFIG_SND_ENS1371 is not set
785# CONFIG_SND_ES1938 is not set
786# CONFIG_SND_ES1968 is not set
787# CONFIG_SND_FM801 is not set
788# CONFIG_SND_HDA_INTEL is not set
789# CONFIG_SND_HDSP is not set
790# CONFIG_SND_HDSPM is not set
791# CONFIG_SND_ICE1712 is not set
792# CONFIG_SND_ICE1724 is not set
793# CONFIG_SND_INTEL8X0 is not set
794# CONFIG_SND_KORG1212 is not set
795# CONFIG_SND_MAESTRO3 is not set
796# CONFIG_SND_MIXART is not set
797# CONFIG_SND_NM256 is not set
798# CONFIG_SND_PCXHR is not set
799# CONFIG_SND_RME32 is not set
800# CONFIG_SND_RME96 is not set
801# CONFIG_SND_RME9652 is not set
802# CONFIG_SND_SONICVIBES is not set
803# CONFIG_SND_TRIDENT is not set
804# CONFIG_SND_VIA82XX is not set
805# CONFIG_SND_VIA82XX_MODEM is not set
806# CONFIG_SND_VX222 is not set
807# CONFIG_SND_YMFPCI is not set
808
809#
810# GSC devices
811#
812CONFIG_SND_HARMONY=y
813
814#
815# Open Sound System
816#
817# CONFIG_SOUND_PRIME is not set
717 818
718# 819#
719# USB support 820# USB support
@@ -723,6 +824,10 @@ CONFIG_USB_ARCH_HAS_OHCI=y
723# CONFIG_USB is not set 824# CONFIG_USB is not set
724 825
725# 826#
827# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
828#
829
830#
726# USB Gadget Support 831# USB Gadget Support
727# 832#
728# CONFIG_USB_GADGET is not set 833# CONFIG_USB_GADGET is not set
@@ -877,18 +982,23 @@ CONFIG_NLS_DEFAULT="iso8859-1"
877# Kernel hacking 982# Kernel hacking
878# 983#
879# CONFIG_PRINTK_TIME is not set 984# CONFIG_PRINTK_TIME is not set
880CONFIG_DEBUG_KERNEL=y
881CONFIG_MAGIC_SYSRQ=y 985CONFIG_MAGIC_SYSRQ=y
986CONFIG_DEBUG_KERNEL=y
882CONFIG_LOG_BUF_SHIFT=16 987CONFIG_LOG_BUF_SHIFT=16
883CONFIG_DETECT_SOFTLOCKUP=y 988CONFIG_DETECT_SOFTLOCKUP=y
884# CONFIG_SCHEDSTATS is not set 989# CONFIG_SCHEDSTATS is not set
885# CONFIG_DEBUG_SLAB is not set 990# CONFIG_DEBUG_SLAB is not set
991# CONFIG_DEBUG_MUTEXES is not set
886# CONFIG_DEBUG_SPINLOCK is not set 992# CONFIG_DEBUG_SPINLOCK is not set
887# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 993# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
888# CONFIG_DEBUG_KOBJECT is not set 994# CONFIG_DEBUG_KOBJECT is not set
889# CONFIG_DEBUG_INFO is not set 995# CONFIG_DEBUG_INFO is not set
890# CONFIG_DEBUG_IOREMAP is not set 996# CONFIG_DEBUG_IOREMAP is not set
891# CONFIG_DEBUG_FS is not set 997# CONFIG_DEBUG_FS is not set
998# CONFIG_DEBUG_VM is not set
999CONFIG_FORCED_INLINING=y
1000# CONFIG_RCU_TORTURE_TEST is not set
1001# CONFIG_DEBUG_RODATA is not set
892 1002
893# 1003#
894# Security options 1004# Security options
diff --git a/arch/parisc/hpux/entry_hpux.S b/arch/parisc/hpux/entry_hpux.S
index fa9bf38787e7..31c8cccfba31 100644
--- a/arch/parisc/hpux/entry_hpux.S
+++ b/arch/parisc/hpux/entry_hpux.S
@@ -22,10 +22,9 @@
22#include <linux/linkage.h> 22#include <linux/linkage.h>
23#include <asm/unistd.h> 23#include <asm/unistd.h>
24 24
25 .text
26
27#define ENTRY_NAME(_name_) .word _name_ 25#define ENTRY_NAME(_name_) .word _name_
28 26
27 .section .rodata,"a"
29 .align 4 28 .align 4
30 .export hpux_call_table 29 .export hpux_call_table
31 .import hpux_unimplemented_wrapper 30 .import hpux_unimplemented_wrapper
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 2d804e2d16d1..3d569a485a1a 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -408,11 +408,10 @@ static void setup_bus_id(struct parisc_device *padev)
408 408
409struct parisc_device * create_tree_node(char id, struct device *parent) 409struct parisc_device * create_tree_node(char id, struct device *parent)
410{ 410{
411 struct parisc_device *dev = kmalloc(sizeof(*dev), GFP_KERNEL); 411 struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
412 if (!dev) 412 if (!dev)
413 return NULL; 413 return NULL;
414 414
415 memset(dev, 0, sizeof(*dev));
416 dev->hw_path = id; 415 dev->hw_path = id;
417 dev->id.hw_type = HPHW_FAULTY; 416 dev->id.hw_type = HPHW_FAULTY;
418 417
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index f40a777dd388..1d00c365f2b1 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -48,9 +48,6 @@ EXPORT_SYMBOL(strrchr);
48EXPORT_SYMBOL(strstr); 48EXPORT_SYMBOL(strstr);
49EXPORT_SYMBOL(strpbrk); 49EXPORT_SYMBOL(strpbrk);
50 50
51#include <linux/pm.h>
52EXPORT_SYMBOL(pm_power_off);
53
54#include <asm/atomic.h> 51#include <asm/atomic.h>
55EXPORT_SYMBOL(__xchg8); 52EXPORT_SYMBOL(__xchg8);
56EXPORT_SYMBOL(__xchg32); 53EXPORT_SYMBOL(__xchg32);
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 88cba49c5301..79c7db2705fd 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -47,18 +47,17 @@
47 * this makes the boot time much longer than necessary. 47 * this makes the boot time much longer than necessary.
48 * 20ms seems to work for all the HP PCI implementations to date. 48 * 20ms seems to work for all the HP PCI implementations to date.
49 * 49 *
50 * XXX: turn into a #defined constant in <asm/pci.h> ? 50 * #define pci_post_reset_delay 50
51 */ 51 */
52int pci_post_reset_delay = 50;
53 52
54struct pci_port_ops *pci_port; 53struct pci_port_ops *pci_port __read_mostly;
55struct pci_bios_ops *pci_bios; 54struct pci_bios_ops *pci_bios __read_mostly;
56 55
57int pci_hba_count = 0; 56static int pci_hba_count __read_mostly;
58 57
59/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */ 58/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */
60#define PCI_HBA_MAX 32 59#define PCI_HBA_MAX 32
61struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX]; 60static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly;
62 61
63 62
64/******************************************************************** 63/********************************************************************
@@ -259,8 +258,10 @@ void __devinit pcibios_resource_to_bus(struct pci_dev *dev,
259void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, 258void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
260 struct pci_bus_region *region) 259 struct pci_bus_region *region)
261{ 260{
261#ifdef CONFIG_64BIT
262 struct pci_bus *bus = dev->bus; 262 struct pci_bus *bus = dev->bus;
263 struct pci_hba_data *hba = HBA_DATA(bus->bridge->platform_data); 263 struct pci_hba_data *hba = HBA_DATA(bus->bridge->platform_data);
264#endif
264 265
265 if (res->flags & IORESOURCE_MEM) { 266 if (res->flags & IORESOURCE_MEM) {
266 res->start = PCI_HOST_ADDR(hba, region->start); 267 res->start = PCI_HOST_ADDR(hba, region->start);
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 11d406cd0b3e..53f861c82f93 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -68,20 +68,20 @@ struct rdr_tbl_ent {
68}; 68};
69 69
70static int perf_processor_interface __read_mostly = UNKNOWN_INTF; 70static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
71static int perf_enabled __read_mostly = 0; 71static int perf_enabled __read_mostly;
72static spinlock_t perf_lock; 72static spinlock_t perf_lock;
73struct parisc_device *cpu_device __read_mostly = NULL; 73struct parisc_device *cpu_device __read_mostly;
74 74
75/* RDRs to write for PCX-W */ 75/* RDRs to write for PCX-W */
76static int perf_rdrs_W[] = 76static const int perf_rdrs_W[] =
77 { 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 }; 77 { 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
78 78
79/* RDRs to write for PCX-U */ 79/* RDRs to write for PCX-U */
80static int perf_rdrs_U[] = 80static const int perf_rdrs_U[] =
81 { 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 }; 81 { 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
82 82
83/* RDR register descriptions for PCX-W */ 83/* RDR register descriptions for PCX-W */
84static struct rdr_tbl_ent perf_rdr_tbl_W[] = { 84static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
85 { 19, 1, 8 }, /* RDR 0 */ 85 { 19, 1, 8 }, /* RDR 0 */
86 { 16, 1, 16 }, /* RDR 1 */ 86 { 16, 1, 16 }, /* RDR 1 */
87 { 72, 2, 0 }, /* RDR 2 */ 87 { 72, 2, 0 }, /* RDR 2 */
@@ -117,7 +117,7 @@ static struct rdr_tbl_ent perf_rdr_tbl_W[] = {
117}; 117};
118 118
119/* RDR register descriptions for PCX-U */ 119/* RDR register descriptions for PCX-U */
120static struct rdr_tbl_ent perf_rdr_tbl_U[] = { 120static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
121 { 19, 1, 8 }, /* RDR 0 */ 121 { 19, 1, 8 }, /* RDR 0 */
122 { 32, 1, 16 }, /* RDR 1 */ 122 { 32, 1, 16 }, /* RDR 1 */
123 { 20, 1, 0 }, /* RDR 2 */ 123 { 20, 1, 0 }, /* RDR 2 */
@@ -156,7 +156,7 @@ static struct rdr_tbl_ent perf_rdr_tbl_U[] = {
156 * A non-zero write_control in the above tables is a byte offset into 156 * A non-zero write_control in the above tables is a byte offset into
157 * this array. 157 * this array.
158 */ 158 */
159static uint64_t perf_bitmasks[] = { 159static const uint64_t perf_bitmasks[] = {
160 0x0000000000000000ul, /* first dbl word must be zero */ 160 0x0000000000000000ul, /* first dbl word must be zero */
161 0xfdffe00000000000ul, /* RDR0 bitmask */ 161 0xfdffe00000000000ul, /* RDR0 bitmask */
162 0x003f000000000000ul, /* RDR1 bitmask */ 162 0x003f000000000000ul, /* RDR1 bitmask */
@@ -173,7 +173,7 @@ static uint64_t perf_bitmasks[] = {
173 * Write control bitmasks for Pa-8700 processor given 173 * Write control bitmasks for Pa-8700 processor given
174 * somethings have changed slightly. 174 * somethings have changed slightly.
175 */ 175 */
176static uint64_t perf_bitmasks_piranha[] = { 176static const uint64_t perf_bitmasks_piranha[] = {
177 0x0000000000000000ul, /* first dbl word must be zero */ 177 0x0000000000000000ul, /* first dbl word must be zero */
178 0xfdffe00000000000ul, /* RDR0 bitmask */ 178 0xfdffe00000000000ul, /* RDR0 bitmask */
179 0x003f000000000000ul, /* RDR1 bitmask */ 179 0x003f000000000000ul, /* RDR1 bitmask */
@@ -186,7 +186,7 @@ static uint64_t perf_bitmasks_piranha[] = {
186 0xfffc000000000000ul 186 0xfffc000000000000ul
187}; 187};
188 188
189static uint64_t *bitmask_array; /* array of bitmasks to use */ 189static const uint64_t *bitmask_array; /* array of bitmasks to use */
190 190
191/****************************************************************************** 191/******************************************************************************
192 * Function Prototypes 192 * Function Prototypes
@@ -200,7 +200,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
200static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 200static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
201static void perf_start_counters(void); 201static void perf_start_counters(void);
202static int perf_stop_counters(uint32_t *raddr); 202static int perf_stop_counters(uint32_t *raddr);
203static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num); 203static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
204static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer); 204static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
205static int perf_rdr_clear(uint32_t rdr_num); 205static int perf_rdr_clear(uint32_t rdr_num);
206static int perf_write_image(uint64_t *memaddr); 206static int perf_write_image(uint64_t *memaddr);
@@ -444,7 +444,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
444 uint32_t raddr[4]; 444 uint32_t raddr[4];
445 int error = 0; 445 int error = 0;
446 446
447 lock_kernel();
448 switch (cmd) { 447 switch (cmd) {
449 448
450 case PA_PERF_ON: 449 case PA_PERF_ON:
@@ -477,8 +476,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
477 error = -ENOTTY; 476 error = -ENOTTY;
478 } 477 }
479 478
480 unlock_kernel();
481
482 return error; 479 return error;
483} 480}
484 481
@@ -655,7 +652,7 @@ static int perf_stop_counters(uint32_t *raddr)
655 * Retrieve a pointer to the description of what this 652 * Retrieve a pointer to the description of what this
656 * RDR contains. 653 * RDR contains.
657 */ 654 */
658static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num) 655static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
659{ 656{
660 if (perf_processor_interface == ONYX_INTF) { 657 if (perf_processor_interface == ONYX_INTF) {
661 return &perf_rdr_tbl_U[rdr_num]; 658 return &perf_rdr_tbl_U[rdr_num];
@@ -673,7 +670,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
673{ 670{
674 uint64_t data, data_mask = 0; 671 uint64_t data, data_mask = 0;
675 uint32_t width, xbits, i; 672 uint32_t width, xbits, i;
676 struct rdr_tbl_ent *tentry; 673 const struct rdr_tbl_ent *tentry;
677 674
678 tentry = perf_rdr_get_entry(rdr_num); 675 tentry = perf_rdr_get_entry(rdr_num);
679 if ((width = tentry->width) == 0) 676 if ((width = tentry->width) == 0)
@@ -721,7 +718,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
721 */ 718 */
722static int perf_rdr_clear(uint32_t rdr_num) 719static int perf_rdr_clear(uint32_t rdr_num)
723{ 720{
724 struct rdr_tbl_ent *tentry; 721 const struct rdr_tbl_ent *tentry;
725 int32_t i; 722 int32_t i;
726 723
727 tentry = perf_rdr_get_entry(rdr_num); 724 tentry = perf_rdr_get_entry(rdr_num);
@@ -753,10 +750,11 @@ static int perf_write_image(uint64_t *memaddr)
753 uint64_t buffer[MAX_RDR_WORDS]; 750 uint64_t buffer[MAX_RDR_WORDS];
754 uint64_t *bptr; 751 uint64_t *bptr;
755 uint32_t dwords; 752 uint32_t dwords;
756 uint32_t *intrigue_rdr; 753 const uint32_t *intrigue_rdr;
757 uint64_t *intrigue_bitmask, tmp64; 754 const uint64_t *intrigue_bitmask;
755 uint64_t tmp64;
758 void __iomem *runway; 756 void __iomem *runway;
759 struct rdr_tbl_ent *tentry; 757 const struct rdr_tbl_ent *tentry;
760 int i; 758 int i;
761 759
762 /* Clear out counters */ 760 /* Clear out counters */
@@ -830,7 +828,7 @@ static int perf_write_image(uint64_t *memaddr)
830 */ 828 */
831static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer) 829static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
832{ 830{
833 struct rdr_tbl_ent *tentry; 831 const struct rdr_tbl_ent *tentry;
834 int32_t i; 832 int32_t i;
835 833
836printk("perf_rdr_write\n"); 834printk("perf_rdr_write\n");
diff --git a/arch/parisc/kernel/perf_images.h b/arch/parisc/kernel/perf_images.h
index d9562fe3f75c..7fef9644df47 100644
--- a/arch/parisc/kernel/perf_images.h
+++ b/arch/parisc/kernel/perf_images.h
@@ -25,7 +25,7 @@
25 25
26#define PCXU_IMAGE_SIZE 584 26#define PCXU_IMAGE_SIZE 584
27 27
28static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] = { 28static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
29/* 29/*
30 * CPI: 30 * CPI:
31 * 31 *
@@ -2093,7 +2093,7 @@ static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] = {
2093}; 2093};
2094#define PCXW_IMAGE_SIZE 576 2094#define PCXW_IMAGE_SIZE 576
2095 2095
2096static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] = { 2096static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
2097/* 2097/*
2098 * CPI: FROM CPI.IDF (Image 0) 2098 * CPI: FROM CPI.IDF (Image 0)
2099 * 2099 *
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 5da41677e70b..e8dea4177113 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -54,27 +54,6 @@
54#include <asm/uaccess.h> 54#include <asm/uaccess.h>
55#include <asm/unwind.h> 55#include <asm/unwind.h>
56 56
57static int hlt_counter __read_mostly;
58
59/*
60 * Power off function, if any
61 */
62void (*pm_power_off)(void);
63
64void disable_hlt(void)
65{
66 hlt_counter++;
67}
68
69EXPORT_SYMBOL(disable_hlt);
70
71void enable_hlt(void)
72{
73 hlt_counter--;
74}
75
76EXPORT_SYMBOL(enable_hlt);
77
78void default_idle(void) 57void default_idle(void)
79{ 58{
80 barrier(); 59 barrier();
@@ -102,12 +81,7 @@ void cpu_idle(void)
102} 81}
103 82
104 83
105#ifdef __LP64__ 84#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
106#define COMMAND_GLOBAL 0xfffffffffffe0030UL
107#else
108#define COMMAND_GLOBAL 0xfffe0030
109#endif
110
111#define CMD_RESET 5 /* reset any module */ 85#define CMD_RESET 5 /* reset any module */
112 86
113/* 87/*
@@ -162,6 +136,7 @@ void machine_halt(void)
162 */ 136 */
163} 137}
164 138
139void (*chassis_power_off)(void);
165 140
166/* 141/*
167 * This routine is called from sys_reboot to actually turn off the 142 * This routine is called from sys_reboot to actually turn off the
@@ -170,8 +145,8 @@ void machine_halt(void)
170void machine_power_off(void) 145void machine_power_off(void)
171{ 146{
172 /* If there is a registered power off handler, call it. */ 147 /* If there is a registered power off handler, call it. */
173 if(pm_power_off) 148 if (chassis_power_off)
174 pm_power_off(); 149 chassis_power_off();
175 150
176 /* Put the soft power button back under hardware control. 151 /* Put the soft power button back under hardware control.
177 * If the user had already pressed the power button, the 152 * If the user had already pressed the power button, the
@@ -187,6 +162,8 @@ void machine_power_off(void)
187 KERN_EMERG "Please power this system off now."); 162 KERN_EMERG "Please power this system off now.");
188} 163}
189 164
165void (*pm_power_off)(void) = machine_power_off;
166EXPORT_SYMBOL(pm_power_off);
190 167
191/* 168/*
192 * Create a kernel thread 169 * Create a kernel thread
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 27160e8bf15b..413292f1a4a3 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -91,7 +91,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
91 int copied; 91 int copied;
92 92
93#ifdef __LP64__ 93#ifdef __LP64__
94 if (is_compat_task(child)) { 94 if (personality(child->personality) == PER_LINUX32) {
95 unsigned int tmp; 95 unsigned int tmp;
96 96
97 addr &= 0xffffffffL; 97 addr &= 0xffffffffL;
@@ -123,7 +123,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
123 case PTRACE_POKEDATA: 123 case PTRACE_POKEDATA:
124 ret = 0; 124 ret = 0;
125#ifdef __LP64__ 125#ifdef __LP64__
126 if (is_compat_task(child)) { 126 if (personality(child->personality) == PER_LINUX32) {
127 unsigned int tmp = (unsigned int)data; 127 unsigned int tmp = (unsigned int)data;
128 DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n", 128 DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n",
129 request == PTRACE_POKETEXT ? "TEXT" : "DATA", 129 request == PTRACE_POKETEXT ? "TEXT" : "DATA",
@@ -146,7 +146,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
146 case PTRACE_PEEKUSR: { 146 case PTRACE_PEEKUSR: {
147 ret = -EIO; 147 ret = -EIO;
148#ifdef __LP64__ 148#ifdef __LP64__
149 if (is_compat_task(child)) { 149 if (personality(child->personality) == PER_LINUX32) {
150 unsigned int tmp; 150 unsigned int tmp;
151 151
152 if (addr & (sizeof(int)-1)) 152 if (addr & (sizeof(int)-1))
@@ -205,7 +205,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
205 goto out_tsk; 205 goto out_tsk;
206 } 206 }
207#ifdef __LP64__ 207#ifdef __LP64__
208 if (is_compat_task(child)) { 208 if (personality(child->personality) == PER_LINUX32) {
209 if (addr & (sizeof(int)-1)) 209 if (addr & (sizeof(int)-1))
210 goto out_tsk; 210 goto out_tsk;
211 if ((addr = translate_usr_offset(addr)) < 0) 211 if ((addr = translate_usr_offset(addr)) < 0)
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 3a25a7bd673e..05767e83cf2d 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -317,7 +317,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
317 317
318 if(personality(current->personality) == PER_LINUX32) { 318 if(personality(current->personality) == PER_LINUX32) {
319 DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info); 319 DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info);
320 err |= compat_copy_siginfo_to_user(&compat_frame->info, info); 320 err |= copy_siginfo_to_user32(&compat_frame->info, info);
321 DBG(1,"SETUP_RT_FRAME: 1\n"); 321 DBG(1,"SETUP_RT_FRAME: 1\n");
322 compat_val = (compat_int_t)current->sas_ss_sp; 322 compat_val = (compat_int_t)current->sas_ss_sp;
323 err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_sp); 323 err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_sp);
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 0792e20efef3..a6b4231cafa1 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -31,7 +31,6 @@
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/errno.h> 32#include <linux/errno.h>
33 33
34#include <asm/compat_signal.h>
35#include <asm/uaccess.h> 34#include <asm/uaccess.h>
36 35
37#include "signal32.h" 36#include "signal32.h"
@@ -398,3 +397,104 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __
398 397
399 return err; 398 return err;
400} 399}
400
401int
402copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
403{
404 unsigned long tmp;
405 int err;
406
407 if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
408 return -EFAULT;
409
410 err = __get_user(to->si_signo, &from->si_signo);
411 err |= __get_user(to->si_errno, &from->si_errno);
412 err |= __get_user(to->si_code, &from->si_code);
413
414 if (to->si_code < 0)
415 err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
416 else {
417 switch (to->si_code >> 16) {
418 case __SI_CHLD >> 16:
419 err |= __get_user(to->si_utime, &from->si_utime);
420 err |= __get_user(to->si_stime, &from->si_stime);
421 err |= __get_user(to->si_status, &from->si_status);
422 default:
423 err |= __get_user(to->si_pid, &from->si_pid);
424 err |= __get_user(to->si_uid, &from->si_uid);
425 break;
426 case __SI_FAULT >> 16:
427 err |= __get_user(tmp, &from->si_addr);
428 to->si_addr = (void __user *) tmp;
429 break;
430 case __SI_POLL >> 16:
431 err |= __get_user(to->si_band, &from->si_band);
432 err |= __get_user(to->si_fd, &from->si_fd);
433 break;
434 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
435 case __SI_MESGQ >> 16:
436 err |= __get_user(to->si_pid, &from->si_pid);
437 err |= __get_user(to->si_uid, &from->si_uid);
438 err |= __get_user(to->si_int, &from->si_int);
439 break;
440 }
441 }
442 return err;
443}
444
445int
446copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
447{
448 unsigned int addr;
449 int err;
450
451 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
452 return -EFAULT;
453
454 /* If you change siginfo_t structure, please be sure
455 this code is fixed accordingly.
456 It should never copy any pad contained in the structure
457 to avoid security leaks, but must copy the generic
458 3 ints plus the relevant union member.
459 This routine must convert siginfo from 64bit to 32bit as well
460 at the same time. */
461 err = __put_user(from->si_signo, &to->si_signo);
462 err |= __put_user(from->si_errno, &to->si_errno);
463 err |= __put_user((short)from->si_code, &to->si_code);
464 if (from->si_code < 0)
465 err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
466 else {
467 switch (from->si_code >> 16) {
468 case __SI_CHLD >> 16:
469 err |= __put_user(from->si_utime, &to->si_utime);
470 err |= __put_user(from->si_stime, &to->si_stime);
471 err |= __put_user(from->si_status, &to->si_status);
472 default:
473 err |= __put_user(from->si_pid, &to->si_pid);
474 err |= __put_user(from->si_uid, &to->si_uid);
475 break;
476 case __SI_FAULT >> 16:
477 /* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... */
478 err |= __put_user(from->_sifields._pad[0], &to->si_addr);
479 break;
480 case __SI_POLL >> 16:
481 err |= __put_user(from->si_band, &to->si_band);
482 err |= __put_user(from->si_fd, &to->si_fd);
483 break;
484 case __SI_TIMER >> 16:
485 err |= __put_user(from->si_tid, &to->si_tid);
486 err |= __put_user(from->si_overrun, &to->si_overrun);
487 addr = (unsigned long) from->si_ptr;
488 err |= __put_user(addr, &to->si_ptr);
489 break;
490 case __SI_RT >> 16: /* Not generated by the kernel as of now. */
491 case __SI_MESGQ >> 16:
492 err |= __put_user(from->si_uid, &to->si_uid);
493 err |= __put_user(from->si_pid, &to->si_pid);
494 addr = (unsigned long) from->si_ptr;
495 err |= __put_user(addr, &to->si_ptr);
496 break;
497 }
498 }
499 return err;
500}
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
index 4d1569e717cc..e39b38a67a87 100644
--- a/arch/parisc/kernel/signal32.h
+++ b/arch/parisc/kernel/signal32.h
@@ -20,8 +20,34 @@
20#define _PARISC64_KERNEL_SIGNAL32_H 20#define _PARISC64_KERNEL_SIGNAL32_H
21 21
22#include <linux/compat.h> 22#include <linux/compat.h>
23#include <asm/compat_signal.h> 23
24#include <asm/compat_rt_sigframe.h> 24typedef compat_uptr_t compat_sighandler_t;
25
26typedef struct compat_sigaltstack {
27 compat_uptr_t ss_sp;
28 compat_int_t ss_flags;
29 compat_size_t ss_size;
30} compat_stack_t;
31
32/* Most things should be clean enough to redefine this at will, if care
33 is taken to make libc match. */
34
35struct compat_sigaction {
36 compat_sighandler_t sa_handler;
37 compat_uint_t sa_flags;
38 compat_sigset_t sa_mask; /* mask last for extensibility */
39};
40
41/* 32-bit ucontext as seen from an 64-bit kernel */
42struct compat_ucontext {
43 compat_uint_t uc_flags;
44 compat_uptr_t uc_link;
45 compat_stack_t uc_stack; /* struct compat_sigaltstack (12 bytes)*/
46 /* FIXME: Pad out to get uc_mcontext to start at an 8-byte aligned boundary */
47 compat_uint_t pad[1];
48 struct compat_sigcontext uc_mcontext;
49 compat_sigset_t uc_sigmask; /* mask last for extensibility */
50};
25 51
26/* ELF32 signal handling */ 52/* ELF32 signal handling */
27 53
@@ -29,6 +55,103 @@ struct k_sigaction32 {
29 struct compat_sigaction sa; 55 struct compat_sigaction sa;
30}; 56};
31 57
58typedef struct compat_siginfo {
59 int si_signo;
60 int si_errno;
61 int si_code;
62
63 union {
64 int _pad[((128/sizeof(int)) - 3)];
65
66 /* kill() */
67 struct {
68 unsigned int _pid; /* sender's pid */
69 unsigned int _uid; /* sender's uid */
70 } _kill;
71
72 /* POSIX.1b timers */
73 struct {
74 compat_timer_t _tid; /* timer id */
75 int _overrun; /* overrun count */
76 char _pad[sizeof(unsigned int) - sizeof(int)];
77 compat_sigval_t _sigval; /* same as below */
78 int _sys_private; /* not to be passed to user */
79 } _timer;
80
81 /* POSIX.1b signals */
82 struct {
83 unsigned int _pid; /* sender's pid */
84 unsigned int _uid; /* sender's uid */
85 compat_sigval_t _sigval;
86 } _rt;
87
88 /* SIGCHLD */
89 struct {
90 unsigned int _pid; /* which child */
91 unsigned int _uid; /* sender's uid */
92 int _status; /* exit code */
93 compat_clock_t _utime;
94 compat_clock_t _stime;
95 } _sigchld;
96
97 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
98 struct {
99 unsigned int _addr; /* faulting insn/memory ref. */
100 } _sigfault;
101
102 /* SIGPOLL */
103 struct {
104 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
105 int _fd;
106 } _sigpoll;
107 } _sifields;
108} compat_siginfo_t;
109
110int copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from);
111int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
112
113/* In a deft move of uber-hackery, we decide to carry the top half of all
114 * 64-bit registers in a non-portable, non-ABI, hidden structure.
115 * Userspace can read the hidden structure if it *wants* but is never
116 * guaranteed to be in the same place. Infact the uc_sigmask from the
117 * ucontext_t structure may push the hidden register file downards
118 */
119struct compat_regfile {
120 /* Upper half of all the 64-bit registers that were truncated
121 on a copy to a 32-bit userspace */
122 compat_int_t rf_gr[32];
123 compat_int_t rf_iasq[2];
124 compat_int_t rf_iaoq[2];
125 compat_int_t rf_sar;
126};
127
128#define COMPAT_SIGRETURN_TRAMP 4
129#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
130#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \
131 COMPAT_SIGRESTARTBLOCK_TRAMP)
132
133struct compat_rt_sigframe {
134 /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c
135 Secondary to that it must protect the ERESTART_RESTARTBLOCK
136 trampoline we left on the stack (we were bad and didn't
137 change sp so we could run really fast.) */
138 compat_uint_t tramp[COMPAT_TRAMP_SIZE];
139 compat_siginfo_t info;
140 struct compat_ucontext uc;
141 /* Hidden location of truncated registers, *must* be last. */
142 struct compat_regfile regs;
143};
144
145/*
146 * The 32-bit ABI wants at least 48 bytes for a function call frame:
147 * 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of
148 * which Linux/parisc uses is sp-20 for the saved return pointer...)
149 * Then, the stack pointer must be rounded to a cache line (64 bytes).
150 */
151#define SIGFRAME32 64
152#define FUNCTIONCALLFRAME32 48
153#define PARISC_RT_SIGFRAME_SIZE32 (((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32)
154
32void sigset_32to64(sigset_t *s64, compat_sigset_t *s32); 155void sigset_32to64(sigset_t *s64, compat_sigset_t *s32);
33void sigset_64to32(compat_sigset_t *s32, sigset_t *s64); 156void sigset_64to32(compat_sigset_t *s32, sigset_t *s64);
34int do_sigaltstack32 (const compat_stack_t __user *uss32, 157int do_sigaltstack32 (const compat_stack_t __user *uss32,
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d66163492890..af88afef41bd 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -650,6 +650,8 @@ end_linux_gateway_page:
650#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page) 650#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
651#endif 651#endif
652 652
653 .section .rodata,"a"
654
653 .align 4096 655 .align 4096
654 /* Light-weight-syscall table */ 656 /* Light-weight-syscall table */
655 /* Start of lws table. */ 657 /* Start of lws table. */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 32cbc0489324..51d2480627d1 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -374,5 +374,24 @@
374 ENTRY_SAME(keyctl) 374 ENTRY_SAME(keyctl)
375 ENTRY_SAME(ioprio_set) 375 ENTRY_SAME(ioprio_set)
376 ENTRY_SAME(ioprio_get) 376 ENTRY_SAME(ioprio_get)
377 ENTRY_SAME(inotify_init)
378 ENTRY_SAME(inotify_add_watch) /* 270 */
379 ENTRY_SAME(inotify_rm_watch)
380 ENTRY_COMP(pselect6)
381 ENTRY_COMP(ppoll)
382 ENTRY_SAME(migrate_pages)
383 ENTRY_COMP(openat) /* 275 */
384 ENTRY_SAME(mkdirat)
385 ENTRY_SAME(mknodat)
386 ENTRY_SAME(fchownat)
387 ENTRY_COMP(futimesat)
388 ENTRY_COMP(newfstatat) /* 280 */
389 ENTRY_SAME(unlinkat)
390 ENTRY_SAME(renameat)
391 ENTRY_SAME(linkat)
392 ENTRY_SAME(symlinkat)
393 ENTRY_SAME(readlinkat) /* 285 */
394 ENTRY_SAME(fchmodat)
395 ENTRY_SAME(faccessat)
377 /* Nothing yet */ 396 /* Nothing yet */
378 397
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 15914f0235a0..ff200608c851 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -193,10 +193,9 @@ void show_stack(struct task_struct *task, unsigned long *s)
193 193
194HERE: 194HERE:
195 asm volatile ("copy %%r30, %0" : "=r"(sp)); 195 asm volatile ("copy %%r30, %0" : "=r"(sp));
196 r = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL); 196 r = kzalloc(sizeof(struct pt_regs), GFP_KERNEL);
197 if (!r) 197 if (!r)
198 return; 198 return;
199 memset(r, 0, sizeof(struct pt_regs));
200 r->iaoq[0] = (unsigned long)&&HERE; 199 r->iaoq[0] = (unsigned long)&&HERE;
201 r->gr[2] = (unsigned long)__builtin_return_address(0); 200 r->gr[2] = (unsigned long)__builtin_return_address(0);
202 r->gr[30] = sp; 201 r->gr[30] = sp;
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index f84f2586672b..66c8a9f6a27e 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -337,6 +337,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
337 } 337 }
338 break; 338 break;
339 case INVALIDEXCEPTION: 339 case INVALIDEXCEPTION:
340 case OPC_2E_INVALIDEXCEPTION:
340 update_trap_counts(Fpu_register, aflags, bflags, trap_counts); 341 update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
341 return SIGNALCODE(SIGFPE, FPE_FLTINV); 342 return SIGNALCODE(SIGFPE, FPE_FLTINV);
342 case DIVISIONBYZEROEXCEPTION: 343 case DIVISIONBYZEROEXCEPTION:
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 720287d46e55..7847ca13d6c2 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -371,17 +371,11 @@ static void __init setup_bootmem(void)
371 371
372void free_initmem(void) 372void free_initmem(void)
373{ 373{
374 /* FIXME: */
375#if 0
376 printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
377 (&__init_end - &__init_begin) >> 10);
378 return;
379#else
380 unsigned long addr; 374 unsigned long addr;
381 375
382 printk(KERN_INFO "Freeing unused kernel memory: "); 376 printk(KERN_INFO "Freeing unused kernel memory: ");
383 377
384#if 1 378#ifdef CONFIG_DEBUG_KERNEL
385 /* Attempt to catch anyone trying to execute code here 379 /* Attempt to catch anyone trying to execute code here
386 * by filling the page with BRK insns. 380 * by filling the page with BRK insns.
387 * 381 *
@@ -414,9 +408,21 @@ void free_initmem(void)
414 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); 408 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
415 409
416 printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10); 410 printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
417#endif
418} 411}
419 412
413
414#ifdef CONFIG_DEBUG_RODATA
415void mark_rodata_ro(void)
416{
417 extern char __start_rodata, __end_rodata;
418 /* rodata memory was already mapped with KERNEL_RO access rights by
419 pagetable_init() and map_pages(). No need to do additional stuff here */
420 printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
421 (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
422}
423#endif
424
425
420/* 426/*
421 * Just an arbitrary offset to serve as a "hole" between mapping areas 427 * Just an arbitrary offset to serve as a "hole" between mapping areas
422 * (between top of physical memory and a potential pcxl dma mapping 428 * (between top of physical memory and a potential pcxl dma mapping
@@ -477,11 +483,6 @@ void __init mem_init(void)
477 483
478} 484}
479 485
480int do_check_pgt_cache(int low, int high)
481{
482 return 0;
483}
484
485unsigned long *empty_zero_page __read_mostly; 486unsigned long *empty_zero_page __read_mostly;
486 487
487void show_mem(void) 488void show_mem(void)
@@ -690,7 +691,7 @@ static void __init pagetable_init(void)
690 691
691#ifdef CONFIG_BLK_DEV_INITRD 692#ifdef CONFIG_BLK_DEV_INITRD
692 if (initrd_end && initrd_end > mem_limit) { 693 if (initrd_end && initrd_end > mem_limit) {
693 printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end); 694 printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
694 map_pages(initrd_start, __pa(initrd_start), 695 map_pages(initrd_start, __pa(initrd_start),
695 initrd_end - initrd_start, PAGE_KERNEL); 696 initrd_end - initrd_start, PAGE_KERNEL);
696 } 697 }
@@ -792,8 +793,6 @@ map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
792EXPORT_SYMBOL(map_hpux_gateway_page); 793EXPORT_SYMBOL(map_hpux_gateway_page);
793#endif 794#endif
794 795
795extern void flush_tlb_all_local(void);
796
797void __init paging_init(void) 796void __init paging_init(void)
798{ 797{
799 int i; 798 int i;
@@ -802,7 +801,7 @@ void __init paging_init(void)
802 pagetable_init(); 801 pagetable_init();
803 gateway_init(); 802 gateway_init();
804 flush_cache_all_local(); /* start with known state */ 803 flush_cache_all_local(); /* start with known state */
805 flush_tlb_all_local(); 804 flush_tlb_all_local(NULL);
806 805
807 for (i = 0; i < npmem_ranges; i++) { 806 for (i = 0; i < npmem_ranges; i++) {
808 unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 }; 807 unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
@@ -993,7 +992,7 @@ void flush_tlb_all(void)
993 do_recycle++; 992 do_recycle++;
994 } 993 }
995 spin_unlock(&sid_lock); 994 spin_unlock(&sid_lock);
996 on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1); 995 on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
997 if (do_recycle) { 996 if (do_recycle) {
998 spin_lock(&sid_lock); 997 spin_lock(&sid_lock);
999 recycle_sids(recycle_ndirty,recycle_dirty_array); 998 recycle_sids(recycle_ndirty,recycle_dirty_array);
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 6e27ac68ec3f..83b33fe1923c 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1486,7 +1486,7 @@ sys_inotify_rm_watch_wrapper:
1486 1486
1487 .globl compat_sys_openat_wrapper 1487 .globl compat_sys_openat_wrapper
1488compat_sys_openat_wrapper: 1488compat_sys_openat_wrapper:
1489 lgfr %r2,%r2 # int 1489 llgfr %r2,%r2 # unsigned int
1490 llgtr %r3,%r3 # const char * 1490 llgtr %r3,%r3 # const char *
1491 lgfr %r4,%r4 # int 1491 lgfr %r4,%r4 # int
1492 lgfr %r5,%r5 # int 1492 lgfr %r5,%r5 # int
@@ -1518,14 +1518,14 @@ sys_fchownat_wrapper:
1518 1518
1519 .globl compat_sys_futimesat_wrapper 1519 .globl compat_sys_futimesat_wrapper
1520compat_sys_futimesat_wrapper: 1520compat_sys_futimesat_wrapper:
1521 lgfr %r2,%r2 # int 1521 llgfr %r2,%r2 # unsigned int
1522 llgtr %r3,%r3 # char * 1522 llgtr %r3,%r3 # char *
1523 llgtr %r4,%r4 # struct timeval * 1523 llgtr %r4,%r4 # struct timeval *
1524 jg compat_sys_futimesat 1524 jg compat_sys_futimesat
1525 1525
1526 .globl compat_sys_newfstatat_wrapper 1526 .globl compat_sys_newfstatat_wrapper
1527compat_sys_newfstatat_wrapper: 1527compat_sys_newfstatat_wrapper:
1528 lgfr %r2,%r2 # int 1528 llgfr %r2,%r2 # unsigned int
1529 llgtr %r3,%r3 # char * 1529 llgtr %r3,%r3 # char *
1530 llgtr %r4,%r4 # struct stat * 1530 llgtr %r4,%r4 # struct stat *
1531 lgfr %r5,%r5 # int 1531 lgfr %r5,%r5 # int
diff --git a/arch/sparc64/boot/.gitignore b/arch/sparc64/boot/.gitignore
new file mode 100644
index 000000000000..36356f9d498e
--- /dev/null
+++ b/arch/sparc64/boot/.gitignore
@@ -0,0 +1,4 @@
1image
2tftpboot.img
3vmlinux.aout
4piggyback
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 9ceddad0fb49..bc56a7d88308 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.16-rc1 3# Linux kernel version: 2.6.16-rc2
4# Wed Jan 18 13:41:02 2006 4# Sat Feb 4 02:31:38 2006
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -23,7 +23,6 @@ CONFIG_HZ=250
23# Code maturity level options 23# Code maturity level options
24# 24#
25CONFIG_EXPERIMENTAL=y 25CONFIG_EXPERIMENTAL=y
26CONFIG_CLEAN_COMPILE=y
27CONFIG_BROKEN_ON_SMP=y 26CONFIG_BROKEN_ON_SMP=y
28CONFIG_INIT_ENV_ARG_LIMIT=32 27CONFIG_INIT_ENV_ARG_LIMIT=32
29 28
@@ -155,6 +154,7 @@ CONFIG_NET=y
155# 154#
156# Networking options 155# Networking options
157# 156#
157# CONFIG_NETDEBUG is not set
158CONFIG_PACKET=y 158CONFIG_PACKET=y
159CONFIG_PACKET_MMAP=y 159CONFIG_PACKET_MMAP=y
160CONFIG_UNIX=y 160CONFIG_UNIX=y
@@ -224,6 +224,11 @@ CONFIG_IP_DCCP_TFRC_LIB=m
224# SCTP Configuration (EXPERIMENTAL) 224# SCTP Configuration (EXPERIMENTAL)
225# 225#
226# CONFIG_IP_SCTP is not set 226# CONFIG_IP_SCTP is not set
227
228#
229# TIPC Configuration (EXPERIMENTAL)
230#
231# CONFIG_TIPC is not set
227# CONFIG_ATM is not set 232# CONFIG_ATM is not set
228# CONFIG_BRIDGE is not set 233# CONFIG_BRIDGE is not set
229CONFIG_VLAN_8021Q=m 234CONFIG_VLAN_8021Q=m
@@ -233,11 +238,6 @@ CONFIG_VLAN_8021Q=m
233# CONFIG_ATALK is not set 238# CONFIG_ATALK is not set
234# CONFIG_X25 is not set 239# CONFIG_X25 is not set
235# CONFIG_LAPB is not set 240# CONFIG_LAPB is not set
236
237#
238# TIPC Configuration (EXPERIMENTAL)
239#
240# CONFIG_TIPC is not set
241# CONFIG_NET_DIVERT is not set 241# CONFIG_NET_DIVERT is not set
242# CONFIG_ECONET is not set 242# CONFIG_ECONET is not set
243# CONFIG_WAN_ROUTER is not set 243# CONFIG_WAN_ROUTER is not set
@@ -657,6 +657,7 @@ CONFIG_SERIAL_SUNSU_CONSOLE=y
657CONFIG_SERIAL_SUNSAB=m 657CONFIG_SERIAL_SUNSAB=m
658CONFIG_SERIAL_CORE=y 658CONFIG_SERIAL_CORE=y
659CONFIG_SERIAL_CORE_CONSOLE=y 659CONFIG_SERIAL_CORE_CONSOLE=y
660# CONFIG_SERIAL_JSM is not set
660CONFIG_UNIX98_PTYS=y 661CONFIG_UNIX98_PTYS=y
661# CONFIG_LEGACY_PTYS is not set 662# CONFIG_LEGACY_PTYS is not set
662 663
@@ -1118,6 +1119,10 @@ CONFIG_USB_HIDDEV=y
1118# 1119#
1119 1120
1120# 1121#
1122# EDAC - error detection and reporting (RAS)
1123#
1124
1125#
1121# Misc Linux/SPARC drivers 1126# Misc Linux/SPARC drivers
1122# 1127#
1123CONFIG_SUN_OPENPROMIO=m 1128CONFIG_SUN_OPENPROMIO=m
diff --git a/arch/v850/kernel/simcons.c b/arch/v850/kernel/simcons.c
index 7f0efaa025c9..3975aa02cef8 100644
--- a/arch/v850/kernel/simcons.c
+++ b/arch/v850/kernel/simcons.c
@@ -117,6 +117,7 @@ late_initcall(simcons_tty_init);
117 tty driver. */ 117 tty driver. */
118void simcons_poll_tty (struct tty_struct *tty) 118void simcons_poll_tty (struct tty_struct *tty)
119{ 119{
120 char buf[32]; /* Not the nicest way to do it but I need it correct first */
120 int flip = 0, send_break = 0; 121 int flip = 0, send_break = 0;
121 struct pollfd pfd; 122 struct pollfd pfd;
122 pfd.fd = 0; 123 pfd.fd = 0;
@@ -124,21 +125,15 @@ void simcons_poll_tty (struct tty_struct *tty)
124 125
125 if (V850_SIM_SYSCALL (poll, &pfd, 1, 0) > 0) { 126 if (V850_SIM_SYSCALL (poll, &pfd, 1, 0) > 0) {
126 if (pfd.revents & POLLIN) { 127 if (pfd.revents & POLLIN) {
127 int left = TTY_FLIPBUF_SIZE - tty->flip.count; 128 /* Real block hardware knows the transfer size before
128 129 transfer so the new tty buffering doesn't try to handle
129 if (left > 0) { 130 this rather weird simulator specific case well */
130 unsigned char *buf = tty->flip.char_buf_ptr; 131 int rd = V850_SIM_SYSCALL (read, 0, buf, 32);
131 int rd = V850_SIM_SYSCALL (read, 0, buf, left); 132 if (rd > 0) {
132 133 tty_insert_flip_string(tty, buf, rd);
133 if (rd > 0) { 134 flip = 1;
134 tty->flip.count += rd; 135 } else
135 tty->flip.char_buf_ptr += rd; 136 send_break = 1;
136 memset (tty->flip.flag_buf_ptr, 0, rd);
137 tty->flip.flag_buf_ptr += rd;
138 flip = 1;
139 } else
140 send_break = 1;
141 }
142 } else if (pfd.revents & POLLERR) 137 } else if (pfd.revents & POLLERR)
143 send_break = 1; 138 send_break = 1;
144 } 139 }
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index fcb06a50fdd2..ea31b4c62105 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -2,13 +2,6 @@ menu "Kernel hacking"
2 2
3source "lib/Kconfig.debug" 3source "lib/Kconfig.debug"
4 4
5config INIT_DEBUG
6 bool "Debug __init statements"
7 depends on DEBUG_KERNEL
8 help
9 Fill __init and __initdata at the end of boot. This helps debugging
10 illegal uses of __init and __initdata after initialization.
11
12config DEBUG_RODATA 5config DEBUG_RODATA
13 bool "Write protect kernel read-only data structures" 6 bool "Write protect kernel read-only data structures"
14 depends on DEBUG_KERNEL 7 depends on DEBUG_KERNEL
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 09a3eb743315..56832929a543 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.15-git12 3# Linux kernel version: 2.6.16-rc1-git2
4# Mon Jan 16 13:09:08 2006 4# Thu Jan 19 10:05:21 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -310,6 +310,11 @@ CONFIG_IPV6=y
310# SCTP Configuration (EXPERIMENTAL) 310# SCTP Configuration (EXPERIMENTAL)
311# 311#
312# CONFIG_IP_SCTP is not set 312# CONFIG_IP_SCTP is not set
313
314#
315# TIPC Configuration (EXPERIMENTAL)
316#
317# CONFIG_TIPC is not set
313# CONFIG_ATM is not set 318# CONFIG_ATM is not set
314# CONFIG_BRIDGE is not set 319# CONFIG_BRIDGE is not set
315# CONFIG_VLAN_8021Q is not set 320# CONFIG_VLAN_8021Q is not set
@@ -319,11 +324,6 @@ CONFIG_IPV6=y
319# CONFIG_ATALK is not set 324# CONFIG_ATALK is not set
320# CONFIG_X25 is not set 325# CONFIG_X25 is not set
321# CONFIG_LAPB is not set 326# CONFIG_LAPB is not set
322
323#
324# TIPC Configuration (EXPERIMENTAL)
325#
326# CONFIG_TIPC is not set
327# CONFIG_NET_DIVERT is not set 327# CONFIG_NET_DIVERT is not set
328# CONFIG_ECONET is not set 328# CONFIG_ECONET is not set
329# CONFIG_WAN_ROUTER is not set 329# CONFIG_WAN_ROUTER is not set
@@ -1098,6 +1098,12 @@ CONFIG_USB_MON=y
1098# 1098#
1099 1099
1100# 1100#
1101# EDAC - error detection and reporting (RAS)
1102#
1103# CONFIG_EDAC is not set
1104# CONFIG_EDAC_POLL is not set
1105
1106#
1101# Firmware Drivers 1107# Firmware Drivers
1102# 1108#
1103# CONFIG_EDD is not set 1109# CONFIG_EDD is not set
@@ -1290,6 +1296,7 @@ CONFIG_DEBUG_FS=y
1290# CONFIG_DEBUG_VM is not set 1296# CONFIG_DEBUG_VM is not set
1291# CONFIG_FRAME_POINTER is not set 1297# CONFIG_FRAME_POINTER is not set
1292# CONFIG_FORCED_INLINING is not set 1298# CONFIG_FORCED_INLINING is not set
1299# CONFIG_UNWIND_INFO is not set
1293# CONFIG_RCU_TORTURE_TEST is not set 1300# CONFIG_RCU_TORTURE_TEST is not set
1294CONFIG_INIT_DEBUG=y 1301CONFIG_INIT_DEBUG=y
1295# CONFIG_DEBUG_RODATA is not set 1302# CONFIG_DEBUG_RODATA is not set
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 5d3c5b07b8db..6147770b4347 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -35,8 +35,12 @@
35#include <asm/mach_apic.h> 35#include <asm/mach_apic.h>
36#include <asm/nmi.h> 36#include <asm/nmi.h>
37#include <asm/idle.h> 37#include <asm/idle.h>
38#include <asm/proto.h>
39#include <asm/timex.h>
38 40
39int apic_verbosity; 41int apic_verbosity;
42int apic_runs_main_timer;
43int apic_calibrate_pmtmr __initdata;
40 44
41int disable_apic_timer __initdata; 45int disable_apic_timer __initdata;
42 46
@@ -68,6 +72,26 @@ int get_maxlvt(void)
68 return maxlvt; 72 return maxlvt;
69} 73}
70 74
75/*
76 * 'what should we do if we get a hw irq event on an illegal vector'.
77 * each architecture has to answer this themselves.
78 */
79void ack_bad_irq(unsigned int irq)
80{
81 printk("unexpected IRQ trap at vector %02x\n", irq);
82 /*
83 * Currently unexpected vectors happen only on SMP and APIC.
84 * We _must_ ack these because every local APIC has only N
85 * irq slots per priority level, and a 'hanging, unacked' IRQ
86 * holds up an irq slot - in excessive cases (when multiple
87 * unexpected vectors occur) that might lock up the APIC
88 * completely.
89 * But don't ack when the APIC is disabled. -AK
90 */
91 if (!disable_apic)
92 ack_APIC_irq();
93}
94
71void clear_local_APIC(void) 95void clear_local_APIC(void)
72{ 96{
73 int maxlvt; 97 int maxlvt;
@@ -702,9 +726,17 @@ static void setup_APIC_timer(unsigned int clocks)
702 c2 |= inb_p(0x40) << 8; 726 c2 |= inb_p(0x40) << 8;
703 } while (c2 - c1 < 300); 727 } while (c2 - c1 < 300);
704 } 728 }
705
706 __setup_APIC_LVTT(clocks); 729 __setup_APIC_LVTT(clocks);
707 730 /* Turn off PIT interrupt if we use APIC timer as main timer.
731 Only works with the PM timer right now
732 TBD fix it for HPET too. */
733 if (vxtime.mode == VXTIME_PMTMR &&
734 smp_processor_id() == boot_cpu_id &&
735 apic_runs_main_timer == 1 &&
736 !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
737 stop_timer_interrupt();
738 apic_runs_main_timer++;
739 }
708 local_irq_restore(flags); 740 local_irq_restore(flags);
709} 741}
710 742
@@ -735,14 +767,27 @@ static int __init calibrate_APIC_clock(void)
735 __setup_APIC_LVTT(1000000000); 767 __setup_APIC_LVTT(1000000000);
736 768
737 apic_start = apic_read(APIC_TMCCT); 769 apic_start = apic_read(APIC_TMCCT);
738 rdtscl(tsc_start); 770#ifdef CONFIG_X86_PM_TIMER
739 771 if (apic_calibrate_pmtmr && pmtmr_ioport) {
740 do { 772 pmtimer_wait(5000); /* 5ms wait */
741 apic = apic_read(APIC_TMCCT); 773 apic = apic_read(APIC_TMCCT);
742 rdtscl(tsc); 774 result = (apic_start - apic) * 1000L / 5;
743 } while ((tsc - tsc_start) < TICK_COUNT && (apic - apic_start) < TICK_COUNT); 775 } else
776#endif
777 {
778 rdtscl(tsc_start);
779
780 do {
781 apic = apic_read(APIC_TMCCT);
782 rdtscl(tsc);
783 } while ((tsc - tsc_start) < TICK_COUNT &&
784 (apic - apic_start) < TICK_COUNT);
785
786 result = (apic_start - apic) * 1000L * cpu_khz /
787 (tsc - tsc_start);
788 }
789 printk("result %d\n", result);
744 790
745 result = (apic_start - apic) * 1000L * cpu_khz / (tsc - tsc_start);
746 791
747 printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", 792 printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
748 result / 1000 / 1000, result / 1000 % 1000); 793 result / 1000 / 1000, result / 1000 % 1000);
@@ -872,6 +917,8 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
872#ifdef CONFIG_SMP 917#ifdef CONFIG_SMP
873 update_process_times(user_mode(regs)); 918 update_process_times(user_mode(regs));
874#endif 919#endif
920 if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
921 main_timer_handler(regs);
875 /* 922 /*
876 * We take the 'long' return path, and there every subsystem 923 * We take the 'long' return path, and there every subsystem
877 * grabs the appropriate locks (kernel lock/ irq lock). 924 * grabs the appropriate locks (kernel lock/ irq lock).
@@ -924,7 +971,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
924 * multi-chassis. Use available data to take a good guess. 971 * multi-chassis. Use available data to take a good guess.
925 * If in doubt, go HPET. 972 * If in doubt, go HPET.
926 */ 973 */
927__init int oem_force_hpet_timer(void) 974__cpuinit int oem_force_hpet_timer(void)
928{ 975{
929 int i, clusters, zeros; 976 int i, clusters, zeros;
930 unsigned id; 977 unsigned id;
@@ -1081,10 +1128,34 @@ static __init int setup_nolapic(char *str)
1081 1128
1082static __init int setup_noapictimer(char *str) 1129static __init int setup_noapictimer(char *str)
1083{ 1130{
1131 if (str[0] != ' ' && str[0] != 0)
1132 return -1;
1084 disable_apic_timer = 1; 1133 disable_apic_timer = 1;
1085 return 0; 1134 return 0;
1086} 1135}
1087 1136
1137static __init int setup_apicmaintimer(char *str)
1138{
1139 apic_runs_main_timer = 1;
1140 nohpet = 1;
1141 return 0;
1142}
1143__setup("apicmaintimer", setup_apicmaintimer);
1144
1145static __init int setup_noapicmaintimer(char *str)
1146{
1147 apic_runs_main_timer = -1;
1148 return 0;
1149}
1150__setup("noapicmaintimer", setup_noapicmaintimer);
1151
1152static __init int setup_apicpmtimer(char *s)
1153{
1154 apic_calibrate_pmtmr = 1;
1155 return setup_apicmaintimer(NULL);
1156}
1157__setup("apicpmtimer", setup_apicpmtimer);
1158
1088/* dummy parsing: see setup.c */ 1159/* dummy parsing: see setup.c */
1089 1160
1090__setup("disableapic", setup_disableapic); 1161__setup("disableapic", setup_disableapic);
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index dbdba56e8faa..b150c87a08c6 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -499,7 +499,9 @@ ENTRY(stub_rt_sigreturn)
499 movq %gs:pda_irqstackptr,%rax 499 movq %gs:pda_irqstackptr,%rax
500 cmoveq %rax,%rsp /*todo This needs CFI annotation! */ 500 cmoveq %rax,%rsp /*todo This needs CFI annotation! */
501 pushq %rdi # save old stack 501 pushq %rdi # save old stack
502#ifndef CONFIG_DEBUG_INFO
502 CFI_ADJUST_CFA_OFFSET 8 503 CFI_ADJUST_CFA_OFFSET 8
504#endif
503 call \func 505 call \func
504 .endm 506 .endm
505 507
@@ -509,7 +511,9 @@ ENTRY(common_interrupt)
509 /* 0(%rsp): oldrsp-ARGOFFSET */ 511 /* 0(%rsp): oldrsp-ARGOFFSET */
510ret_from_intr: 512ret_from_intr:
511 popq %rdi 513 popq %rdi
514#ifndef CONFIG_DEBUG_INFO
512 CFI_ADJUST_CFA_OFFSET -8 515 CFI_ADJUST_CFA_OFFSET -8
516#endif
513 cli 517 cli
514 decl %gs:pda_irqcount 518 decl %gs:pda_irqcount
515#ifdef CONFIG_DEBUG_INFO 519#ifdef CONFIG_DEBUG_INFO
@@ -922,7 +926,7 @@ KPROBE_ENTRY(debug)
922 .previous .text 926 .previous .text
923 927
924 /* runs on exception stack */ 928 /* runs on exception stack */
925ENTRY(nmi) 929KPROBE_ENTRY(nmi)
926 INTR_FRAME 930 INTR_FRAME
927 pushq $-1 931 pushq $-1
928 CFI_ADJUST_CFA_OFFSET 8 932 CFI_ADJUST_CFA_OFFSET 8
@@ -969,6 +973,7 @@ paranoid_schedule:
969 cli 973 cli
970 jmp paranoid_userspace 974 jmp paranoid_userspace
971 CFI_ENDPROC 975 CFI_ENDPROC
976 .previous .text
972 977
973KPROBE_ENTRY(int3) 978KPROBE_ENTRY(int3)
974 INTR_FRAME 979 INTR_FRAME
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 1a5060b434b8..4282d72b2a26 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -304,6 +304,14 @@ void __init check_ioapic(void)
304#endif 304#endif
305 /* RED-PEN skip them on mptables too? */ 305 /* RED-PEN skip them on mptables too? */
306 return; 306 return;
307 case PCI_VENDOR_ID_ATI:
308 if (apic_runs_main_timer != 0)
309 break;
310 printk(KERN_INFO
311 "ATI board detected. Using APIC/PM timer.\n");
312 apic_runs_main_timer = 1;
313 nohpet = 1;
314 return;
307 } 315 }
308 316
309 /* No multi-function device? */ 317 /* No multi-function device? */
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 13a2eada6c95..b8b9529fa89e 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -380,7 +380,7 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
380 */ 380 */
381void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 381void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
382{ 382{
383 static cpumask_t mce_cpus __initdata = CPU_MASK_NONE; 383 static cpumask_t mce_cpus = CPU_MASK_NONE;
384 384
385 mce_cpu_quirks(c); 385 mce_cpu_quirks(c);
386 386
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5fae6f0cd994..8be407a1f62d 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -24,6 +24,7 @@
24#include <linux/sysdev.h> 24#include <linux/sysdev.h>
25#include <linux/nmi.h> 25#include <linux/nmi.h>
26#include <linux/sysctl.h> 26#include <linux/sysctl.h>
27#include <linux/kprobes.h>
27 28
28#include <asm/smp.h> 29#include <asm/smp.h>
29#include <asm/mtrr.h> 30#include <asm/mtrr.h>
@@ -468,7 +469,7 @@ void touch_nmi_watchdog (void)
468 touch_softlockup_watchdog(); 469 touch_softlockup_watchdog();
469} 470}
470 471
471void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason) 472void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
472{ 473{
473 int sum; 474 int sum;
474 int touched = 0; 475 int touched = 0;
@@ -512,14 +513,14 @@ void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
512 } 513 }
513} 514}
514 515
515static int dummy_nmi_callback(struct pt_regs * regs, int cpu) 516static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
516{ 517{
517 return 0; 518 return 0;
518} 519}
519 520
520static nmi_callback_t nmi_callback = dummy_nmi_callback; 521static nmi_callback_t nmi_callback = dummy_nmi_callback;
521 522
522asmlinkage void do_nmi(struct pt_regs * regs, long error_code) 523asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
523{ 524{
524 int cpu = safe_smp_processor_id(); 525 int cpu = safe_smp_processor_id();
525 526
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 2f5d8328e2b9..4ed391edd47a 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -107,6 +107,9 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
107 goto again; 107 goto again;
108 } 108 }
109 109
110 /* Let low level make its own zone decisions */
111 gfp &= ~(GFP_DMA32|GFP_DMA);
112
110 if (dma_ops->alloc_coherent) 113 if (dma_ops->alloc_coherent)
111 return dma_ops->alloc_coherent(dev, size, 114 return dma_ops->alloc_coherent(dev, size,
112 dma_handle, gfp); 115 dma_handle, gfp);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index c37fc7726ba6..2fe23a6c361b 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -457,9 +457,12 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
457error: 457error:
458 flush_gart(NULL); 458 flush_gart(NULL);
459 gart_unmap_sg(dev, sg, nents, dir); 459 gart_unmap_sg(dev, sg, nents, dir);
460 /* When it was forced try again unforced */ 460 /* When it was forced or merged try again in a dumb way */
461 if (force_iommu) 461 if (force_iommu || iommu_merge) {
462 return dma_map_sg_nonforce(dev, sg, nents, dir); 462 out = dma_map_sg_nonforce(dev, sg, nents, dir);
463 if (out > 0)
464 return out;
465 }
463 if (panic_on_overflow) 466 if (panic_on_overflow)
464 panic("dma_map_sg: overflow on %lu pages\n", pages); 467 panic("dma_map_sg: overflow on %lu pages\n", pages);
465 iommu_full(dev, pages << PAGE_SHIFT, dir); 468 iommu_full(dev, pages << PAGE_SHIFT, dir);
@@ -642,9 +645,18 @@ static int __init pci_iommu_init(void)
642 (no_agp && init_k8_gatt(&info) < 0)) { 645 (no_agp && init_k8_gatt(&info) < 0)) {
643 no_iommu = 1; 646 no_iommu = 1;
644 no_iommu_init(); 647 no_iommu_init();
648 printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
649 if (end_pfn > MAX_DMA32_PFN) {
650 printk(KERN_ERR "WARNING more than 4GB of memory "
651 "but IOMMU not compiled in.\n"
652 KERN_ERR "WARNING 32bit PCI may malfunction.\n"
653 KERN_ERR "You might want to enable "
654 "CONFIG_GART_IOMMU\n");
655 }
645 return -1; 656 return -1;
646 } 657 }
647 658
659 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
648 aper_size = info.aper_size * 1024 * 1024; 660 aper_size = info.aper_size * 1024 * 1024;
649 iommu_size = check_iommu_size(info.aper_base, aper_size); 661 iommu_size = check_iommu_size(info.aper_base, aper_size);
650 iommu_pages = iommu_size >> PAGE_SHIFT; 662 iommu_pages = iommu_size >> PAGE_SHIFT;
@@ -718,7 +730,6 @@ static int __init pci_iommu_init(void)
718 730
719 flush_gart(NULL); 731 flush_gart(NULL);
720 732
721 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
722 dma_ops = &gart_dma_ops; 733 dma_ops = &gart_dma_ops;
723 734
724 return 0; 735 return 0;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index e41564975195..44adcc2d5e5b 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -88,12 +88,5 @@ void __init no_iommu_init(void)
88{ 88{
89 if (dma_ops) 89 if (dma_ops)
90 return; 90 return;
91 printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
92 dma_ops = &nommu_dma_ops; 91 dma_ops = &nommu_dma_ops;
93 if (end_pfn > MAX_DMA32_PFN) {
94 printk(KERN_ERR
95 "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
96 KERN_ERR "WARNING 32bit PCI may malfunction.\n"
97 KERN_ERR "You might want to enable CONFIG_GART_IOMMU\n");
98 }
99} 92}
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 3569a25ad7fb..990ed67896f2 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -35,8 +35,8 @@ void pci_swiotlb_init(void)
35 (end_pfn > MAX_DMA32_PFN || force_iommu)) 35 (end_pfn > MAX_DMA32_PFN || force_iommu))
36 swiotlb = 1; 36 swiotlb = 1;
37 if (swiotlb) { 37 if (swiotlb) {
38 swiotlb_init();
39 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); 38 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
39 swiotlb_init();
40 dma_ops = &swiotlb_dma_ops; 40 dma_ops = &swiotlb_dma_ops;
41 } 41 }
42} 42}
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index feb5f108dd26..5c51d10408a6 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -80,6 +80,31 @@ int pmtimer_mark_offset(void)
80 return lost - 1; 80 return lost - 1;
81} 81}
82 82
83static unsigned pmtimer_wait_tick(void)
84{
85 u32 a, b;
86 for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
87 a == b;
88 b = inl(pmtmr_ioport) & ACPI_PM_MASK)
89 ;
90 return b;
91}
92
93/* note: wait time is rounded up to one tick */
94void pmtimer_wait(unsigned us)
95{
96 u32 a, b;
97 a = pmtimer_wait_tick();
98 do {
99 b = inl(pmtmr_ioport);
100 } while (cyc2us(b - a) < us);
101}
102
103void pmtimer_resume(void)
104{
105 last_pmtmr_tick = inl(pmtmr_ioport);
106}
107
83unsigned int do_gettimeoffset_pm(void) 108unsigned int do_gettimeoffset_pm(void)
84{ 109{
85 u32 now, offset, delta = 0; 110 u32 now, offset, delta = 0;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 8ded407e4a94..22a05dec81a2 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -516,8 +516,10 @@ out:
516 * This could still be optimized: 516 * This could still be optimized:
517 * - fold all the options into a flag word and test it with a single test. 517 * - fold all the options into a flag word and test it with a single test.
518 * - could test fs/gs bitsliced 518 * - could test fs/gs bitsliced
519 *
520 * Kprobes not supported here. Set the probe on schedule instead.
519 */ 521 */
520struct task_struct * 522__kprobes struct task_struct *
521__switch_to(struct task_struct *prev_p, struct task_struct *next_p) 523__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
522{ 524{
523 struct thread_struct *prev = &prev_p->thread, 525 struct thread_struct *prev = &prev_p->thread,
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 28895c03cb11..363db5a003df 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -741,7 +741,7 @@ void __init setup_arch(char **cmdline_p)
741 e820_setup_gap(); 741 e820_setup_gap();
742 742
743#ifdef CONFIG_GART_IOMMU 743#ifdef CONFIG_GART_IOMMU
744 iommu_hole_init(); 744 iommu_hole_init();
745#endif 745#endif
746 746
747#ifdef CONFIG_VT 747#ifdef CONFIG_VT
@@ -877,6 +877,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
877static int __init init_amd(struct cpuinfo_x86 *c) 877static int __init init_amd(struct cpuinfo_x86 *c)
878{ 878{
879 int r; 879 int r;
880 unsigned level;
880 881
881#ifdef CONFIG_SMP 882#ifdef CONFIG_SMP
882 unsigned long value; 883 unsigned long value;
@@ -899,6 +900,11 @@ static int __init init_amd(struct cpuinfo_x86 *c)
899 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ 900 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
900 clear_bit(0*32+31, &c->x86_capability); 901 clear_bit(0*32+31, &c->x86_capability);
901 902
903 /* On C+ stepping K8 rep microcode works well for copy/memset */
904 level = cpuid_eax(1);
905 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
906 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
907
902 r = get_model_name(c); 908 r = get_model_name(c);
903 if (!r) { 909 if (!r) {
904 switch (c->x86) { 910 switch (c->x86) {
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index f8c47c688443..c0844bffbf84 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -51,7 +51,7 @@ extern int using_apic_timer;
51DEFINE_SPINLOCK(rtc_lock); 51DEFINE_SPINLOCK(rtc_lock);
52DEFINE_SPINLOCK(i8253_lock); 52DEFINE_SPINLOCK(i8253_lock);
53 53
54static int nohpet __initdata = 0; 54int nohpet __initdata = 0;
55static int notsc __initdata = 0; 55static int notsc __initdata = 0;
56 56
57#undef HPET_HACK_ENABLE_DANGEROUS 57#undef HPET_HACK_ENABLE_DANGEROUS
@@ -345,7 +345,7 @@ static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
345#endif 345#endif
346} 346}
347 347
348static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) 348void main_timer_handler(struct pt_regs *regs)
349{ 349{
350 static unsigned long rtc_update = 0; 350 static unsigned long rtc_update = 0;
351 unsigned long tsc; 351 unsigned long tsc;
@@ -458,12 +458,17 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
458 } 458 }
459 459
460 write_sequnlock(&xtime_lock); 460 write_sequnlock(&xtime_lock);
461}
461 462
463static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
464{
465 if (apic_runs_main_timer > 1)
466 return IRQ_HANDLED;
467 main_timer_handler(regs);
462#ifdef CONFIG_X86_LOCAL_APIC 468#ifdef CONFIG_X86_LOCAL_APIC
463 if (using_apic_timer) 469 if (using_apic_timer)
464 smp_send_timer_broadcast_ipi(); 470 smp_send_timer_broadcast_ipi();
465#endif 471#endif
466
467 return IRQ_HANDLED; 472 return IRQ_HANDLED;
468} 473}
469 474
@@ -843,17 +848,43 @@ static int hpet_reenable(void)
843 return hpet_timer_stop_set_go(hpet_tick); 848 return hpet_timer_stop_set_go(hpet_tick);
844} 849}
845 850
846void __init pit_init(void) 851#define PIT_MODE 0x43
852#define PIT_CH0 0x40
853
854static void __init __pit_init(int val, u8 mode)
847{ 855{
848 unsigned long flags; 856 unsigned long flags;
849 857
850 spin_lock_irqsave(&i8253_lock, flags); 858 spin_lock_irqsave(&i8253_lock, flags);
851 outb_p(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */ 859 outb_p(mode, PIT_MODE);
852 outb_p(LATCH & 0xff, 0x40); /* LSB */ 860 outb_p(val & 0xff, PIT_CH0); /* LSB */
853 outb_p(LATCH >> 8, 0x40); /* MSB */ 861 outb_p(val >> 8, PIT_CH0); /* MSB */
854 spin_unlock_irqrestore(&i8253_lock, flags); 862 spin_unlock_irqrestore(&i8253_lock, flags);
855} 863}
856 864
865void __init pit_init(void)
866{
867 __pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
868}
869
870void __init pit_stop_interrupt(void)
871{
872 __pit_init(0, 0x30); /* mode 0 */
873}
874
875void __init stop_timer_interrupt(void)
876{
877 char *name;
878 if (vxtime.hpet_address) {
879 name = "HPET";
880 hpet_timer_stop_set_go(0);
881 } else {
882 name = "PIT";
883 pit_stop_interrupt();
884 }
885 printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
886}
887
857int __init time_setup(char *str) 888int __init time_setup(char *str)
858{ 889{
859 report_lost_ticks = 1; 890 report_lost_ticks = 1;
@@ -932,7 +963,7 @@ void __init time_init(void)
932 * Make an educated guess if the TSC is trustworthy and synchronized 963 * Make an educated guess if the TSC is trustworthy and synchronized
933 * over all CPUs. 964 * over all CPUs.
934 */ 965 */
935__init int unsynchronized_tsc(void) 966__cpuinit int unsynchronized_tsc(void)
936{ 967{
937#ifdef CONFIG_SMP 968#ifdef CONFIG_SMP
938 if (oem_force_hpet_timer()) 969 if (oem_force_hpet_timer())
@@ -1016,9 +1047,21 @@ static int timer_resume(struct sys_device *dev)
1016 write_seqlock_irqsave(&xtime_lock,flags); 1047 write_seqlock_irqsave(&xtime_lock,flags);
1017 xtime.tv_sec = sec; 1048 xtime.tv_sec = sec;
1018 xtime.tv_nsec = 0; 1049 xtime.tv_nsec = 0;
1050 if (vxtime.mode == VXTIME_HPET) {
1051 if (hpet_use_timer)
1052 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
1053 else
1054 vxtime.last = hpet_readl(HPET_COUNTER);
1055#ifdef CONFIG_X86_PM_TIMER
1056 } else if (vxtime.mode == VXTIME_PMTMR) {
1057 pmtimer_resume();
1058#endif
1059 } else
1060 vxtime.last_tsc = get_cycles_sync();
1019 write_sequnlock_irqrestore(&xtime_lock,flags); 1061 write_sequnlock_irqrestore(&xtime_lock,flags);
1020 jiffies += sleep_length; 1062 jiffies += sleep_length;
1021 wall_jiffies += sleep_length; 1063 wall_jiffies += sleep_length;
1064 monotonic_base += sleep_length * (NSEC_PER_SEC/HZ);
1022 touch_softlockup_watchdog(); 1065 touch_softlockup_watchdog();
1023 return 0; 1066 return 0;
1024} 1067}
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 8bb0aeda78b9..ee1b2da9e5e7 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -372,7 +372,7 @@ void out_of_line_bug(void)
372static DEFINE_SPINLOCK(die_lock); 372static DEFINE_SPINLOCK(die_lock);
373static int die_owner = -1; 373static int die_owner = -1;
374 374
375unsigned long oops_begin(void) 375unsigned __kprobes long oops_begin(void)
376{ 376{
377 int cpu = safe_smp_processor_id(); 377 int cpu = safe_smp_processor_id();
378 unsigned long flags; 378 unsigned long flags;
@@ -391,7 +391,7 @@ unsigned long oops_begin(void)
391 return flags; 391 return flags;
392} 392}
393 393
394void oops_end(unsigned long flags) 394void __kprobes oops_end(unsigned long flags)
395{ 395{
396 die_owner = -1; 396 die_owner = -1;
397 bust_spinlocks(0); 397 bust_spinlocks(0);
@@ -400,7 +400,7 @@ void oops_end(unsigned long flags)
400 panic("Oops"); 400 panic("Oops");
401} 401}
402 402
403void __die(const char * str, struct pt_regs * regs, long err) 403void __kprobes __die(const char * str, struct pt_regs * regs, long err)
404{ 404{
405 static int die_counter; 405 static int die_counter;
406 printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter); 406 printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
@@ -432,7 +432,7 @@ void die(const char * str, struct pt_regs * regs, long err)
432 do_exit(SIGSEGV); 432 do_exit(SIGSEGV);
433} 433}
434 434
435void die_nmi(char *str, struct pt_regs *regs) 435void __kprobes die_nmi(char *str, struct pt_regs *regs)
436{ 436{
437 unsigned long flags = oops_begin(); 437 unsigned long flags = oops_begin();
438 438
@@ -575,7 +575,8 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
575 } 575 }
576} 576}
577 577
578static void mem_parity_error(unsigned char reason, struct pt_regs * regs) 578static __kprobes void
579mem_parity_error(unsigned char reason, struct pt_regs * regs)
579{ 580{
580 printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n"); 581 printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
581 printk("You probably have a hardware problem with your RAM chips\n"); 582 printk("You probably have a hardware problem with your RAM chips\n");
@@ -585,7 +586,8 @@ static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
585 outb(reason, 0x61); 586 outb(reason, 0x61);
586} 587}
587 588
588static void io_check_error(unsigned char reason, struct pt_regs * regs) 589static __kprobes void
590io_check_error(unsigned char reason, struct pt_regs * regs)
589{ 591{
590 printk("NMI: IOCK error (debug interrupt?)\n"); 592 printk("NMI: IOCK error (debug interrupt?)\n");
591 show_registers(regs); 593 show_registers(regs);
@@ -598,7 +600,8 @@ static void io_check_error(unsigned char reason, struct pt_regs * regs)
598 outb(reason, 0x61); 600 outb(reason, 0x61);
599} 601}
600 602
601static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs) 603static __kprobes void
604unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
602{ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason); 605{ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
603 printk("Dazed and confused, but trying to continue\n"); 606 printk("Dazed and confused, but trying to continue\n");
604 printk("Do you have a strange power saving mode enabled?\n"); 607 printk("Do you have a strange power saving mode enabled?\n");
@@ -606,7 +609,7 @@ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
606 609
607/* Runs on IST stack. This code must keep interrupts off all the time. 610/* Runs on IST stack. This code must keep interrupts off all the time.
608 Nested NMIs are prevented by the CPU. */ 611 Nested NMIs are prevented by the CPU. */
609asmlinkage void default_do_nmi(struct pt_regs *regs) 612asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
610{ 613{
611 unsigned char reason = 0; 614 unsigned char reason = 0;
612 int cpu; 615 int cpu;
@@ -658,7 +661,7 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
658/* Help handler running on IST stack to switch back to user stack 661/* Help handler running on IST stack to switch back to user stack
659 for scheduling or signal handling. The actual stack switch is done in 662 for scheduling or signal handling. The actual stack switch is done in
660 entry.S */ 663 entry.S */
661asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs) 664asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
662{ 665{
663 struct pt_regs *regs = eregs; 666 struct pt_regs *regs = eregs;
664 /* Did already sync */ 667 /* Did already sync */
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b0eed1faf740..74db0062d4a2 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -172,13 +172,15 @@ SECTIONS
172 . = ALIGN(4096); 172 . = ALIGN(4096);
173 __initramfs_start = .; 173 __initramfs_start = .;
174 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) } 174 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
175 __initramfs_end = .; 175 __initramfs_end = .;
176 . = ALIGN(32); 176 /* temporary here to work around NR_CPUS. If you see this comment in 2.6.17+
177 complain */
178 . = ALIGN(4096);
179 __init_end = .;
180 . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
177 __per_cpu_start = .; 181 __per_cpu_start = .;
178 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) } 182 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
179 __per_cpu_end = .; 183 __per_cpu_end = .;
180 . = ALIGN(4096);
181 __init_end = .;
182 184
183 . = ALIGN(4096); 185 . = ALIGN(4096);
184 __nosave_begin = .; 186 __nosave_begin = .;
diff --git a/arch/x86_64/lib/clear_page.S b/arch/x86_64/lib/clear_page.S
index 43d9fa136180..1f81b79b796c 100644
--- a/arch/x86_64/lib/clear_page.S
+++ b/arch/x86_64/lib/clear_page.S
@@ -5,8 +5,46 @@
5 .globl clear_page 5 .globl clear_page
6 .p2align 4 6 .p2align 4
7clear_page: 7clear_page:
8 xorl %eax,%eax
9 movl $4096/64,%ecx
10 .p2align 4
11.Lloop:
12 decl %ecx
13#define PUT(x) movq %rax,x*8(%rdi)
14 movq %rax,(%rdi)
15 PUT(1)
16 PUT(2)
17 PUT(3)
18 PUT(4)
19 PUT(5)
20 PUT(6)
21 PUT(7)
22 leaq 64(%rdi),%rdi
23 jnz .Lloop
24 nop
25 ret
26clear_page_end:
27
28 /* Some CPUs run faster using the string instructions.
29 It is also a lot simpler. Use this when possible */
30
31#include <asm/cpufeature.h>
32
33 .section .altinstructions,"a"
34 .align 8
35 .quad clear_page
36 .quad clear_page_c
37 .byte X86_FEATURE_REP_GOOD
38 .byte clear_page_end-clear_page
39 .byte clear_page_c_end-clear_page_c
40 .previous
41
42 .section .altinstr_replacement,"ax"
43clear_page_c:
8 movl $4096/8,%ecx 44 movl $4096/8,%ecx
9 xorl %eax,%eax 45 xorl %eax,%eax
10 rep 46 rep
11 stosq 47 stosq
12 ret 48 ret
49clear_page_c_end:
50 .previous
diff --git a/arch/x86_64/lib/copy_page.S b/arch/x86_64/lib/copy_page.S
index 621a19769406..8fa19d96a7ee 100644
--- a/arch/x86_64/lib/copy_page.S
+++ b/arch/x86_64/lib/copy_page.S
@@ -8,7 +8,94 @@
8 .globl copy_page 8 .globl copy_page
9 .p2align 4 9 .p2align 4
10copy_page: 10copy_page:
11 subq $3*8,%rsp
12 movq %rbx,(%rsp)
13 movq %r12,1*8(%rsp)
14 movq %r13,2*8(%rsp)
15
16 movl $(4096/64)-5,%ecx
17 .p2align 4
18.Loop64:
19 dec %rcx
20
21 movq (%rsi), %rax
22 movq 8 (%rsi), %rbx
23 movq 16 (%rsi), %rdx
24 movq 24 (%rsi), %r8
25 movq 32 (%rsi), %r9
26 movq 40 (%rsi), %r10
27 movq 48 (%rsi), %r11
28 movq 56 (%rsi), %r12
29
30 prefetcht0 5*64(%rsi)
31
32 movq %rax, (%rdi)
33 movq %rbx, 8 (%rdi)
34 movq %rdx, 16 (%rdi)
35 movq %r8, 24 (%rdi)
36 movq %r9, 32 (%rdi)
37 movq %r10, 40 (%rdi)
38 movq %r11, 48 (%rdi)
39 movq %r12, 56 (%rdi)
40
41 leaq 64 (%rsi), %rsi
42 leaq 64 (%rdi), %rdi
43
44 jnz .Loop64
45
46 movl $5,%ecx
47 .p2align 4
48.Loop2:
49 decl %ecx
50
51 movq (%rsi), %rax
52 movq 8 (%rsi), %rbx
53 movq 16 (%rsi), %rdx
54 movq 24 (%rsi), %r8
55 movq 32 (%rsi), %r9
56 movq 40 (%rsi), %r10
57 movq 48 (%rsi), %r11
58 movq 56 (%rsi), %r12
59
60 movq %rax, (%rdi)
61 movq %rbx, 8 (%rdi)
62 movq %rdx, 16 (%rdi)
63 movq %r8, 24 (%rdi)
64 movq %r9, 32 (%rdi)
65 movq %r10, 40 (%rdi)
66 movq %r11, 48 (%rdi)
67 movq %r12, 56 (%rdi)
68
69 leaq 64(%rdi),%rdi
70 leaq 64(%rsi),%rsi
71
72 jnz .Loop2
73
74 movq (%rsp),%rbx
75 movq 1*8(%rsp),%r12
76 movq 2*8(%rsp),%r13
77 addq $3*8,%rsp
78 ret
79
80 /* Some CPUs run faster using the string copy instructions.
81 It is also a lot simpler. Use this when possible */
82
83#include <asm/cpufeature.h>
84
85 .section .altinstructions,"a"
86 .align 8
87 .quad copy_page
88 .quad copy_page_c
89 .byte X86_FEATURE_REP_GOOD
90 .byte copy_page_c_end-copy_page_c
91 .byte copy_page_c_end-copy_page_c
92 .previous
93
94 .section .altinstr_replacement,"ax"
95copy_page_c:
11 movl $4096/8,%ecx 96 movl $4096/8,%ecx
12 rep 97 rep
13 movsq 98 movsq
14 ret 99 ret
100copy_page_c_end:
101 .previous
diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
index 79422b6559c3..f64569b83b54 100644
--- a/arch/x86_64/lib/copy_user.S
+++ b/arch/x86_64/lib/copy_user.S
@@ -4,9 +4,12 @@
4 * Functions to copy from and to user space. 4 * Functions to copy from and to user space.
5 */ 5 */
6 6
7#define FIX_ALIGNMENT 1
8
7 #include <asm/current.h> 9 #include <asm/current.h>
8 #include <asm/asm-offsets.h> 10 #include <asm/asm-offsets.h>
9 #include <asm/thread_info.h> 11 #include <asm/thread_info.h>
12 #include <asm/cpufeature.h>
10 13
11/* Standard copy_to_user with segment limit checking */ 14/* Standard copy_to_user with segment limit checking */
12 .globl copy_to_user 15 .globl copy_to_user
@@ -18,7 +21,23 @@ copy_to_user:
18 jc bad_to_user 21 jc bad_to_user
19 cmpq threadinfo_addr_limit(%rax),%rcx 22 cmpq threadinfo_addr_limit(%rax),%rcx
20 jae bad_to_user 23 jae bad_to_user
21 jmp copy_user_generic 242:
25 .byte 0xe9 /* 32bit jump */
26 .long .Lcug-1f
271:
28
29 .section .altinstr_replacement,"ax"
303: .byte 0xe9 /* replacement jmp with 8 bit immediate */
31 .long copy_user_generic_c-1b /* offset */
32 .previous
33 .section .altinstructions,"a"
34 .align 8
35 .quad 2b
36 .quad 3b
37 .byte X86_FEATURE_REP_GOOD
38 .byte 5
39 .byte 5
40 .previous
22 41
23/* Standard copy_from_user with segment limit checking */ 42/* Standard copy_from_user with segment limit checking */
24 .globl copy_from_user 43 .globl copy_from_user
@@ -53,44 +72,230 @@ bad_to_user:
53 * rsi source 72 * rsi source
54 * rdx count 73 * rdx count
55 * 74 *
56 * Only 4GB of copy is supported. This shouldn't be a problem
57 * because the kernel normally only writes from/to page sized chunks
58 * even if user space passed a longer buffer.
59 * And more would be dangerous because both Intel and AMD have
60 * errata with rep movsq > 4GB. If someone feels the need to fix
61 * this please consider this.
62 *
63 * Output: 75 * Output:
64 * eax uncopied bytes or 0 if successful. 76 * eax uncopied bytes or 0 if successful.
65 */ 77 */
66
67 .globl copy_user_generic 78 .globl copy_user_generic
79 .p2align 4
68copy_user_generic: 80copy_user_generic:
81 .byte 0x66,0x66,0x90 /* 5 byte nop for replacement jump */
82 .byte 0x66,0x90
831:
84 .section .altinstr_replacement,"ax"
852: .byte 0xe9 /* near jump with 32bit immediate */
86 .long copy_user_generic_c-1b /* offset */
87 .previous
88 .section .altinstructions,"a"
89 .align 8
90 .quad copy_user_generic
91 .quad 2b
92 .byte X86_FEATURE_REP_GOOD
93 .byte 5
94 .byte 5
95 .previous
96.Lcug:
97 pushq %rbx
98 xorl %eax,%eax /*zero for the exception handler */
99
100#ifdef FIX_ALIGNMENT
101 /* check for bad alignment of destination */
102 movl %edi,%ecx
103 andl $7,%ecx
104 jnz .Lbad_alignment
105.Lafter_bad_alignment:
106#endif
107
108 movq %rdx,%rcx
109
110 movl $64,%ebx
111 shrq $6,%rdx
112 decq %rdx
113 js .Lhandle_tail
114
115 .p2align 4
116.Lloop:
117.Ls1: movq (%rsi),%r11
118.Ls2: movq 1*8(%rsi),%r8
119.Ls3: movq 2*8(%rsi),%r9
120.Ls4: movq 3*8(%rsi),%r10
121.Ld1: movq %r11,(%rdi)
122.Ld2: movq %r8,1*8(%rdi)
123.Ld3: movq %r9,2*8(%rdi)
124.Ld4: movq %r10,3*8(%rdi)
125
126.Ls5: movq 4*8(%rsi),%r11
127.Ls6: movq 5*8(%rsi),%r8
128.Ls7: movq 6*8(%rsi),%r9
129.Ls8: movq 7*8(%rsi),%r10
130.Ld5: movq %r11,4*8(%rdi)
131.Ld6: movq %r8,5*8(%rdi)
132.Ld7: movq %r9,6*8(%rdi)
133.Ld8: movq %r10,7*8(%rdi)
134
135 decq %rdx
136
137 leaq 64(%rsi),%rsi
138 leaq 64(%rdi),%rdi
139
140 jns .Lloop
141
142 .p2align 4
143.Lhandle_tail:
144 movl %ecx,%edx
145 andl $63,%ecx
146 shrl $3,%ecx
147 jz .Lhandle_7
148 movl $8,%ebx
149 .p2align 4
150.Lloop_8:
151.Ls9: movq (%rsi),%r8
152.Ld9: movq %r8,(%rdi)
153 decl %ecx
154 leaq 8(%rdi),%rdi
155 leaq 8(%rsi),%rsi
156 jnz .Lloop_8
157
158.Lhandle_7:
159 movl %edx,%ecx
160 andl $7,%ecx
161 jz .Lende
162 .p2align 4
163.Lloop_1:
164.Ls10: movb (%rsi),%bl
165.Ld10: movb %bl,(%rdi)
166 incq %rdi
167 incq %rsi
168 decl %ecx
169 jnz .Lloop_1
170
171.Lende:
172 popq %rbx
173 ret
174
175#ifdef FIX_ALIGNMENT
176 /* align destination */
177 .p2align 4
178.Lbad_alignment:
179 movl $8,%r9d
180 subl %ecx,%r9d
181 movl %r9d,%ecx
182 cmpq %r9,%rdx
183 jz .Lhandle_7
184 js .Lhandle_7
185.Lalign_1:
186.Ls11: movb (%rsi),%bl
187.Ld11: movb %bl,(%rdi)
188 incq %rsi
189 incq %rdi
190 decl %ecx
191 jnz .Lalign_1
192 subq %r9,%rdx
193 jmp .Lafter_bad_alignment
194#endif
195
196 /* table sorted by exception address */
197 .section __ex_table,"a"
198 .align 8
199 .quad .Ls1,.Ls1e
200 .quad .Ls2,.Ls2e
201 .quad .Ls3,.Ls3e
202 .quad .Ls4,.Ls4e
203 .quad .Ld1,.Ls1e
204 .quad .Ld2,.Ls2e
205 .quad .Ld3,.Ls3e
206 .quad .Ld4,.Ls4e
207 .quad .Ls5,.Ls5e
208 .quad .Ls6,.Ls6e
209 .quad .Ls7,.Ls7e
210 .quad .Ls8,.Ls8e
211 .quad .Ld5,.Ls5e
212 .quad .Ld6,.Ls6e
213 .quad .Ld7,.Ls7e
214 .quad .Ld8,.Ls8e
215 .quad .Ls9,.Le_quad
216 .quad .Ld9,.Le_quad
217 .quad .Ls10,.Le_byte
218 .quad .Ld10,.Le_byte
219#ifdef FIX_ALIGNMENT
220 .quad .Ls11,.Lzero_rest
221 .quad .Ld11,.Lzero_rest
222#endif
223 .quad .Le5,.Le_zero
224 .previous
225
226 /* compute 64-offset for main loop. 8 bytes accuracy with error on the
227 pessimistic side. this is gross. it would be better to fix the
228 interface. */
229 /* eax: zero, ebx: 64 */
230.Ls1e: addl $8,%eax
231.Ls2e: addl $8,%eax
232.Ls3e: addl $8,%eax
233.Ls4e: addl $8,%eax
234.Ls5e: addl $8,%eax
235.Ls6e: addl $8,%eax
236.Ls7e: addl $8,%eax
237.Ls8e: addl $8,%eax
238 addq %rbx,%rdi /* +64 */
239 subq %rax,%rdi /* correct destination with computed offset */
240
241 shlq $6,%rdx /* loop counter * 64 (stride length) */
242 addq %rax,%rdx /* add offset to loopcnt */
243 andl $63,%ecx /* remaining bytes */
244 addq %rcx,%rdx /* add them */
245 jmp .Lzero_rest
246
247 /* exception on quad word loop in tail handling */
248 /* ecx: loopcnt/8, %edx: length, rdi: correct */
249.Le_quad:
250 shll $3,%ecx
251 andl $7,%edx
252 addl %ecx,%edx
253 /* edx: bytes to zero, rdi: dest, eax:zero */
254.Lzero_rest:
255 movq %rdx,%rcx
256.Le_byte:
257 xorl %eax,%eax
258.Le5: rep
259 stosb
260 /* when there is another exception while zeroing the rest just return */
261.Le_zero:
262 movq %rdx,%rax
263 jmp .Lende
264
265 /* Some CPUs run faster using the string copy instructions.
266 This is also a lot simpler. Use them when possible.
267 Patch in jmps to this code instead of copying it fully
268 to avoid unwanted aliasing in the exception tables. */
269
270 /* rdi destination
271 * rsi source
272 * rdx count
273 *
274 * Output:
275 * eax uncopied bytes or 0 if successfull.
276 *
277 * Only 4GB of copy is supported. This shouldn't be a problem
278 * because the kernel normally only writes from/to page sized chunks
279 * even if user space passed a longer buffer.
280 * And more would be dangerous because both Intel and AMD have
281 * errata with rep movsq > 4GB. If someone feels the need to fix
282 * this please consider this.
283 */
284copy_user_generic_c:
69 movl %edx,%ecx 285 movl %edx,%ecx
70 shrl $3,%ecx 286 shrl $3,%ecx
71 andl $7,%edx 287 andl $7,%edx
72 jz 5f
731: rep 2881: rep
74 movsq 289 movsq
75 movl %edx,%ecx 290 movl %edx,%ecx
76 xor %eax,%eax
772: rep 2912: rep
78 movsb 292 movsb
2934: movl %ecx,%eax
79 ret 294 ret
80 /* align here? */
815: xorl %eax,%eax
826: rep movsq
83 ret
84
85 .section .fixup,"ax"
863: lea (%rdx,%rcx,8),%rax 2953: lea (%rdx,%rcx,8),%rax
87 ret 296 ret
884: movl %ecx,%eax
89 ret
90 .previous
91 297
92 .section __ex_table,"a" 298 .section __ex_table,"a"
93 .quad 1b,3b 299 .quad 1b,3b
94 .quad 2b,4b 300 .quad 2b,4b
95 .quad 6b,4b
96 .previous 301 .previous
diff --git a/arch/x86_64/lib/memcpy.S b/arch/x86_64/lib/memcpy.S
index 92dd80544602..5554948b5554 100644
--- a/arch/x86_64/lib/memcpy.S
+++ b/arch/x86_64/lib/memcpy.S
@@ -11,8 +11,6 @@
11 * 11 *
12 * Output: 12 * Output:
13 * rax original destination 13 * rax original destination
14 *
15 * TODO: check best memcpy for PSC
16 */ 14 */
17 15
18 .globl __memcpy 16 .globl __memcpy
@@ -20,6 +18,95 @@
20 .p2align 4 18 .p2align 4
21__memcpy: 19__memcpy:
22memcpy: 20memcpy:
21 pushq %rbx
22 movq %rdi,%rax
23
24 movl %edx,%ecx
25 shrl $6,%ecx
26 jz .Lhandle_tail
27
28 .p2align 4
29.Lloop_64:
30 decl %ecx
31
32 movq (%rsi),%r11
33 movq 8(%rsi),%r8
34
35 movq %r11,(%rdi)
36 movq %r8,1*8(%rdi)
37
38 movq 2*8(%rsi),%r9
39 movq 3*8(%rsi),%r10
40
41 movq %r9,2*8(%rdi)
42 movq %r10,3*8(%rdi)
43
44 movq 4*8(%rsi),%r11
45 movq 5*8(%rsi),%r8
46
47 movq %r11,4*8(%rdi)
48 movq %r8,5*8(%rdi)
49
50 movq 6*8(%rsi),%r9
51 movq 7*8(%rsi),%r10
52
53 movq %r9,6*8(%rdi)
54 movq %r10,7*8(%rdi)
55
56 leaq 64(%rsi),%rsi
57 leaq 64(%rdi),%rdi
58 jnz .Lloop_64
59
60.Lhandle_tail:
61 movl %edx,%ecx
62 andl $63,%ecx
63 shrl $3,%ecx
64 jz .Lhandle_7
65 .p2align 4
66.Lloop_8:
67 decl %ecx
68 movq (%rsi),%r8
69 movq %r8,(%rdi)
70 leaq 8(%rdi),%rdi
71 leaq 8(%rsi),%rsi
72 jnz .Lloop_8
73
74.Lhandle_7:
75 movl %edx,%ecx
76 andl $7,%ecx
77 jz .Lende
78 .p2align 4
79.Lloop_1:
80 movb (%rsi),%r8b
81 movb %r8b,(%rdi)
82 incq %rdi
83 incq %rsi
84 decl %ecx
85 jnz .Lloop_1
86
87.Lende:
88 popq %rbx
89 ret
90.Lfinal:
91
92 /* Some CPUs run faster using the string copy instructions.
93 It is also a lot simpler. Use this when possible */
94
95 .section .altinstructions,"a"
96 .align 8
97 .quad memcpy
98 .quad memcpy_c
99 .byte X86_FEATURE_REP_GOOD
100 .byte .Lfinal-memcpy
101 .byte memcpy_c_end-memcpy_c
102 .previous
103
104 .section .altinstr_replacement,"ax"
105 /* rdi destination
106 * rsi source
107 * rdx count
108 */
109memcpy_c:
23 movq %rdi,%rax 110 movq %rdi,%rax
24 movl %edx,%ecx 111 movl %edx,%ecx
25 shrl $3,%ecx 112 shrl $3,%ecx
@@ -30,3 +117,5 @@ memcpy:
30 rep 117 rep
31 movsb 118 movsb
32 ret 119 ret
120memcpy_c_end:
121 .previous
diff --git a/arch/x86_64/lib/memset.S b/arch/x86_64/lib/memset.S
index 2aa48f24ed1e..ad397f2c7de8 100644
--- a/arch/x86_64/lib/memset.S
+++ b/arch/x86_64/lib/memset.S
@@ -13,6 +13,98 @@
13 .p2align 4 13 .p2align 4
14memset: 14memset:
15__memset: 15__memset:
16 movq %rdi,%r10
17 movq %rdx,%r11
18
19 /* expand byte value */
20 movzbl %sil,%ecx
21 movabs $0x0101010101010101,%rax
22 mul %rcx /* with rax, clobbers rdx */
23
24 /* align dst */
25 movl %edi,%r9d
26 andl $7,%r9d
27 jnz .Lbad_alignment
28.Lafter_bad_alignment:
29
30 movl %r11d,%ecx
31 shrl $6,%ecx
32 jz .Lhandle_tail
33
34 .p2align 4
35.Lloop_64:
36 decl %ecx
37 movq %rax,(%rdi)
38 movq %rax,8(%rdi)
39 movq %rax,16(%rdi)
40 movq %rax,24(%rdi)
41 movq %rax,32(%rdi)
42 movq %rax,40(%rdi)
43 movq %rax,48(%rdi)
44 movq %rax,56(%rdi)
45 leaq 64(%rdi),%rdi
46 jnz .Lloop_64
47
48 /* Handle tail in loops. The loops should be faster than hard
49 to predict jump tables. */
50 .p2align 4
51.Lhandle_tail:
52 movl %r11d,%ecx
53 andl $63&(~7),%ecx
54 jz .Lhandle_7
55 shrl $3,%ecx
56 .p2align 4
57.Lloop_8:
58 decl %ecx
59 movq %rax,(%rdi)
60 leaq 8(%rdi),%rdi
61 jnz .Lloop_8
62
63.Lhandle_7:
64 movl %r11d,%ecx
65 andl $7,%ecx
66 jz .Lende
67 .p2align 4
68.Lloop_1:
69 decl %ecx
70 movb %al,(%rdi)
71 leaq 1(%rdi),%rdi
72 jnz .Lloop_1
73
74.Lende:
75 movq %r10,%rax
76 ret
77
78.Lbad_alignment:
79 cmpq $7,%r11
80 jbe .Lhandle_7
81 movq %rax,(%rdi) /* unaligned store */
82 movq $8,%r8
83 subq %r9,%r8
84 addq %r8,%rdi
85 subq %r8,%r11
86 jmp .Lafter_bad_alignment
87
88 /* Some CPUs run faster using the string instructions.
89 It is also a lot simpler. Use this when possible */
90
91#include <asm/cpufeature.h>
92
93 .section .altinstructions,"a"
94 .align 8
95 .quad memset
96 .quad memset_c
97 .byte X86_FEATURE_REP_GOOD
98 .byte memset_c_end-memset_c
99 .byte memset_c_end-memset_c
100 .previous
101
102 .section .altinstr_replacement,"ax"
103 /* rdi destination
104 * rsi value
105 * rdx count
106 */
107memset_c:
16 movq %rdi,%r9 108 movq %rdi,%r9
17 movl %edx,%r8d 109 movl %edx,%r8d
18 andl $7,%r8d 110 andl $7,%r8d
@@ -29,3 +121,5 @@ __memset:
29 stosb 121 stosb
30 movq %r9,%rax 122 movq %r9,%rax
31 ret 123 ret
124memset_c_end:
125 .previous
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 26eac194064b..2e7c3c8ffe03 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -33,7 +33,6 @@
33#include <asm/proto.h> 33#include <asm/proto.h>
34#include <asm/kdebug.h> 34#include <asm/kdebug.h>
35#include <asm-generic/sections.h> 35#include <asm-generic/sections.h>
36#include <asm/kdebug.h>
37 36
38/* Page fault error code bits */ 37/* Page fault error code bits */
39#define PF_PROT (1<<0) /* or no page found */ 38#define PF_PROT (1<<0) /* or no page found */
@@ -157,8 +156,8 @@ void dump_pagetable(unsigned long address)
157 156
158 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); 157 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
159 pgd += pgd_index(address); 158 pgd += pgd_index(address);
160 printk("PGD %lx ", pgd_val(*pgd));
161 if (bad_address(pgd)) goto bad; 159 if (bad_address(pgd)) goto bad;
160 printk("PGD %lx ", pgd_val(*pgd));
162 if (!pgd_present(*pgd)) goto ret; 161 if (!pgd_present(*pgd)) goto ret;
163 162
164 pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address); 163 pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 8b7f85608fa8..cd25300726fc 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -26,6 +26,10 @@ static nodemask_t nodes_found __initdata;
26static struct node nodes[MAX_NUMNODES] __initdata; 26static struct node nodes[MAX_NUMNODES] __initdata;
27static u8 pxm2node[256] = { [0 ... 255] = 0xff }; 27static u8 pxm2node[256] = { [0 ... 255] = 0xff };
28 28
29/* Too small nodes confuse the VM badly. Usually they result
30 from BIOS bugs. */
31#define NODE_MIN_SIZE (4*1024*1024)
32
29static int node_to_pxm(int n); 33static int node_to_pxm(int n);
30 34
31int pxm_to_node(int pxm) 35int pxm_to_node(int pxm)
@@ -131,7 +135,12 @@ void __init
131acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) 135acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
132{ 136{
133 int pxm, node; 137 int pxm, node;
134 if (srat_disabled() || pa->flags.enabled == 0) 138 if (srat_disabled())
139 return;
140 if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
141 return;
142 }
143 if (pa->flags.enabled == 0)
135 return; 144 return;
136 pxm = pa->proximity_domain; 145 pxm = pa->proximity_domain;
137 node = setup_node(pxm); 146 node = setup_node(pxm);
@@ -155,8 +164,16 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
155 int node, pxm; 164 int node, pxm;
156 int i; 165 int i;
157 166
158 if (srat_disabled() || ma->flags.enabled == 0) 167 if (srat_disabled())
168 return;
169 if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
170 bad_srat();
159 return; 171 return;
172 }
173 if (ma->flags.enabled == 0)
174 return;
175 start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
176 end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
160 pxm = ma->proximity_domain; 177 pxm = ma->proximity_domain;
161 node = setup_node(pxm); 178 node = setup_node(pxm);
162 if (node < 0) { 179 if (node < 0) {
@@ -164,8 +181,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
164 bad_srat(); 181 bad_srat();
165 return; 182 return;
166 } 183 }
167 start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
168 end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
169 /* It is fine to add this area to the nodes data it will be used later*/ 184 /* It is fine to add this area to the nodes data it will be used later*/
170 if (ma->flags.hot_pluggable == 1) 185 if (ma->flags.hot_pluggable == 1)
171 printk(KERN_INFO "SRAT: hot plug zone found %lx - %lx \n", 186 printk(KERN_INFO "SRAT: hot plug zone found %lx - %lx \n",
@@ -223,6 +238,16 @@ static int nodes_cover_memory(void)
223 return 1; 238 return 1;
224} 239}
225 240
241static void unparse_node(int node)
242{
243 int i;
244 node_clear(node, nodes_parsed);
245 for (i = 0; i < MAX_LOCAL_APIC; i++) {
246 if (apicid_to_node[i] == node)
247 apicid_to_node[i] = NUMA_NO_NODE;
248 }
249}
250
226void __init acpi_numa_arch_fixup(void) {} 251void __init acpi_numa_arch_fixup(void) {}
227 252
228/* Use the information discovered above to actually set up the nodes. */ 253/* Use the information discovered above to actually set up the nodes. */
@@ -230,16 +255,16 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
230{ 255{
231 int i; 256 int i;
232 257
233 if (acpi_numa <= 0)
234 return -1;
235
236 /* First clean up the node list */ 258 /* First clean up the node list */
237 for_each_node_mask(i, nodes_parsed) { 259 for (i = 0; i < MAX_NUMNODES; i++) {
238 cutoff_node(i, start, end); 260 cutoff_node(i, start, end);
239 if (nodes[i].start == nodes[i].end) 261 if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
240 node_clear(i, nodes_parsed); 262 unparse_node(i);
241 } 263 }
242 264
265 if (acpi_numa <= 0)
266 return -1;
267
243 if (!nodes_cover_memory()) { 268 if (!nodes_cover_memory()) {
244 bad_srat(); 269 bad_srat();
245 return -1; 270 return -1;
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index 00d4ddbf980c..b4a3fe4ec249 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -46,7 +46,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
46 if (pci_mmcfg_config_num == 1 && 46 if (pci_mmcfg_config_num == 1 &&
47 cfg->pci_segment_group_number == 0 && 47 cfg->pci_segment_group_number == 0 &&
48 (cfg->start_bus_number | cfg->end_bus_number) == 0) 48 (cfg->start_bus_number | cfg->end_bus_number) == 0)
49 return cfg->base_address; 49 return pci_mmcfg_virt[0].virt;
50 50
51 /* Fall back to type 0 */ 51 /* Fall back to type 0 */
52 return 0; 52 return 0;
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
index 4fbddf92a921..94fdfe474ac1 100644
--- a/arch/xtensa/platform-iss/console.c
+++ b/arch/xtensa/platform-iss/console.c
@@ -128,9 +128,7 @@ static void rs_poll(unsigned long priv)
128 128
129 while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){ 129 while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
130 __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0); 130 __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
131 tty->flip.count++; 131 tty_insert_flip_char(tty, c, TTY_NORMAL);
132 *tty->flip.char_buf_ptr++ = c;
133 *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
134 i++; 132 i++;
135 } 133 }
136 134
diff --git a/block/elevator.c b/block/elevator.c
index 96a61e029ce5..2fc269f69726 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -323,7 +323,8 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
323 /* 323 /*
324 * toggle ordered color 324 * toggle ordered color
325 */ 325 */
326 q->ordcolor ^= 1; 326 if (blk_barrier_rq(rq))
327 q->ordcolor ^= 1;
327 328
328 /* 329 /*
329 * barriers implicitly indicate back insertion 330 * barriers implicitly indicate back insertion
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index f9fc07efd2da..ee5ed98db4cd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
508 508
509int blk_do_ordered(request_queue_t *q, struct request **rqp) 509int blk_do_ordered(request_queue_t *q, struct request **rqp)
510{ 510{
511 struct request *rq = *rqp, *allowed_rq; 511 struct request *rq = *rqp;
512 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); 512 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
513 513
514 if (!q->ordseq) { 514 if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
532 } 532 }
533 } 533 }
534 534
535 /*
536 * Ordered sequence in progress
537 */
538
539 /* Special requests are not subject to ordering rules. */
540 if (!blk_fs_request(rq) &&
541 rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
542 return 1;
543
535 if (q->ordered & QUEUE_ORDERED_TAG) { 544 if (q->ordered & QUEUE_ORDERED_TAG) {
545 /* Ordered by tag. Blocking the next barrier is enough. */
536 if (is_barrier && rq != &q->bar_rq) 546 if (is_barrier && rq != &q->bar_rq)
537 *rqp = NULL; 547 *rqp = NULL;
538 return 1; 548 } else {
539 } 549 /* Ordered by draining. Wait for turn. */
540 550 WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
541 switch (blk_ordered_cur_seq(q)) { 551 if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
542 case QUEUE_ORDSEQ_PREFLUSH: 552 *rqp = NULL;
543 allowed_rq = &q->pre_flush_rq;
544 break;
545 case QUEUE_ORDSEQ_BAR:
546 allowed_rq = &q->bar_rq;
547 break;
548 case QUEUE_ORDSEQ_POSTFLUSH:
549 allowed_rq = &q->post_flush_rq;
550 break;
551 default:
552 allowed_rq = NULL;
553 break;
554 } 553 }
555 554
556 if (rq != allowed_rq &&
557 (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
558 rq == &q->post_flush_rq))
559 *rqp = NULL;
560
561 return 1; 555 return 1;
562} 556}
563 557
@@ -3453,7 +3447,7 @@ int __init blk_dev_init(void)
3453 iocontext_cachep = kmem_cache_create("blkdev_ioc", 3447 iocontext_cachep = kmem_cache_create("blkdev_ioc",
3454 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL); 3448 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
3455 3449
3456 for (i = 0; i < NR_CPUS; i++) 3450 for_each_cpu(i)
3457 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); 3451 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
3458 3452
3459 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); 3453 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index be2dae52f6fa..eb730a80952c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -94,7 +94,9 @@ static int set_max_cstate(struct dmi_system_id *id)
94 return 0; 94 return 0;
95} 95}
96 96
97static struct dmi_system_id __initdata processor_power_dmi_table[] = { 97/* Actually this shouldn't be __cpuinitdata, would be better to fix the
98 callers to only run once -AK */
99static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
98 { set_max_cstate, "IBM ThinkPad R40e", { 100 { set_max_cstate, "IBM ThinkPad R40e", {
99 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 101 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
100 DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1}, 102 DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
@@ -899,7 +901,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
899 case ACPI_STATE_C3: 901 case ACPI_STATE_C3:
900 acpi_processor_power_verify_c3(pr, cx); 902 acpi_processor_power_verify_c3(pr, cx);
901#ifdef ARCH_APICTIMER_STOPS_ON_C3 903#ifdef ARCH_APICTIMER_STOPS_ON_C3
902 if (c->x86_vendor == X86_VENDOR_INTEL) { 904 if (cx->valid && c->x86_vendor == X86_VENDOR_INTEL) {
903 on_each_cpu(switch_APIC_timer_to_ipi, 905 on_each_cpu(switch_APIC_timer_to_ipi,
904 &mask, 1, 1); 906 &mask, 1, 1);
905 } 907 }
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index f12898d53078..e99471d3232b 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -8,6 +8,7 @@ obj-y += power/
8obj-$(CONFIG_FW_LOADER) += firmware_class.o 8obj-$(CONFIG_FW_LOADER) += firmware_class.o
9obj-$(CONFIG_NUMA) += node.o 9obj-$(CONFIG_NUMA) += node.o
10obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o 10obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o
11obj-$(CONFIG_SMP) += topology.o
11 12
12ifeq ($(CONFIG_DEBUG_DRIVER),y) 13ifeq ($(CONFIG_DEBUG_DRIVER),y)
13EXTRA_CFLAGS += -DDEBUG 14EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
new file mode 100644
index 000000000000..915810f6237e
--- /dev/null
+++ b/drivers/base/topology.c
@@ -0,0 +1,148 @@
1/*
2 * driver/base/topology.c - Populate sysfs with cpu topology information
3 *
4 * Written by: Zhang Yanmin, Intel Corporation
5 *
6 * Copyright (C) 2006, Intel Corp.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 */
26#include <linux/sysdev.h>
27#include <linux/init.h>
28#include <linux/mm.h>
29#include <linux/cpu.h>
30#include <linux/module.h>
31#include <linux/topology.h>
32
33#define define_one_ro(_name) \
34static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
35
36#define define_id_show_func(name) \
37static ssize_t show_##name(struct sys_device *dev, char *buf) \
38{ \
39 unsigned int cpu = dev->id; \
40 return sprintf(buf, "%d\n", topology_##name(cpu)); \
41}
42
43#define define_siblings_show_func(name) \
44static ssize_t show_##name(struct sys_device *dev, char *buf) \
45{ \
46 ssize_t len = -1; \
47 unsigned int cpu = dev->id; \
48 len = cpumask_scnprintf(buf, NR_CPUS+1, topology_##name(cpu)); \
49 return (len + sprintf(buf + len, "\n")); \
50}
51
52#ifdef topology_physical_package_id
53define_id_show_func(physical_package_id);
54define_one_ro(physical_package_id);
55#define ref_physical_package_id_attr &attr_physical_package_id.attr,
56#else
57#define ref_physical_package_id_attr
58#endif
59
60#ifdef topology_core_id
61define_id_show_func(core_id);
62define_one_ro(core_id);
63#define ref_core_id_attr &attr_core_id.attr,
64#else
65#define ref_core_id_attr
66#endif
67
68#ifdef topology_thread_siblings
69define_siblings_show_func(thread_siblings);
70define_one_ro(thread_siblings);
71#define ref_thread_siblings_attr &attr_thread_siblings.attr,
72#else
73#define ref_thread_siblings_attr
74#endif
75
76#ifdef topology_core_siblings
77define_siblings_show_func(core_siblings);
78define_one_ro(core_siblings);
79#define ref_core_siblings_attr &attr_core_siblings.attr,
80#else
81#define ref_core_siblings_attr
82#endif
83
84static struct attribute *default_attrs[] = {
85 ref_physical_package_id_attr
86 ref_core_id_attr
87 ref_thread_siblings_attr
88 ref_core_siblings_attr
89 NULL
90};
91
92static struct attribute_group topology_attr_group = {
93 .attrs = default_attrs,
94 .name = "topology"
95};
96
97/* Add/Remove cpu_topology interface for CPU device */
98static int __cpuinit topology_add_dev(struct sys_device * sys_dev)
99{
100 sysfs_create_group(&sys_dev->kobj, &topology_attr_group);
101 return 0;
102}
103
104static int __cpuinit topology_remove_dev(struct sys_device * sys_dev)
105{
106 sysfs_remove_group(&sys_dev->kobj, &topology_attr_group);
107 return 0;
108}
109
110static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
111 unsigned long action, void *hcpu)
112{
113 unsigned int cpu = (unsigned long)hcpu;
114 struct sys_device *sys_dev;
115
116 sys_dev = get_cpu_sysdev(cpu);
117 switch (action) {
118 case CPU_ONLINE:
119 topology_add_dev(sys_dev);
120 break;
121 case CPU_DEAD:
122 topology_remove_dev(sys_dev);
123 break;
124 }
125 return NOTIFY_OK;
126}
127
128static struct notifier_block topology_cpu_notifier =
129{
130 .notifier_call = topology_cpu_callback,
131};
132
133static int __cpuinit topology_sysfs_init(void)
134{
135 int i;
136
137 for_each_online_cpu(i) {
138 topology_cpu_callback(&topology_cpu_notifier, CPU_ONLINE,
139 (void *)(long)i);
140 }
141
142 register_cpu_notifier(&topology_cpu_notifier);
143
144 return 0;
145}
146
147device_initcall(topology_sysfs_init);
148
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 139cbba76180..8b1331677407 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -433,12 +433,12 @@ config CDROM_PKTCDVD_BUFFERS
433 This controls the maximum number of active concurrent packets. More 433 This controls the maximum number of active concurrent packets. More
434 concurrent packets can increase write performance, but also require 434 concurrent packets can increase write performance, but also require
435 more memory. Each concurrent packet will require approximately 64Kb 435 more memory. Each concurrent packet will require approximately 64Kb
436 of non-swappable kernel memory, memory which will be allocated at 436 of non-swappable kernel memory, memory which will be allocated when
437 pktsetup time. 437 a disc is opened for writing.
438 438
439config CDROM_PKTCDVD_WCACHE 439config CDROM_PKTCDVD_WCACHE
440 bool "Enable write caching" 440 bool "Enable write caching (EXPERIMENTAL)"
441 depends on CDROM_PKTCDVD 441 depends on CDROM_PKTCDVD && EXPERIMENTAL
442 help 442 help
443 If enabled, write caching will be set for the CD-R/W device. For now 443 If enabled, write caching will be set for the CD-R/W device. For now
444 this option is dangerous unless the CD-RW media is known good, as we 444 this option is dangerous unless the CD-RW media is known good, as we
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 93affeeef7bd..4e7dbcc425ff 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -43,8 +43,6 @@
43 * 43 *
44 *************************************************************************/ 44 *************************************************************************/
45 45
46#define VERSION_CODE "v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
47
48#include <linux/pktcdvd.h> 46#include <linux/pktcdvd.h>
49#include <linux/config.h> 47#include <linux/config.h>
50#include <linux/module.h> 48#include <linux/module.h>
@@ -131,7 +129,7 @@ static struct bio *pkt_bio_alloc(int nr_iovecs)
131/* 129/*
132 * Allocate a packet_data struct 130 * Allocate a packet_data struct
133 */ 131 */
134static struct packet_data *pkt_alloc_packet_data(void) 132static struct packet_data *pkt_alloc_packet_data(int frames)
135{ 133{
136 int i; 134 int i;
137 struct packet_data *pkt; 135 struct packet_data *pkt;
@@ -140,11 +138,12 @@ static struct packet_data *pkt_alloc_packet_data(void)
140 if (!pkt) 138 if (!pkt)
141 goto no_pkt; 139 goto no_pkt;
142 140
143 pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE); 141 pkt->frames = frames;
142 pkt->w_bio = pkt_bio_alloc(frames);
144 if (!pkt->w_bio) 143 if (!pkt->w_bio)
145 goto no_bio; 144 goto no_bio;
146 145
147 for (i = 0; i < PAGES_PER_PACKET; i++) { 146 for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
148 pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO); 147 pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
149 if (!pkt->pages[i]) 148 if (!pkt->pages[i])
150 goto no_page; 149 goto no_page;
@@ -152,7 +151,7 @@ static struct packet_data *pkt_alloc_packet_data(void)
152 151
153 spin_lock_init(&pkt->lock); 152 spin_lock_init(&pkt->lock);
154 153
155 for (i = 0; i < PACKET_MAX_SIZE; i++) { 154 for (i = 0; i < frames; i++) {
156 struct bio *bio = pkt_bio_alloc(1); 155 struct bio *bio = pkt_bio_alloc(1);
157 if (!bio) 156 if (!bio)
158 goto no_rd_bio; 157 goto no_rd_bio;
@@ -162,14 +161,14 @@ static struct packet_data *pkt_alloc_packet_data(void)
162 return pkt; 161 return pkt;
163 162
164no_rd_bio: 163no_rd_bio:
165 for (i = 0; i < PACKET_MAX_SIZE; i++) { 164 for (i = 0; i < frames; i++) {
166 struct bio *bio = pkt->r_bios[i]; 165 struct bio *bio = pkt->r_bios[i];
167 if (bio) 166 if (bio)
168 bio_put(bio); 167 bio_put(bio);
169 } 168 }
170 169
171no_page: 170no_page:
172 for (i = 0; i < PAGES_PER_PACKET; i++) 171 for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
173 if (pkt->pages[i]) 172 if (pkt->pages[i])
174 __free_page(pkt->pages[i]); 173 __free_page(pkt->pages[i]);
175 bio_put(pkt->w_bio); 174 bio_put(pkt->w_bio);
@@ -186,12 +185,12 @@ static void pkt_free_packet_data(struct packet_data *pkt)
186{ 185{
187 int i; 186 int i;
188 187
189 for (i = 0; i < PACKET_MAX_SIZE; i++) { 188 for (i = 0; i < pkt->frames; i++) {
190 struct bio *bio = pkt->r_bios[i]; 189 struct bio *bio = pkt->r_bios[i];
191 if (bio) 190 if (bio)
192 bio_put(bio); 191 bio_put(bio);
193 } 192 }
194 for (i = 0; i < PAGES_PER_PACKET; i++) 193 for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
195 __free_page(pkt->pages[i]); 194 __free_page(pkt->pages[i]);
196 bio_put(pkt->w_bio); 195 bio_put(pkt->w_bio);
197 kfree(pkt); 196 kfree(pkt);
@@ -206,17 +205,17 @@ static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
206 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) { 205 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
207 pkt_free_packet_data(pkt); 206 pkt_free_packet_data(pkt);
208 } 207 }
208 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
209} 209}
210 210
211static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) 211static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
212{ 212{
213 struct packet_data *pkt; 213 struct packet_data *pkt;
214 214
215 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); 215 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
216 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); 216
217 spin_lock_init(&pd->cdrw.active_list_lock);
218 while (nr_packets > 0) { 217 while (nr_packets > 0) {
219 pkt = pkt_alloc_packet_data(); 218 pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
220 if (!pkt) { 219 if (!pkt) {
221 pkt_shrink_pktlist(pd); 220 pkt_shrink_pktlist(pd);
222 return 0; 221 return 0;
@@ -951,7 +950,7 @@ try_next_bio:
951 950
952 pd->current_sector = zone + pd->settings.size; 951 pd->current_sector = zone + pd->settings.size;
953 pkt->sector = zone; 952 pkt->sector = zone;
954 pkt->frames = pd->settings.size >> 2; 953 BUG_ON(pkt->frames != pd->settings.size >> 2);
955 pkt->write_size = 0; 954 pkt->write_size = 0;
956 955
957 /* 956 /*
@@ -1639,7 +1638,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1639 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; 1638 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1640 if (pd->settings.size == 0) { 1639 if (pd->settings.size == 0) {
1641 printk("pktcdvd: detected zero packet size!\n"); 1640 printk("pktcdvd: detected zero packet size!\n");
1642 pd->settings.size = 128; 1641 return -ENXIO;
1643 } 1642 }
1644 if (pd->settings.size > PACKET_MAX_SECTORS) { 1643 if (pd->settings.size > PACKET_MAX_SECTORS) {
1645 printk("pktcdvd: packet size is too big\n"); 1644 printk("pktcdvd: packet size is too big\n");
@@ -1987,8 +1986,14 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1987 if ((ret = pkt_set_segment_merging(pd, q))) 1986 if ((ret = pkt_set_segment_merging(pd, q)))
1988 goto out_unclaim; 1987 goto out_unclaim;
1989 1988
1990 if (write) 1989 if (write) {
1990 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
1991 printk("pktcdvd: not enough memory for buffers\n");
1992 ret = -ENOMEM;
1993 goto out_unclaim;
1994 }
1991 printk("pktcdvd: %lukB available on disc\n", lba << 1); 1995 printk("pktcdvd: %lukB available on disc\n", lba << 1);
1996 }
1992 1997
1993 return 0; 1998 return 0;
1994 1999
@@ -2014,6 +2019,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2014 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); 2019 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2015 bd_release(pd->bdev); 2020 bd_release(pd->bdev);
2016 blkdev_put(pd->bdev); 2021 blkdev_put(pd->bdev);
2022
2023 pkt_shrink_pktlist(pd);
2017} 2024}
2018 2025
2019static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) 2026static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
@@ -2379,12 +2386,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2379 /* This is safe, since we have a reference from open(). */ 2386 /* This is safe, since we have a reference from open(). */
2380 __module_get(THIS_MODULE); 2387 __module_get(THIS_MODULE);
2381 2388
2382 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2383 printk("pktcdvd: not enough memory for buffers\n");
2384 ret = -ENOMEM;
2385 goto out_mem;
2386 }
2387
2388 pd->bdev = bdev; 2389 pd->bdev = bdev;
2389 set_blocksize(bdev, CD_FRAMESIZE); 2390 set_blocksize(bdev, CD_FRAMESIZE);
2390 2391
@@ -2395,7 +2396,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2395 if (IS_ERR(pd->cdrw.thread)) { 2396 if (IS_ERR(pd->cdrw.thread)) {
2396 printk("pktcdvd: can't start kernel thread\n"); 2397 printk("pktcdvd: can't start kernel thread\n");
2397 ret = -ENOMEM; 2398 ret = -ENOMEM;
2398 goto out_thread; 2399 goto out_mem;
2399 } 2400 }
2400 2401
2401 proc = create_proc_entry(pd->name, 0, pkt_proc); 2402 proc = create_proc_entry(pd->name, 0, pkt_proc);
@@ -2406,8 +2407,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2406 DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); 2407 DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
2407 return 0; 2408 return 0;
2408 2409
2409out_thread:
2410 pkt_shrink_pktlist(pd);
2411out_mem: 2410out_mem:
2412 blkdev_put(bdev); 2411 blkdev_put(bdev);
2413 /* This is safe: open() is still holding a reference. */ 2412 /* This is safe: open() is still holding a reference. */
@@ -2503,6 +2502,10 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2503 goto out_mem; 2502 goto out_mem;
2504 pd->disk = disk; 2503 pd->disk = disk;
2505 2504
2505 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2506 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2507 spin_lock_init(&pd->cdrw.active_list_lock);
2508
2506 spin_lock_init(&pd->lock); 2509 spin_lock_init(&pd->lock);
2507 spin_lock_init(&pd->iosched.lock); 2510 spin_lock_init(&pd->iosched.lock);
2508 sprintf(pd->name, "pktcdvd%d", idx); 2511 sprintf(pd->name, "pktcdvd%d", idx);
@@ -2567,8 +2570,6 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
2567 2570
2568 blkdev_put(pd->bdev); 2571 blkdev_put(pd->bdev);
2569 2572
2570 pkt_shrink_pktlist(pd);
2571
2572 remove_proc_entry(pd->name, pkt_proc); 2573 remove_proc_entry(pd->name, pkt_proc);
2573 DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); 2574 DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
2574 2575
@@ -2678,7 +2679,6 @@ static int __init pkt_init(void)
2678 2679
2679 pkt_proc = proc_mkdir("pktcdvd", proc_root_driver); 2680 pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
2680 2681
2681 DPRINTK("pktcdvd: %s\n", VERSION_CODE);
2682 return 0; 2682 return 0;
2683 2683
2684out: 2684out:
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index a3614e6a68d0..4ada1268b40d 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -882,7 +882,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
882 card->card_number, dev->bus->number, dev->devfn); 882 card->card_number, dev->bus->number, dev->devfn);
883 883
884 if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) && 884 if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) &&
885 !pci_set_dma_mask(dev, 0xffffffffLL)) { 885 pci_set_dma_mask(dev, 0xffffffffLL)) {
886 printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards); 886 printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards);
887 return -ENOMEM; 887 return -ENOMEM;
888 } 888 }
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 39c61a71176e..cc7acf877dc0 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -1233,7 +1233,7 @@ cyy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1233 } 1233 }
1234 info->idle_stats.recv_idle = jiffies; 1234 info->idle_stats.recv_idle = jiffies;
1235 } 1235 }
1236 schedule_delayed_work(&tty->buf.work, 1); 1236 tty_schedule_flip(tty);
1237 } 1237 }
1238 /* end of service */ 1238 /* end of service */
1239 cy_writeb(base_addr+(CyRIR<<index), (save_xir & 0x3f)); 1239 cy_writeb(base_addr+(CyRIR<<index), (save_xir & 0x3f));
@@ -1606,7 +1606,7 @@ cyz_handle_rx(struct cyclades_port *info,
1606 } 1606 }
1607#endif 1607#endif
1608 info->idle_stats.recv_idle = jiffies; 1608 info->idle_stats.recv_idle = jiffies;
1609 schedule_delayed_work(&tty->buf.work, 1); 1609 tty_schedule_flip(tty);
1610 } 1610 }
1611 /* Update rx_get */ 1611 /* Update rx_get */
1612 cy_writel(&buf_ctrl->rx_get, new_rx_get); 1612 cy_writel(&buf_ctrl->rx_get, new_rx_get);
@@ -1809,7 +1809,7 @@ cyz_handle_cmd(struct cyclades_card *cinfo)
1809 if(delta_count) 1809 if(delta_count)
1810 cy_sched_event(info, Cy_EVENT_DELTA_WAKEUP); 1810 cy_sched_event(info, Cy_EVENT_DELTA_WAKEUP);
1811 if(special_count) 1811 if(special_count)
1812 schedule_delayed_work(&tty->buf.work, 1); 1812 tty_schedule_flip(tty);
1813 } 1813 }
1814} 1814}
1815 1815
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 5485382cadec..bd7be09ea53d 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -59,17 +59,16 @@ static void *drm_ati_alloc_pcigart_table(void)
59 int i; 59 int i;
60 DRM_DEBUG("%s\n", __FUNCTION__); 60 DRM_DEBUG("%s\n", __FUNCTION__);
61 61
62 address = __get_free_pages(GFP_KERNEL, ATI_PCIGART_TABLE_ORDER); 62 address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
63 ATI_PCIGART_TABLE_ORDER);
63 if (address == 0UL) { 64 if (address == 0UL) {
64 return 0; 65 return NULL;
65 } 66 }
66 67
67 page = virt_to_page(address); 68 page = virt_to_page(address);
68 69
69 for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) { 70 for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++)
70 get_page(page);
71 SetPageReserved(page); 71 SetPageReserved(page);
72 }
73 72
74 DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address); 73 DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
75 return (void *)address; 74 return (void *)address;
@@ -83,10 +82,8 @@ static void drm_ati_free_pcigart_table(void *address)
83 82
84 page = virt_to_page((unsigned long)address); 83 page = virt_to_page((unsigned long)address);
85 84
86 for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) { 85 for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++)
87 __put_page(page);
88 ClearPageReserved(page); 86 ClearPageReserved(page);
89 }
90 87
91 free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER); 88 free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
92} 89}
@@ -127,7 +124,7 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
127 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN 124 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
128 && gart_info->addr) { 125 && gart_info->addr) {
129 drm_ati_free_pcigart_table(gart_info->addr); 126 drm_ati_free_pcigart_table(gart_info->addr);
130 gart_info->addr = 0; 127 gart_info->addr = NULL;
131 } 128 }
132 129
133 return 1; 130 return 1;
@@ -168,7 +165,7 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
168 if (bus_address == 0) { 165 if (bus_address == 0) {
169 DRM_ERROR("unable to map PCIGART pages!\n"); 166 DRM_ERROR("unable to map PCIGART pages!\n");
170 drm_ati_free_pcigart_table(address); 167 drm_ati_free_pcigart_table(address);
171 address = 0; 168 address = NULL;
172 goto done; 169 goto done;
173 } 170 }
174 } else { 171 } else {
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 54b561e69486..71b8b32b075f 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -57,6 +57,7 @@
57#include <linux/smp_lock.h> /* For (un)lock_kernel */ 57#include <linux/smp_lock.h> /* For (un)lock_kernel */
58#include <linux/mm.h> 58#include <linux/mm.h>
59#include <linux/cdev.h> 59#include <linux/cdev.h>
60#include <linux/mutex.h>
60#if defined(__alpha__) || defined(__powerpc__) 61#if defined(__alpha__) || defined(__powerpc__)
61#include <asm/pgtable.h> /* For pte_wrprotect */ 62#include <asm/pgtable.h> /* For pte_wrprotect */
62#endif 63#endif
@@ -623,7 +624,7 @@ typedef struct drm_device {
623 /** \name Locks */ 624 /** \name Locks */
624 /*@{ */ 625 /*@{ */
625 spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ 626 spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
626 struct semaphore struct_sem; /**< For others */ 627 struct mutex struct_mutex; /**< For others */
627 /*@} */ 628 /*@} */
628 629
629 /** \name Usage Counters */ 630 /** \name Usage Counters */
@@ -658,7 +659,7 @@ typedef struct drm_device {
658 /*@{ */ 659 /*@{ */
659 drm_ctx_list_t *ctxlist; /**< Linked list of context handles */ 660 drm_ctx_list_t *ctxlist; /**< Linked list of context handles */
660 int ctx_count; /**< Number of context handles */ 661 int ctx_count; /**< Number of context handles */
661 struct semaphore ctxlist_sem; /**< For ctxlist */ 662 struct mutex ctxlist_mutex; /**< For ctxlist */
662 663
663 drm_map_t **context_sareas; /**< per-context SAREA's */ 664 drm_map_t **context_sareas; /**< per-context SAREA's */
664 int max_context; 665 int max_context;
diff --git a/drivers/char/drm/drm_auth.c b/drivers/char/drm/drm_auth.c
index a47b502bc7cc..2a37586a7ee8 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/char/drm/drm_auth.c
@@ -56,7 +56,7 @@ static int drm_hash_magic(drm_magic_t magic)
56 * \param magic magic number. 56 * \param magic magic number.
57 * 57 *
58 * Searches in drm_device::magiclist within all files with the same hash key 58 * Searches in drm_device::magiclist within all files with the same hash key
59 * the one with matching magic number, while holding the drm_device::struct_sem 59 * the one with matching magic number, while holding the drm_device::struct_mutex
60 * lock. 60 * lock.
61 */ 61 */
62static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) 62static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
@@ -65,14 +65,14 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
65 drm_magic_entry_t *pt; 65 drm_magic_entry_t *pt;
66 int hash = drm_hash_magic(magic); 66 int hash = drm_hash_magic(magic);
67 67
68 down(&dev->struct_sem); 68 mutex_lock(&dev->struct_mutex);
69 for (pt = dev->magiclist[hash].head; pt; pt = pt->next) { 69 for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
70 if (pt->magic == magic) { 70 if (pt->magic == magic) {
71 retval = pt->priv; 71 retval = pt->priv;
72 break; 72 break;
73 } 73 }
74 } 74 }
75 up(&dev->struct_sem); 75 mutex_unlock(&dev->struct_mutex);
76 return retval; 76 return retval;
77} 77}
78 78
@@ -85,7 +85,7 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
85 * 85 *
86 * Creates a drm_magic_entry structure and appends to the linked list 86 * Creates a drm_magic_entry structure and appends to the linked list
87 * associated the magic number hash key in drm_device::magiclist, while holding 87 * associated the magic number hash key in drm_device::magiclist, while holding
88 * the drm_device::struct_sem lock. 88 * the drm_device::struct_mutex lock.
89 */ 89 */
90static int drm_add_magic(drm_device_t * dev, drm_file_t * priv, 90static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
91 drm_magic_t magic) 91 drm_magic_t magic)
@@ -104,7 +104,7 @@ static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
104 entry->priv = priv; 104 entry->priv = priv;
105 entry->next = NULL; 105 entry->next = NULL;
106 106
107 down(&dev->struct_sem); 107 mutex_lock(&dev->struct_mutex);
108 if (dev->magiclist[hash].tail) { 108 if (dev->magiclist[hash].tail) {
109 dev->magiclist[hash].tail->next = entry; 109 dev->magiclist[hash].tail->next = entry;
110 dev->magiclist[hash].tail = entry; 110 dev->magiclist[hash].tail = entry;
@@ -112,7 +112,7 @@ static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
112 dev->magiclist[hash].head = entry; 112 dev->magiclist[hash].head = entry;
113 dev->magiclist[hash].tail = entry; 113 dev->magiclist[hash].tail = entry;
114 } 114 }
115 up(&dev->struct_sem); 115 mutex_unlock(&dev->struct_mutex);
116 116
117 return 0; 117 return 0;
118} 118}
@@ -124,7 +124,7 @@ static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
124 * \param magic magic number. 124 * \param magic magic number.
125 * 125 *
126 * Searches and unlinks the entry in drm_device::magiclist with the magic 126 * Searches and unlinks the entry in drm_device::magiclist with the magic
127 * number hash key, while holding the drm_device::struct_sem lock. 127 * number hash key, while holding the drm_device::struct_mutex lock.
128 */ 128 */
129static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) 129static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
130{ 130{
@@ -135,7 +135,7 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
135 DRM_DEBUG("%d\n", magic); 135 DRM_DEBUG("%d\n", magic);
136 hash = drm_hash_magic(magic); 136 hash = drm_hash_magic(magic);
137 137
138 down(&dev->struct_sem); 138 mutex_lock(&dev->struct_mutex);
139 for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) { 139 for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
140 if (pt->magic == magic) { 140 if (pt->magic == magic) {
141 if (dev->magiclist[hash].head == pt) { 141 if (dev->magiclist[hash].head == pt) {
@@ -147,11 +147,11 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
147 if (prev) { 147 if (prev) {
148 prev->next = pt->next; 148 prev->next = pt->next;
149 } 149 }
150 up(&dev->struct_sem); 150 mutex_unlock(&dev->struct_mutex);
151 return 0; 151 return 0;
152 } 152 }
153 } 153 }
154 up(&dev->struct_sem); 154 mutex_unlock(&dev->struct_mutex);
155 155
156 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); 156 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
157 157
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 1db12dcb6802..e2637b4d51de 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -255,14 +255,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
255 memset(list, 0, sizeof(*list)); 255 memset(list, 0, sizeof(*list));
256 list->map = map; 256 list->map = map;
257 257
258 down(&dev->struct_sem); 258 mutex_lock(&dev->struct_mutex);
259 list_add(&list->head, &dev->maplist->head); 259 list_add(&list->head, &dev->maplist->head);
260 /* Assign a 32-bit handle */ 260 /* Assign a 32-bit handle */
261 /* We do it here so that dev->struct_sem protects the increment */ 261 /* We do it here so that dev->struct_mutex protects the increment */
262 list->user_token = HandleID(map->type == _DRM_SHM 262 list->user_token = HandleID(map->type == _DRM_SHM
263 ? (unsigned long)map->handle 263 ? (unsigned long)map->handle
264 : map->offset, dev); 264 : map->offset, dev);
265 up(&dev->struct_sem); 265 mutex_unlock(&dev->struct_mutex);
266 266
267 *maplist = list; 267 *maplist = list;
268 return 0; 268 return 0;
@@ -392,9 +392,9 @@ int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
392{ 392{
393 int ret; 393 int ret;
394 394
395 down(&dev->struct_sem); 395 mutex_lock(&dev->struct_mutex);
396 ret = drm_rmmap_locked(dev, map); 396 ret = drm_rmmap_locked(dev, map);
397 up(&dev->struct_sem); 397 mutex_unlock(&dev->struct_mutex);
398 398
399 return ret; 399 return ret;
400} 400}
@@ -423,7 +423,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
423 return -EFAULT; 423 return -EFAULT;
424 } 424 }
425 425
426 down(&dev->struct_sem); 426 mutex_lock(&dev->struct_mutex);
427 list_for_each(list, &dev->maplist->head) { 427 list_for_each(list, &dev->maplist->head) {
428 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); 428 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
429 429
@@ -439,7 +439,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
439 * find anything. 439 * find anything.
440 */ 440 */
441 if (list == (&dev->maplist->head)) { 441 if (list == (&dev->maplist->head)) {
442 up(&dev->struct_sem); 442 mutex_unlock(&dev->struct_mutex);
443 return -EINVAL; 443 return -EINVAL;
444 } 444 }
445 445
@@ -448,13 +448,13 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
448 448
449 /* Register and framebuffer maps are permanent */ 449 /* Register and framebuffer maps are permanent */
450 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { 450 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
451 up(&dev->struct_sem); 451 mutex_unlock(&dev->struct_mutex);
452 return 0; 452 return 0;
453 } 453 }
454 454
455 ret = drm_rmmap_locked(dev, map); 455 ret = drm_rmmap_locked(dev, map);
456 456
457 up(&dev->struct_sem); 457 mutex_unlock(&dev->struct_mutex);
458 458
459 return ret; 459 return ret;
460} 460}
@@ -566,16 +566,16 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
566 atomic_inc(&dev->buf_alloc); 566 atomic_inc(&dev->buf_alloc);
567 spin_unlock(&dev->count_lock); 567 spin_unlock(&dev->count_lock);
568 568
569 down(&dev->struct_sem); 569 mutex_lock(&dev->struct_mutex);
570 entry = &dma->bufs[order]; 570 entry = &dma->bufs[order];
571 if (entry->buf_count) { 571 if (entry->buf_count) {
572 up(&dev->struct_sem); 572 mutex_unlock(&dev->struct_mutex);
573 atomic_dec(&dev->buf_alloc); 573 atomic_dec(&dev->buf_alloc);
574 return -ENOMEM; /* May only call once for each order */ 574 return -ENOMEM; /* May only call once for each order */
575 } 575 }
576 576
577 if (count < 0 || count > 4096) { 577 if (count < 0 || count > 4096) {
578 up(&dev->struct_sem); 578 mutex_unlock(&dev->struct_mutex);
579 atomic_dec(&dev->buf_alloc); 579 atomic_dec(&dev->buf_alloc);
580 return -EINVAL; 580 return -EINVAL;
581 } 581 }
@@ -583,7 +583,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
583 entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 583 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
584 DRM_MEM_BUFS); 584 DRM_MEM_BUFS);
585 if (!entry->buflist) { 585 if (!entry->buflist) {
586 up(&dev->struct_sem); 586 mutex_unlock(&dev->struct_mutex);
587 atomic_dec(&dev->buf_alloc); 587 atomic_dec(&dev->buf_alloc);
588 return -ENOMEM; 588 return -ENOMEM;
589 } 589 }
@@ -616,7 +616,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
616 /* Set count correctly so we free the proper amount. */ 616 /* Set count correctly so we free the proper amount. */
617 entry->buf_count = count; 617 entry->buf_count = count;
618 drm_cleanup_buf_error(dev, entry); 618 drm_cleanup_buf_error(dev, entry);
619 up(&dev->struct_sem); 619 mutex_unlock(&dev->struct_mutex);
620 atomic_dec(&dev->buf_alloc); 620 atomic_dec(&dev->buf_alloc);
621 return -ENOMEM; 621 return -ENOMEM;
622 } 622 }
@@ -638,7 +638,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
638 if (!temp_buflist) { 638 if (!temp_buflist) {
639 /* Free the entry because it isn't valid */ 639 /* Free the entry because it isn't valid */
640 drm_cleanup_buf_error(dev, entry); 640 drm_cleanup_buf_error(dev, entry);
641 up(&dev->struct_sem); 641 mutex_unlock(&dev->struct_mutex);
642 atomic_dec(&dev->buf_alloc); 642 atomic_dec(&dev->buf_alloc);
643 return -ENOMEM; 643 return -ENOMEM;
644 } 644 }
@@ -656,7 +656,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
656 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); 656 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
657 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); 657 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
658 658
659 up(&dev->struct_sem); 659 mutex_unlock(&dev->struct_mutex);
660 660
661 request->count = entry->buf_count; 661 request->count = entry->buf_count;
662 request->size = size; 662 request->size = size;
@@ -722,16 +722,16 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
722 atomic_inc(&dev->buf_alloc); 722 atomic_inc(&dev->buf_alloc);
723 spin_unlock(&dev->count_lock); 723 spin_unlock(&dev->count_lock);
724 724
725 down(&dev->struct_sem); 725 mutex_lock(&dev->struct_mutex);
726 entry = &dma->bufs[order]; 726 entry = &dma->bufs[order];
727 if (entry->buf_count) { 727 if (entry->buf_count) {
728 up(&dev->struct_sem); 728 mutex_unlock(&dev->struct_mutex);
729 atomic_dec(&dev->buf_alloc); 729 atomic_dec(&dev->buf_alloc);
730 return -ENOMEM; /* May only call once for each order */ 730 return -ENOMEM; /* May only call once for each order */
731 } 731 }
732 732
733 if (count < 0 || count > 4096) { 733 if (count < 0 || count > 4096) {
734 up(&dev->struct_sem); 734 mutex_unlock(&dev->struct_mutex);
735 atomic_dec(&dev->buf_alloc); 735 atomic_dec(&dev->buf_alloc);
736 return -EINVAL; 736 return -EINVAL;
737 } 737 }
@@ -739,7 +739,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
739 entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 739 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
740 DRM_MEM_BUFS); 740 DRM_MEM_BUFS);
741 if (!entry->buflist) { 741 if (!entry->buflist) {
742 up(&dev->struct_sem); 742 mutex_unlock(&dev->struct_mutex);
743 atomic_dec(&dev->buf_alloc); 743 atomic_dec(&dev->buf_alloc);
744 return -ENOMEM; 744 return -ENOMEM;
745 } 745 }
@@ -750,7 +750,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
750 if (!entry->seglist) { 750 if (!entry->seglist) {
751 drm_free(entry->buflist, 751 drm_free(entry->buflist,
752 count * sizeof(*entry->buflist), DRM_MEM_BUFS); 752 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
753 up(&dev->struct_sem); 753 mutex_unlock(&dev->struct_mutex);
754 atomic_dec(&dev->buf_alloc); 754 atomic_dec(&dev->buf_alloc);
755 return -ENOMEM; 755 return -ENOMEM;
756 } 756 }
@@ -766,7 +766,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
766 count * sizeof(*entry->buflist), DRM_MEM_BUFS); 766 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
767 drm_free(entry->seglist, 767 drm_free(entry->seglist,
768 count * sizeof(*entry->seglist), DRM_MEM_SEGS); 768 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
769 up(&dev->struct_sem); 769 mutex_unlock(&dev->struct_mutex);
770 atomic_dec(&dev->buf_alloc); 770 atomic_dec(&dev->buf_alloc);
771 return -ENOMEM; 771 return -ENOMEM;
772 } 772 }
@@ -790,7 +790,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
790 drm_free(temp_pagelist, 790 drm_free(temp_pagelist,
791 (dma->page_count + (count << page_order)) 791 (dma->page_count + (count << page_order))
792 * sizeof(*dma->pagelist), DRM_MEM_PAGES); 792 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
793 up(&dev->struct_sem); 793 mutex_unlock(&dev->struct_mutex);
794 atomic_dec(&dev->buf_alloc); 794 atomic_dec(&dev->buf_alloc);
795 return -ENOMEM; 795 return -ENOMEM;
796 } 796 }
@@ -831,7 +831,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
831 (count << page_order)) 831 (count << page_order))
832 * sizeof(*dma->pagelist), 832 * sizeof(*dma->pagelist),
833 DRM_MEM_PAGES); 833 DRM_MEM_PAGES);
834 up(&dev->struct_sem); 834 mutex_unlock(&dev->struct_mutex);
835 atomic_dec(&dev->buf_alloc); 835 atomic_dec(&dev->buf_alloc);
836 return -ENOMEM; 836 return -ENOMEM;
837 } 837 }
@@ -853,7 +853,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
853 drm_free(temp_pagelist, 853 drm_free(temp_pagelist,
854 (dma->page_count + (count << page_order)) 854 (dma->page_count + (count << page_order))
855 * sizeof(*dma->pagelist), DRM_MEM_PAGES); 855 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
856 up(&dev->struct_sem); 856 mutex_unlock(&dev->struct_mutex);
857 atomic_dec(&dev->buf_alloc); 857 atomic_dec(&dev->buf_alloc);
858 return -ENOMEM; 858 return -ENOMEM;
859 } 859 }
@@ -878,7 +878,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
878 dma->page_count += entry->seg_count << page_order; 878 dma->page_count += entry->seg_count << page_order;
879 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); 879 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
880 880
881 up(&dev->struct_sem); 881 mutex_unlock(&dev->struct_mutex);
882 882
883 request->count = entry->buf_count; 883 request->count = entry->buf_count;
884 request->size = size; 884 request->size = size;
@@ -948,16 +948,16 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
948 atomic_inc(&dev->buf_alloc); 948 atomic_inc(&dev->buf_alloc);
949 spin_unlock(&dev->count_lock); 949 spin_unlock(&dev->count_lock);
950 950
951 down(&dev->struct_sem); 951 mutex_lock(&dev->struct_mutex);
952 entry = &dma->bufs[order]; 952 entry = &dma->bufs[order];
953 if (entry->buf_count) { 953 if (entry->buf_count) {
954 up(&dev->struct_sem); 954 mutex_unlock(&dev->struct_mutex);
955 atomic_dec(&dev->buf_alloc); 955 atomic_dec(&dev->buf_alloc);
956 return -ENOMEM; /* May only call once for each order */ 956 return -ENOMEM; /* May only call once for each order */
957 } 957 }
958 958
959 if (count < 0 || count > 4096) { 959 if (count < 0 || count > 4096) {
960 up(&dev->struct_sem); 960 mutex_unlock(&dev->struct_mutex);
961 atomic_dec(&dev->buf_alloc); 961 atomic_dec(&dev->buf_alloc);
962 return -EINVAL; 962 return -EINVAL;
963 } 963 }
@@ -965,7 +965,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
965 entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 965 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
966 DRM_MEM_BUFS); 966 DRM_MEM_BUFS);
967 if (!entry->buflist) { 967 if (!entry->buflist) {
968 up(&dev->struct_sem); 968 mutex_unlock(&dev->struct_mutex);
969 atomic_dec(&dev->buf_alloc); 969 atomic_dec(&dev->buf_alloc);
970 return -ENOMEM; 970 return -ENOMEM;
971 } 971 }
@@ -999,7 +999,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
999 /* Set count correctly so we free the proper amount. */ 999 /* Set count correctly so we free the proper amount. */
1000 entry->buf_count = count; 1000 entry->buf_count = count;
1001 drm_cleanup_buf_error(dev, entry); 1001 drm_cleanup_buf_error(dev, entry);
1002 up(&dev->struct_sem); 1002 mutex_unlock(&dev->struct_mutex);
1003 atomic_dec(&dev->buf_alloc); 1003 atomic_dec(&dev->buf_alloc);
1004 return -ENOMEM; 1004 return -ENOMEM;
1005 } 1005 }
@@ -1022,7 +1022,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
1022 if (!temp_buflist) { 1022 if (!temp_buflist) {
1023 /* Free the entry because it isn't valid */ 1023 /* Free the entry because it isn't valid */
1024 drm_cleanup_buf_error(dev, entry); 1024 drm_cleanup_buf_error(dev, entry);
1025 up(&dev->struct_sem); 1025 mutex_unlock(&dev->struct_mutex);
1026 atomic_dec(&dev->buf_alloc); 1026 atomic_dec(&dev->buf_alloc);
1027 return -ENOMEM; 1027 return -ENOMEM;
1028 } 1028 }
@@ -1040,7 +1040,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
1040 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); 1040 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1041 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); 1041 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1042 1042
1043 up(&dev->struct_sem); 1043 mutex_unlock(&dev->struct_mutex);
1044 1044
1045 request->count = entry->buf_count; 1045 request->count = entry->buf_count;
1046 request->size = size; 1046 request->size = size;
@@ -1110,16 +1110,16 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1110 atomic_inc(&dev->buf_alloc); 1110 atomic_inc(&dev->buf_alloc);
1111 spin_unlock(&dev->count_lock); 1111 spin_unlock(&dev->count_lock);
1112 1112
1113 down(&dev->struct_sem); 1113 mutex_lock(&dev->struct_mutex);
1114 entry = &dma->bufs[order]; 1114 entry = &dma->bufs[order];
1115 if (entry->buf_count) { 1115 if (entry->buf_count) {
1116 up(&dev->struct_sem); 1116 mutex_unlock(&dev->struct_mutex);
1117 atomic_dec(&dev->buf_alloc); 1117 atomic_dec(&dev->buf_alloc);
1118 return -ENOMEM; /* May only call once for each order */ 1118 return -ENOMEM; /* May only call once for each order */
1119 } 1119 }
1120 1120
1121 if (count < 0 || count > 4096) { 1121 if (count < 0 || count > 4096) {
1122 up(&dev->struct_sem); 1122 mutex_unlock(&dev->struct_mutex);
1123 atomic_dec(&dev->buf_alloc); 1123 atomic_dec(&dev->buf_alloc);
1124 return -EINVAL; 1124 return -EINVAL;
1125 } 1125 }
@@ -1127,7 +1127,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1127 entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 1127 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1128 DRM_MEM_BUFS); 1128 DRM_MEM_BUFS);
1129 if (!entry->buflist) { 1129 if (!entry->buflist) {
1130 up(&dev->struct_sem); 1130 mutex_unlock(&dev->struct_mutex);
1131 atomic_dec(&dev->buf_alloc); 1131 atomic_dec(&dev->buf_alloc);
1132 return -ENOMEM; 1132 return -ENOMEM;
1133 } 1133 }
@@ -1160,7 +1160,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1160 /* Set count correctly so we free the proper amount. */ 1160 /* Set count correctly so we free the proper amount. */
1161 entry->buf_count = count; 1161 entry->buf_count = count;
1162 drm_cleanup_buf_error(dev, entry); 1162 drm_cleanup_buf_error(dev, entry);
1163 up(&dev->struct_sem); 1163 mutex_unlock(&dev->struct_mutex);
1164 atomic_dec(&dev->buf_alloc); 1164 atomic_dec(&dev->buf_alloc);
1165 return -ENOMEM; 1165 return -ENOMEM;
1166 } 1166 }
@@ -1182,7 +1182,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1182 if (!temp_buflist) { 1182 if (!temp_buflist) {
1183 /* Free the entry because it isn't valid */ 1183 /* Free the entry because it isn't valid */
1184 drm_cleanup_buf_error(dev, entry); 1184 drm_cleanup_buf_error(dev, entry);
1185 up(&dev->struct_sem); 1185 mutex_unlock(&dev->struct_mutex);
1186 atomic_dec(&dev->buf_alloc); 1186 atomic_dec(&dev->buf_alloc);
1187 return -ENOMEM; 1187 return -ENOMEM;
1188 } 1188 }
@@ -1200,7 +1200,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1200 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); 1200 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1201 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); 1201 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1202 1202
1203 up(&dev->struct_sem); 1203 mutex_unlock(&dev->struct_mutex);
1204 1204
1205 request->count = entry->buf_count; 1205 request->count = entry->buf_count;
1206 request->size = size; 1206 request->size = size;
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index f84254526949..83094c73da67 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -53,7 +53,7 @@
53 * \param ctx_handle context handle. 53 * \param ctx_handle context handle.
54 * 54 *
55 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry 55 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
56 * in drm_device::context_sareas, while holding the drm_device::struct_sem 56 * in drm_device::context_sareas, while holding the drm_device::struct_mutex
57 * lock. 57 * lock.
58 */ 58 */
59void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) 59void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
@@ -64,10 +64,10 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
64 goto failed; 64 goto failed;
65 65
66 if (ctx_handle < DRM_MAX_CTXBITMAP) { 66 if (ctx_handle < DRM_MAX_CTXBITMAP) {
67 down(&dev->struct_sem); 67 mutex_lock(&dev->struct_mutex);
68 clear_bit(ctx_handle, dev->ctx_bitmap); 68 clear_bit(ctx_handle, dev->ctx_bitmap);
69 dev->context_sareas[ctx_handle] = NULL; 69 dev->context_sareas[ctx_handle] = NULL;
70 up(&dev->struct_sem); 70 mutex_unlock(&dev->struct_mutex);
71 return; 71 return;
72 } 72 }
73 failed: 73 failed:
@@ -83,7 +83,7 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
83 * 83 *
84 * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates 84 * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
85 * drm_device::context_sareas to accommodate the new entry while holding the 85 * drm_device::context_sareas to accommodate the new entry while holding the
86 * drm_device::struct_sem lock. 86 * drm_device::struct_mutex lock.
87 */ 87 */
88static int drm_ctxbitmap_next(drm_device_t * dev) 88static int drm_ctxbitmap_next(drm_device_t * dev)
89{ 89{
@@ -92,7 +92,7 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
92 if (!dev->ctx_bitmap) 92 if (!dev->ctx_bitmap)
93 return -1; 93 return -1;
94 94
95 down(&dev->struct_sem); 95 mutex_lock(&dev->struct_mutex);
96 bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP); 96 bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
97 if (bit < DRM_MAX_CTXBITMAP) { 97 if (bit < DRM_MAX_CTXBITMAP) {
98 set_bit(bit, dev->ctx_bitmap); 98 set_bit(bit, dev->ctx_bitmap);
@@ -113,7 +113,7 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
113 DRM_MEM_MAPS); 113 DRM_MEM_MAPS);
114 if (!ctx_sareas) { 114 if (!ctx_sareas) {
115 clear_bit(bit, dev->ctx_bitmap); 115 clear_bit(bit, dev->ctx_bitmap);
116 up(&dev->struct_sem); 116 mutex_unlock(&dev->struct_mutex);
117 return -1; 117 return -1;
118 } 118 }
119 dev->context_sareas = ctx_sareas; 119 dev->context_sareas = ctx_sareas;
@@ -126,16 +126,16 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
126 DRM_MEM_MAPS); 126 DRM_MEM_MAPS);
127 if (!dev->context_sareas) { 127 if (!dev->context_sareas) {
128 clear_bit(bit, dev->ctx_bitmap); 128 clear_bit(bit, dev->ctx_bitmap);
129 up(&dev->struct_sem); 129 mutex_unlock(&dev->struct_mutex);
130 return -1; 130 return -1;
131 } 131 }
132 dev->context_sareas[bit] = NULL; 132 dev->context_sareas[bit] = NULL;
133 } 133 }
134 } 134 }
135 up(&dev->struct_sem); 135 mutex_unlock(&dev->struct_mutex);
136 return bit; 136 return bit;
137 } 137 }
138 up(&dev->struct_sem); 138 mutex_unlock(&dev->struct_mutex);
139 return -1; 139 return -1;
140} 140}
141 141
@@ -145,24 +145,24 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
145 * \param dev DRM device. 145 * \param dev DRM device.
146 * 146 *
147 * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding 147 * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
148 * the drm_device::struct_sem lock. 148 * the drm_device::struct_mutex lock.
149 */ 149 */
150int drm_ctxbitmap_init(drm_device_t * dev) 150int drm_ctxbitmap_init(drm_device_t * dev)
151{ 151{
152 int i; 152 int i;
153 int temp; 153 int temp;
154 154
155 down(&dev->struct_sem); 155 mutex_lock(&dev->struct_mutex);
156 dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE, 156 dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
157 DRM_MEM_CTXBITMAP); 157 DRM_MEM_CTXBITMAP);
158 if (dev->ctx_bitmap == NULL) { 158 if (dev->ctx_bitmap == NULL) {
159 up(&dev->struct_sem); 159 mutex_unlock(&dev->struct_mutex);
160 return -ENOMEM; 160 return -ENOMEM;
161 } 161 }
162 memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE); 162 memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
163 dev->context_sareas = NULL; 163 dev->context_sareas = NULL;
164 dev->max_context = -1; 164 dev->max_context = -1;
165 up(&dev->struct_sem); 165 mutex_unlock(&dev->struct_mutex);
166 166
167 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 167 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
168 temp = drm_ctxbitmap_next(dev); 168 temp = drm_ctxbitmap_next(dev);
@@ -178,17 +178,17 @@ int drm_ctxbitmap_init(drm_device_t * dev)
178 * \param dev DRM device. 178 * \param dev DRM device.
179 * 179 *
180 * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding 180 * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
181 * the drm_device::struct_sem lock. 181 * the drm_device::struct_mutex lock.
182 */ 182 */
183void drm_ctxbitmap_cleanup(drm_device_t * dev) 183void drm_ctxbitmap_cleanup(drm_device_t * dev)
184{ 184{
185 down(&dev->struct_sem); 185 mutex_lock(&dev->struct_mutex);
186 if (dev->context_sareas) 186 if (dev->context_sareas)
187 drm_free(dev->context_sareas, 187 drm_free(dev->context_sareas,
188 sizeof(*dev->context_sareas) * 188 sizeof(*dev->context_sareas) *
189 dev->max_context, DRM_MEM_MAPS); 189 dev->max_context, DRM_MEM_MAPS);
190 drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP); 190 drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
191 up(&dev->struct_sem); 191 mutex_unlock(&dev->struct_mutex);
192} 192}
193 193
194/*@}*/ 194/*@}*/
@@ -222,15 +222,15 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
222 if (copy_from_user(&request, argp, sizeof(request))) 222 if (copy_from_user(&request, argp, sizeof(request)))
223 return -EFAULT; 223 return -EFAULT;
224 224
225 down(&dev->struct_sem); 225 mutex_lock(&dev->struct_mutex);
226 if (dev->max_context < 0 226 if (dev->max_context < 0
227 || request.ctx_id >= (unsigned)dev->max_context) { 227 || request.ctx_id >= (unsigned)dev->max_context) {
228 up(&dev->struct_sem); 228 mutex_unlock(&dev->struct_mutex);
229 return -EINVAL; 229 return -EINVAL;
230 } 230 }
231 231
232 map = dev->context_sareas[request.ctx_id]; 232 map = dev->context_sareas[request.ctx_id];
233 up(&dev->struct_sem); 233 mutex_unlock(&dev->struct_mutex);
234 234
235 request.handle = NULL; 235 request.handle = NULL;
236 list_for_each_entry(_entry, &dev->maplist->head, head) { 236 list_for_each_entry(_entry, &dev->maplist->head, head) {
@@ -274,7 +274,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
274 (drm_ctx_priv_map_t __user *) arg, sizeof(request))) 274 (drm_ctx_priv_map_t __user *) arg, sizeof(request)))
275 return -EFAULT; 275 return -EFAULT;
276 276
277 down(&dev->struct_sem); 277 mutex_lock(&dev->struct_mutex);
278 list_for_each(list, &dev->maplist->head) { 278 list_for_each(list, &dev->maplist->head) {
279 r_list = list_entry(list, drm_map_list_t, head); 279 r_list = list_entry(list, drm_map_list_t, head);
280 if (r_list->map 280 if (r_list->map
@@ -282,7 +282,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
282 goto found; 282 goto found;
283 } 283 }
284 bad: 284 bad:
285 up(&dev->struct_sem); 285 mutex_unlock(&dev->struct_mutex);
286 return -EINVAL; 286 return -EINVAL;
287 287
288 found: 288 found:
@@ -294,7 +294,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
294 if (request.ctx_id >= (unsigned)dev->max_context) 294 if (request.ctx_id >= (unsigned)dev->max_context)
295 goto bad; 295 goto bad;
296 dev->context_sareas[request.ctx_id] = map; 296 dev->context_sareas[request.ctx_id] = map;
297 up(&dev->struct_sem); 297 mutex_unlock(&dev->struct_mutex);
298 return 0; 298 return 0;
299} 299}
300 300
@@ -448,10 +448,10 @@ int drm_addctx(struct inode *inode, struct file *filp,
448 ctx_entry->handle = ctx.handle; 448 ctx_entry->handle = ctx.handle;
449 ctx_entry->tag = priv; 449 ctx_entry->tag = priv;
450 450
451 down(&dev->ctxlist_sem); 451 mutex_lock(&dev->ctxlist_mutex);
452 list_add(&ctx_entry->head, &dev->ctxlist->head); 452 list_add(&ctx_entry->head, &dev->ctxlist->head);
453 ++dev->ctx_count; 453 ++dev->ctx_count;
454 up(&dev->ctxlist_sem); 454 mutex_unlock(&dev->ctxlist_mutex);
455 455
456 if (copy_to_user(argp, &ctx, sizeof(ctx))) 456 if (copy_to_user(argp, &ctx, sizeof(ctx)))
457 return -EFAULT; 457 return -EFAULT;
@@ -574,7 +574,7 @@ int drm_rmctx(struct inode *inode, struct file *filp,
574 drm_ctxbitmap_free(dev, ctx.handle); 574 drm_ctxbitmap_free(dev, ctx.handle);
575 } 575 }
576 576
577 down(&dev->ctxlist_sem); 577 mutex_lock(&dev->ctxlist_mutex);
578 if (!list_empty(&dev->ctxlist->head)) { 578 if (!list_empty(&dev->ctxlist->head)) {
579 drm_ctx_list_t *pos, *n; 579 drm_ctx_list_t *pos, *n;
580 580
@@ -586,7 +586,7 @@ int drm_rmctx(struct inode *inode, struct file *filp,
586 } 586 }
587 } 587 }
588 } 588 }
589 up(&dev->ctxlist_sem); 589 mutex_unlock(&dev->ctxlist_mutex);
590 590
591 return 0; 591 return 0;
592} 592}
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index c4fa5a29582b..dc6bbe8a18dc 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -151,7 +151,7 @@ int drm_lastclose(drm_device_t * dev)
151 if (dev->irq_enabled) 151 if (dev->irq_enabled)
152 drm_irq_uninstall(dev); 152 drm_irq_uninstall(dev);
153 153
154 down(&dev->struct_sem); 154 mutex_lock(&dev->struct_mutex);
155 del_timer(&dev->timer); 155 del_timer(&dev->timer);
156 156
157 /* Clear pid list */ 157 /* Clear pid list */
@@ -231,7 +231,7 @@ int drm_lastclose(drm_device_t * dev)
231 dev->lock.filp = NULL; 231 dev->lock.filp = NULL;
232 wake_up_interruptible(&dev->lock.lock_queue); 232 wake_up_interruptible(&dev->lock.lock_queue);
233 } 233 }
234 up(&dev->struct_sem); 234 mutex_unlock(&dev->struct_mutex);
235 235
236 DRM_DEBUG("lastclose completed\n"); 236 DRM_DEBUG("lastclose completed\n");
237 return 0; 237 return 0;
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 403f44a1bf01..641f7633878c 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -262,7 +262,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
262 goto out_free; 262 goto out_free;
263 } 263 }
264 264
265 down(&dev->struct_sem); 265 mutex_lock(&dev->struct_mutex);
266 if (!dev->file_last) { 266 if (!dev->file_last) {
267 priv->next = NULL; 267 priv->next = NULL;
268 priv->prev = NULL; 268 priv->prev = NULL;
@@ -276,7 +276,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
276 dev->file_last->next = priv; 276 dev->file_last->next = priv;
277 dev->file_last = priv; 277 dev->file_last = priv;
278 } 278 }
279 up(&dev->struct_sem); 279 mutex_unlock(&dev->struct_mutex);
280 280
281#ifdef __alpha__ 281#ifdef __alpha__
282 /* 282 /*
@@ -413,7 +413,7 @@ int drm_release(struct inode *inode, struct file *filp)
413 413
414 drm_fasync(-1, filp, 0); 414 drm_fasync(-1, filp, 0);
415 415
416 down(&dev->ctxlist_sem); 416 mutex_lock(&dev->ctxlist_mutex);
417 if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) { 417 if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
418 drm_ctx_list_t *pos, *n; 418 drm_ctx_list_t *pos, *n;
419 419
@@ -432,9 +432,9 @@ int drm_release(struct inode *inode, struct file *filp)
432 } 432 }
433 } 433 }
434 } 434 }
435 up(&dev->ctxlist_sem); 435 mutex_unlock(&dev->ctxlist_mutex);
436 436
437 down(&dev->struct_sem); 437 mutex_lock(&dev->struct_mutex);
438 if (priv->remove_auth_on_close == 1) { 438 if (priv->remove_auth_on_close == 1) {
439 drm_file_t *temp = dev->file_first; 439 drm_file_t *temp = dev->file_first;
440 while (temp) { 440 while (temp) {
@@ -452,7 +452,7 @@ int drm_release(struct inode *inode, struct file *filp)
452 } else { 452 } else {
453 dev->file_last = priv->prev; 453 dev->file_last = priv->prev;
454 } 454 }
455 up(&dev->struct_sem); 455 mutex_unlock(&dev->struct_mutex);
456 456
457 if (dev->driver->postclose) 457 if (dev->driver->postclose)
458 dev->driver->postclose(dev, priv); 458 dev->driver->postclose(dev, priv);
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index bcd4e604d3ec..555f323b8a32 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -194,9 +194,9 @@ int drm_getmap(struct inode *inode, struct file *filp,
194 return -EFAULT; 194 return -EFAULT;
195 idx = map.offset; 195 idx = map.offset;
196 196
197 down(&dev->struct_sem); 197 mutex_lock(&dev->struct_mutex);
198 if (idx < 0) { 198 if (idx < 0) {
199 up(&dev->struct_sem); 199 mutex_unlock(&dev->struct_mutex);
200 return -EINVAL; 200 return -EINVAL;
201 } 201 }
202 202
@@ -209,7 +209,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
209 i++; 209 i++;
210 } 210 }
211 if (!r_list || !r_list->map) { 211 if (!r_list || !r_list->map) {
212 up(&dev->struct_sem); 212 mutex_unlock(&dev->struct_mutex);
213 return -EINVAL; 213 return -EINVAL;
214 } 214 }
215 215
@@ -219,7 +219,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
219 map.flags = r_list->map->flags; 219 map.flags = r_list->map->flags;
220 map.handle = (void *)(unsigned long)r_list->user_token; 220 map.handle = (void *)(unsigned long)r_list->user_token;
221 map.mtrr = r_list->map->mtrr; 221 map.mtrr = r_list->map->mtrr;
222 up(&dev->struct_sem); 222 mutex_unlock(&dev->struct_mutex);
223 223
224 if (copy_to_user(argp, &map, sizeof(map))) 224 if (copy_to_user(argp, &map, sizeof(map)))
225 return -EFAULT; 225 return -EFAULT;
@@ -253,11 +253,11 @@ int drm_getclient(struct inode *inode, struct file *filp,
253 if (copy_from_user(&client, argp, sizeof(client))) 253 if (copy_from_user(&client, argp, sizeof(client)))
254 return -EFAULT; 254 return -EFAULT;
255 idx = client.idx; 255 idx = client.idx;
256 down(&dev->struct_sem); 256 mutex_lock(&dev->struct_mutex);
257 for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ; 257 for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
258 258
259 if (!pt) { 259 if (!pt) {
260 up(&dev->struct_sem); 260 mutex_unlock(&dev->struct_mutex);
261 return -EINVAL; 261 return -EINVAL;
262 } 262 }
263 client.auth = pt->authenticated; 263 client.auth = pt->authenticated;
@@ -265,7 +265,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
265 client.uid = pt->uid; 265 client.uid = pt->uid;
266 client.magic = pt->magic; 266 client.magic = pt->magic;
267 client.iocs = pt->ioctl_count; 267 client.iocs = pt->ioctl_count;
268 up(&dev->struct_sem); 268 mutex_unlock(&dev->struct_mutex);
269 269
270 if (copy_to_user(argp, &client, sizeof(client))) 270 if (copy_to_user(argp, &client, sizeof(client)))
271 return -EFAULT; 271 return -EFAULT;
@@ -292,7 +292,7 @@ int drm_getstats(struct inode *inode, struct file *filp,
292 292
293 memset(&stats, 0, sizeof(stats)); 293 memset(&stats, 0, sizeof(stats));
294 294
295 down(&dev->struct_sem); 295 mutex_lock(&dev->struct_mutex);
296 296
297 for (i = 0; i < dev->counters; i++) { 297 for (i = 0; i < dev->counters; i++) {
298 if (dev->types[i] == _DRM_STAT_LOCK) 298 if (dev->types[i] == _DRM_STAT_LOCK)
@@ -305,7 +305,7 @@ int drm_getstats(struct inode *inode, struct file *filp,
305 305
306 stats.count = dev->counters; 306 stats.count = dev->counters;
307 307
308 up(&dev->struct_sem); 308 mutex_unlock(&dev->struct_mutex);
309 309
310 if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats))) 310 if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
311 return -EFAULT; 311 return -EFAULT;
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index b0d4b236e837..611a1173091d 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -98,20 +98,20 @@ static int drm_irq_install(drm_device_t * dev)
98 if (dev->irq == 0) 98 if (dev->irq == 0)
99 return -EINVAL; 99 return -EINVAL;
100 100
101 down(&dev->struct_sem); 101 mutex_lock(&dev->struct_mutex);
102 102
103 /* Driver must have been initialized */ 103 /* Driver must have been initialized */
104 if (!dev->dev_private) { 104 if (!dev->dev_private) {
105 up(&dev->struct_sem); 105 mutex_unlock(&dev->struct_mutex);
106 return -EINVAL; 106 return -EINVAL;
107 } 107 }
108 108
109 if (dev->irq_enabled) { 109 if (dev->irq_enabled) {
110 up(&dev->struct_sem); 110 mutex_unlock(&dev->struct_mutex);
111 return -EBUSY; 111 return -EBUSY;
112 } 112 }
113 dev->irq_enabled = 1; 113 dev->irq_enabled = 1;
114 up(&dev->struct_sem); 114 mutex_unlock(&dev->struct_mutex);
115 115
116 DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq); 116 DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
117 117
@@ -135,9 +135,9 @@ static int drm_irq_install(drm_device_t * dev)
135 ret = request_irq(dev->irq, dev->driver->irq_handler, 135 ret = request_irq(dev->irq, dev->driver->irq_handler,
136 sh_flags, dev->devname, dev); 136 sh_flags, dev->devname, dev);
137 if (ret < 0) { 137 if (ret < 0) {
138 down(&dev->struct_sem); 138 mutex_lock(&dev->struct_mutex);
139 dev->irq_enabled = 0; 139 dev->irq_enabled = 0;
140 up(&dev->struct_sem); 140 mutex_unlock(&dev->struct_mutex);
141 return ret; 141 return ret;
142 } 142 }
143 143
@@ -161,10 +161,10 @@ int drm_irq_uninstall(drm_device_t * dev)
161 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 161 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
162 return -EINVAL; 162 return -EINVAL;
163 163
164 down(&dev->struct_sem); 164 mutex_lock(&dev->struct_mutex);
165 irq_enabled = dev->irq_enabled; 165 irq_enabled = dev->irq_enabled;
166 dev->irq_enabled = 0; 166 dev->irq_enabled = 0;
167 up(&dev->struct_sem); 167 mutex_unlock(&dev->struct_mutex);
168 168
169 if (!irq_enabled) 169 if (!irq_enabled)
170 return -EINVAL; 170 return -EINVAL;
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 5b1d3a04458d..8fd6357a48da 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -3,6 +3,7 @@
3 Please contact dri-devel@lists.sf.net to add new cards to this list 3 Please contact dri-devel@lists.sf.net to add new cards to this list
4*/ 4*/
5#define radeon_PCI_IDS \ 5#define radeon_PCI_IDS \
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350},\
6 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|CHIP_IS_IGP}, \ 7 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|CHIP_IS_IGP}, \
7 {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP}, \ 8 {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP}, \
8 {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 9 {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
@@ -242,5 +243,6 @@
242 {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 243 {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
243 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 244 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
244 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 245 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
246 {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
245 {0, 0, 0} 247 {0, 0, 0}
246 248
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 6f943e3309ef..362a270af0f1 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -258,7 +258,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
258} 258}
259 259
260/** 260/**
261 * Simply calls _vm_info() while holding the drm_device::struct_sem lock. 261 * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
262 */ 262 */
263static int drm_vm_info(char *buf, char **start, off_t offset, int request, 263static int drm_vm_info(char *buf, char **start, off_t offset, int request,
264 int *eof, void *data) 264 int *eof, void *data)
@@ -266,9 +266,9 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request,
266 drm_device_t *dev = (drm_device_t *) data; 266 drm_device_t *dev = (drm_device_t *) data;
267 int ret; 267 int ret;
268 268
269 down(&dev->struct_sem); 269 mutex_lock(&dev->struct_mutex);
270 ret = drm__vm_info(buf, start, offset, request, eof, data); 270 ret = drm__vm_info(buf, start, offset, request, eof, data);
271 up(&dev->struct_sem); 271 mutex_unlock(&dev->struct_mutex);
272 return ret; 272 return ret;
273} 273}
274 274
@@ -331,7 +331,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
331} 331}
332 332
333/** 333/**
334 * Simply calls _queues_info() while holding the drm_device::struct_sem lock. 334 * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
335 */ 335 */
336static int drm_queues_info(char *buf, char **start, off_t offset, int request, 336static int drm_queues_info(char *buf, char **start, off_t offset, int request,
337 int *eof, void *data) 337 int *eof, void *data)
@@ -339,9 +339,9 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request,
339 drm_device_t *dev = (drm_device_t *) data; 339 drm_device_t *dev = (drm_device_t *) data;
340 int ret; 340 int ret;
341 341
342 down(&dev->struct_sem); 342 mutex_lock(&dev->struct_mutex);
343 ret = drm__queues_info(buf, start, offset, request, eof, data); 343 ret = drm__queues_info(buf, start, offset, request, eof, data);
344 up(&dev->struct_sem); 344 mutex_unlock(&dev->struct_mutex);
345 return ret; 345 return ret;
346} 346}
347 347
@@ -403,7 +403,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
403} 403}
404 404
405/** 405/**
406 * Simply calls _bufs_info() while holding the drm_device::struct_sem lock. 406 * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
407 */ 407 */
408static int drm_bufs_info(char *buf, char **start, off_t offset, int request, 408static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
409 int *eof, void *data) 409 int *eof, void *data)
@@ -411,9 +411,9 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
411 drm_device_t *dev = (drm_device_t *) data; 411 drm_device_t *dev = (drm_device_t *) data;
412 int ret; 412 int ret;
413 413
414 down(&dev->struct_sem); 414 mutex_lock(&dev->struct_mutex);
415 ret = drm__bufs_info(buf, start, offset, request, eof, data); 415 ret = drm__bufs_info(buf, start, offset, request, eof, data);
416 up(&dev->struct_sem); 416 mutex_unlock(&dev->struct_mutex);
417 return ret; 417 return ret;
418} 418}
419 419
@@ -459,7 +459,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
459} 459}
460 460
461/** 461/**
462 * Simply calls _clients_info() while holding the drm_device::struct_sem lock. 462 * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
463 */ 463 */
464static int drm_clients_info(char *buf, char **start, off_t offset, 464static int drm_clients_info(char *buf, char **start, off_t offset,
465 int request, int *eof, void *data) 465 int request, int *eof, void *data)
@@ -467,9 +467,9 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
467 drm_device_t *dev = (drm_device_t *) data; 467 drm_device_t *dev = (drm_device_t *) data;
468 int ret; 468 int ret;
469 469
470 down(&dev->struct_sem); 470 mutex_lock(&dev->struct_mutex);
471 ret = drm__clients_info(buf, start, offset, request, eof, data); 471 ret = drm__clients_info(buf, start, offset, request, eof, data);
472 up(&dev->struct_sem); 472 mutex_unlock(&dev->struct_mutex);
473 return ret; 473 return ret;
474} 474}
475 475
@@ -540,9 +540,9 @@ static int drm_vma_info(char *buf, char **start, off_t offset, int request,
540 drm_device_t *dev = (drm_device_t *) data; 540 drm_device_t *dev = (drm_device_t *) data;
541 int ret; 541 int ret;
542 542
543 down(&dev->struct_sem); 543 mutex_lock(&dev->struct_mutex);
544 ret = drm__vma_info(buf, start, offset, request, eof, data); 544 ret = drm__vma_info(buf, start, offset, request, eof, data);
545 up(&dev->struct_sem); 545 mutex_unlock(&dev->struct_mutex);
546 return ret; 546 return ret;
547} 547}
548#endif 548#endif
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 42d766359caa..7a9263ff3007 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -61,8 +61,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
61 61
62 spin_lock_init(&dev->count_lock); 62 spin_lock_init(&dev->count_lock);
63 init_timer(&dev->timer); 63 init_timer(&dev->timer);
64 sema_init(&dev->struct_sem, 1); 64 mutex_init(&dev->struct_mutex);
65 sema_init(&dev->ctxlist_sem, 1); 65 mutex_init(&dev->ctxlist_mutex);
66 66
67 dev->pdev = pdev; 67 dev->pdev = pdev;
68 68
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 3f73aa774c80..0291cd62c69f 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -188,7 +188,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
188 188
189 map = vma->vm_private_data; 189 map = vma->vm_private_data;
190 190
191 down(&dev->struct_sem); 191 mutex_lock(&dev->struct_mutex);
192 for (pt = dev->vmalist, prev = NULL; pt; pt = next) { 192 for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
193 next = pt->next; 193 next = pt->next;
194 if (pt->vma->vm_private_data == map) 194 if (pt->vma->vm_private_data == map)
@@ -248,7 +248,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
248 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 248 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
249 } 249 }
250 } 250 }
251 up(&dev->struct_sem); 251 mutex_unlock(&dev->struct_mutex);
252} 252}
253 253
254/** 254/**
@@ -404,12 +404,12 @@ static void drm_vm_open(struct vm_area_struct *vma)
404 404
405 vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); 405 vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
406 if (vma_entry) { 406 if (vma_entry) {
407 down(&dev->struct_sem); 407 mutex_lock(&dev->struct_mutex);
408 vma_entry->vma = vma; 408 vma_entry->vma = vma;
409 vma_entry->next = dev->vmalist; 409 vma_entry->next = dev->vmalist;
410 vma_entry->pid = current->pid; 410 vma_entry->pid = current->pid;
411 dev->vmalist = vma_entry; 411 dev->vmalist = vma_entry;
412 up(&dev->struct_sem); 412 mutex_unlock(&dev->struct_mutex);
413 } 413 }
414} 414}
415 415
@@ -431,7 +431,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
431 vma->vm_start, vma->vm_end - vma->vm_start); 431 vma->vm_start, vma->vm_end - vma->vm_start);
432 atomic_dec(&dev->vma_count); 432 atomic_dec(&dev->vma_count);
433 433
434 down(&dev->struct_sem); 434 mutex_lock(&dev->struct_mutex);
435 for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) { 435 for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
436 if (pt->vma == vma) { 436 if (pt->vma == vma) {
437 if (prev) { 437 if (prev) {
@@ -443,7 +443,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
443 break; 443 break;
444 } 444 }
445 } 445 }
446 up(&dev->struct_sem); 446 mutex_unlock(&dev->struct_mutex);
447} 447}
448 448
449/** 449/**
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index cc1b89086876..ae0aa6d7e0bb 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -958,7 +958,7 @@ static int i810_flush_queue(drm_device_t * dev)
958} 958}
959 959
960/* Must be called with the lock held */ 960/* Must be called with the lock held */
961void i810_reclaim_buffers(drm_device_t * dev, struct file *filp) 961static void i810_reclaim_buffers(drm_device_t * dev, struct file *filp)
962{ 962{
963 drm_device_dma_t *dma = dev->dma; 963 drm_device_dma_t *dma = dev->dma;
964 int i; 964 int i;
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index a18b80d91920..e8cf3ff606f0 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -113,8 +113,6 @@ typedef struct drm_i810_private {
113} drm_i810_private_t; 113} drm_i810_private_t;
114 114
115 /* i810_dma.c */ 115 /* i810_dma.c */
116extern void i810_reclaim_buffers(drm_device_t * dev, struct file *filp);
117
118extern int i810_driver_dma_quiescent(drm_device_t * dev); 116extern int i810_driver_dma_quiescent(drm_device_t * dev);
119extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, 117extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
120 struct file *filp); 118 struct file *filp);
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 4fea32aed6d2..163f2cbfe60d 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -1239,7 +1239,7 @@ static int i830_flush_queue(drm_device_t * dev)
1239} 1239}
1240 1240
1241/* Must be called with the lock held */ 1241/* Must be called with the lock held */
1242void i830_reclaim_buffers(drm_device_t * dev, struct file *filp) 1242static void i830_reclaim_buffers(drm_device_t * dev, struct file *filp)
1243{ 1243{
1244 drm_device_dma_t *dma = dev->dma; 1244 drm_device_dma_t *dma = dev->dma;
1245 int i; 1245 int i;
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index bf9075b576bd..85bc5be6f916 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -123,9 +123,6 @@ typedef struct drm_i830_private {
123extern drm_ioctl_desc_t i830_ioctls[]; 123extern drm_ioctl_desc_t i830_ioctls[];
124extern int i830_max_ioctl; 124extern int i830_max_ioctl;
125 125
126/* i830_dma.c */
127extern void i830_reclaim_buffers(drm_device_t * dev, struct file *filp);
128
129/* i830_irq.c */ 126/* i830_irq.c */
130extern int i830_irq_emit(struct inode *inode, struct file *filp, 127extern int i830_irq_emit(struct inode *inode, struct file *filp,
131 unsigned int cmd, unsigned long arg); 128 unsigned int cmd, unsigned long arg);
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index 9140703da1ba..1ff4c7ca0bff 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -344,18 +344,20 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
344 int i; 344 int i;
345 RING_LOCALS; 345 RING_LOCALS;
346 346
347 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
348 return DRM_ERR(EINVAL);
349
350 BEGIN_LP_RING(((dwords+1)&~1));
351
347 for (i = 0; i < dwords;) { 352 for (i = 0; i < dwords;) {
348 int cmd, sz; 353 int cmd, sz;
349 354
350 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 355 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
351 return DRM_ERR(EINVAL); 356 return DRM_ERR(EINVAL);
352 357
353/* printk("%d/%d ", i, dwords); */
354
355 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 358 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
356 return DRM_ERR(EINVAL); 359 return DRM_ERR(EINVAL);
357 360
358 BEGIN_LP_RING(sz);
359 OUT_RING(cmd); 361 OUT_RING(cmd);
360 362
361 while (++i, --sz) { 363 while (++i, --sz) {
@@ -365,9 +367,13 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
365 } 367 }
366 OUT_RING(cmd); 368 OUT_RING(cmd);
367 } 369 }
368 ADVANCE_LP_RING();
369 } 370 }
370 371
372 if (dwords & 1)
373 OUT_RING(0);
374
375 ADVANCE_LP_RING();
376
371 return 0; 377 return 0;
372} 378}
373 379
@@ -401,6 +407,21 @@ static int i915_emit_box(drm_device_t * dev,
401 return 0; 407 return 0;
402} 408}
403 409
410static void i915_emit_breadcrumb(drm_device_t *dev)
411{
412 drm_i915_private_t *dev_priv = dev->dev_private;
413 RING_LOCALS;
414
415 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
416
417 BEGIN_LP_RING(4);
418 OUT_RING(CMD_STORE_DWORD_IDX);
419 OUT_RING(20);
420 OUT_RING(dev_priv->counter);
421 OUT_RING(0);
422 ADVANCE_LP_RING();
423}
424
404static int i915_dispatch_cmdbuffer(drm_device_t * dev, 425static int i915_dispatch_cmdbuffer(drm_device_t * dev,
405 drm_i915_cmdbuffer_t * cmd) 426 drm_i915_cmdbuffer_t * cmd)
406{ 427{
@@ -429,6 +450,7 @@ static int i915_dispatch_cmdbuffer(drm_device_t * dev,
429 return ret; 450 return ret;
430 } 451 }
431 452
453 i915_emit_breadcrumb(dev);
432 return 0; 454 return 0;
433} 455}
434 456
@@ -475,12 +497,7 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev,
475 497
476 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; 498 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
477 499
478 BEGIN_LP_RING(4); 500 i915_emit_breadcrumb(dev);
479 OUT_RING(CMD_STORE_DWORD_IDX);
480 OUT_RING(20);
481 OUT_RING(dev_priv->counter);
482 OUT_RING(0);
483 ADVANCE_LP_RING();
484 501
485 return 0; 502 return 0;
486} 503}
@@ -657,7 +674,7 @@ static int i915_getparam(DRM_IOCTL_ARGS)
657 value = READ_BREADCRUMB(dev_priv); 674 value = READ_BREADCRUMB(dev_priv);
658 break; 675 break;
659 default: 676 default:
660 DRM_ERROR("Unkown parameter %d\n", param.param); 677 DRM_ERROR("Unknown parameter %d\n", param.param);
661 return DRM_ERR(EINVAL); 678 return DRM_ERR(EINVAL);
662 } 679 }
663 680
@@ -742,7 +759,8 @@ drm_ioctl_desc_t i915_ioctls[] = {
742 [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH}, 759 [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
743 [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH}, 760 [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
744 [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 761 [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
745 [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH} 762 [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
763 [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }
746}; 764};
747 765
748int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 766int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 77412ddac007..4cb3da578330 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -74,6 +74,30 @@ typedef struct _drm_i915_sarea {
74 int pf_active; 74 int pf_active;
75 int pf_current_page; /* which buffer is being displayed? */ 75 int pf_current_page; /* which buffer is being displayed? */
76 int perf_boxes; /* performance boxes to be displayed */ 76 int perf_boxes; /* performance boxes to be displayed */
77 int width, height; /* screen size in pixels */
78
79 drm_handle_t front_handle;
80 int front_offset;
81 int front_size;
82
83 drm_handle_t back_handle;
84 int back_offset;
85 int back_size;
86
87 drm_handle_t depth_handle;
88 int depth_offset;
89 int depth_size;
90
91 drm_handle_t tex_handle;
92 int tex_offset;
93 int tex_size;
94 int log_tex_granularity;
95 int pitch;
96 int rotation; /* 0, 90, 180 or 270 */
97 int rotated_offset;
98 int rotated_size;
99 int rotated_pitch;
100 int virtualX, virtualY;
77} drm_i915_sarea_t; 101} drm_i915_sarea_t;
78 102
79/* Flags for perf_boxes 103/* Flags for perf_boxes
@@ -99,6 +123,7 @@ typedef struct _drm_i915_sarea {
99#define DRM_I915_FREE 0x09 123#define DRM_I915_FREE 0x09
100#define DRM_I915_INIT_HEAP 0x0a 124#define DRM_I915_INIT_HEAP 0x0a
101#define DRM_I915_CMDBUFFER 0x0b 125#define DRM_I915_CMDBUFFER 0x0b
126#define DRM_I915_DESTROY_HEAP 0x0c
102 127
103#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 128#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
104#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 129#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -112,6 +137,7 @@ typedef struct _drm_i915_sarea {
112#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) 137#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
113#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) 138#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
114#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) 139#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
140#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
115 141
116/* Allow drivers to submit batchbuffers directly to hardware, relying 142/* Allow drivers to submit batchbuffers directly to hardware, relying
117 * on the security mechanisms provided by hardware. 143 * on the security mechanisms provided by hardware.
@@ -191,4 +217,11 @@ typedef struct drm_i915_mem_init_heap {
191 int start; 217 int start;
192} drm_i915_mem_init_heap_t; 218} drm_i915_mem_init_heap_t;
193 219
220/* Allow memory manager to be torn down and re-initialized (eg on
221 * rotate):
222 */
223typedef struct drm_i915_mem_destroy_heap {
224 int region;
225} drm_i915_mem_destroy_heap_t;
226
194#endif /* _I915_DRM_H_ */ 227#endif /* _I915_DRM_H_ */
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index c6c71b45f101..7a65666899e4 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -37,16 +37,17 @@
37 37
38#define DRIVER_NAME "i915" 38#define DRIVER_NAME "i915"
39#define DRIVER_DESC "Intel Graphics" 39#define DRIVER_DESC "Intel Graphics"
40#define DRIVER_DATE "20051209" 40#define DRIVER_DATE "20060119"
41 41
42/* Interface history: 42/* Interface history:
43 * 43 *
44 * 1.1: Original. 44 * 1.1: Original.
45 * 1.2: Add Power Management 45 * 1.2: Add Power Management
46 * 1.3: Add vblank support 46 * 1.3: Add vblank support
47 * 1.4: Fix cmdbuffer path, add heap destroy
47 */ 48 */
48#define DRIVER_MAJOR 1 49#define DRIVER_MAJOR 1
49#define DRIVER_MINOR 3 50#define DRIVER_MINOR 4
50#define DRIVER_PATCHLEVEL 0 51#define DRIVER_PATCHLEVEL 0
51 52
52typedef struct _drm_i915_ring_buffer { 53typedef struct _drm_i915_ring_buffer {
@@ -123,6 +124,7 @@ extern void i915_driver_irq_uninstall(drm_device_t * dev);
123extern int i915_mem_alloc(DRM_IOCTL_ARGS); 124extern int i915_mem_alloc(DRM_IOCTL_ARGS);
124extern int i915_mem_free(DRM_IOCTL_ARGS); 125extern int i915_mem_free(DRM_IOCTL_ARGS);
125extern int i915_mem_init_heap(DRM_IOCTL_ARGS); 126extern int i915_mem_init_heap(DRM_IOCTL_ARGS);
127extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
126extern void i915_mem_takedown(struct mem_block **heap); 128extern void i915_mem_takedown(struct mem_block **heap);
127extern void i915_mem_release(drm_device_t * dev, 129extern void i915_mem_release(drm_device_t * dev,
128 DRMFILE filp, struct mem_block *heap); 130 DRMFILE filp, struct mem_block *heap);
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index ba87ff17ff64..52c67324df58 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -365,3 +365,34 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS)
365 365
366 return init_heap(heap, initheap.start, initheap.size); 366 return init_heap(heap, initheap.start, initheap.size);
367} 367}
368
369int i915_mem_destroy_heap( DRM_IOCTL_ARGS )
370{
371 DRM_DEVICE;
372 drm_i915_private_t *dev_priv = dev->dev_private;
373 drm_i915_mem_destroy_heap_t destroyheap;
374 struct mem_block **heap;
375
376 if ( !dev_priv ) {
377 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
378 return DRM_ERR(EINVAL);
379 }
380
381 DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data,
382 sizeof(destroyheap) );
383
384 heap = get_heap( dev_priv, destroyheap.region );
385 if (!heap) {
386 DRM_ERROR("get_heap failed");
387 return DRM_ERR(EFAULT);
388 }
389
390 if (!*heap) {
391 DRM_ERROR("heap not initialized?");
392 return DRM_ERR(EFAULT);
393 }
394
395 i915_mem_takedown( heap );
396 return 0;
397}
398
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 915665c7fe7c..9bb8ae0c1c27 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -1640,7 +1640,7 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
1640 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) 1640 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
1641 { 1641 {
1642 drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); 1642 drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
1643 dev_priv->gart_info.addr = 0; 1643 dev_priv->gart_info.addr = NULL;
1644 } 1644 }
1645 } 1645 }
1646 /* only clear to the start of flags */ 1646 /* only clear to the start of flags */
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
index 0d426deeefec..59c7520bf9a2 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/char/drm/savage_bci.c
@@ -32,6 +32,8 @@
32#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ 32#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
33#define SAVAGE_FREELIST_DEBUG 0 33#define SAVAGE_FREELIST_DEBUG 0
34 34
35static int savage_do_cleanup_bci(drm_device_t *dev);
36
35static int 37static int
36savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n) 38savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
37{ 39{
@@ -895,7 +897,7 @@ static int savage_do_init_bci(drm_device_t * dev, drm_savage_init_t * init)
895 return 0; 897 return 0;
896} 898}
897 899
898int savage_do_cleanup_bci(drm_device_t * dev) 900static int savage_do_cleanup_bci(drm_device_t * dev)
899{ 901{
900 drm_savage_private_t *dev_priv = dev->dev_private; 902 drm_savage_private_t *dev_priv = dev->dev_private;
901 903
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
index dd46cb85439c..8f04b3d82292 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/char/drm/savage_drv.h
@@ -212,7 +212,6 @@ extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
212extern int savage_driver_firstopen(drm_device_t *dev); 212extern int savage_driver_firstopen(drm_device_t *dev);
213extern void savage_driver_lastclose(drm_device_t *dev); 213extern void savage_driver_lastclose(drm_device_t *dev);
214extern int savage_driver_unload(drm_device_t *dev); 214extern int savage_driver_unload(drm_device_t *dev);
215extern int savage_do_cleanup_bci(drm_device_t * dev);
216extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp); 215extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp);
217 216
218/* state functions */ 217/* state functions */
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index 593c0b8f650a..a691ae74129d 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -222,7 +222,7 @@ static int via_initialize(drm_device_t * dev,
222 return 0; 222 return 0;
223} 223}
224 224
225int via_dma_init(DRM_IOCTL_ARGS) 225static int via_dma_init(DRM_IOCTL_ARGS)
226{ 226{
227 DRM_DEVICE; 227 DRM_DEVICE;
228 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 228 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -321,7 +321,7 @@ int via_driver_dma_quiescent(drm_device_t * dev)
321 return 0; 321 return 0;
322} 322}
323 323
324int via_flush_ioctl(DRM_IOCTL_ARGS) 324static int via_flush_ioctl(DRM_IOCTL_ARGS)
325{ 325{
326 DRM_DEVICE; 326 DRM_DEVICE;
327 327
@@ -330,7 +330,7 @@ int via_flush_ioctl(DRM_IOCTL_ARGS)
330 return via_driver_dma_quiescent(dev); 330 return via_driver_dma_quiescent(dev);
331} 331}
332 332
333int via_cmdbuffer(DRM_IOCTL_ARGS) 333static int via_cmdbuffer(DRM_IOCTL_ARGS)
334{ 334{
335 DRM_DEVICE; 335 DRM_DEVICE;
336 drm_via_cmdbuffer_t cmdbuf; 336 drm_via_cmdbuffer_t cmdbuf;
@@ -375,7 +375,7 @@ static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
375 return ret; 375 return ret;
376} 376}
377 377
378int via_pci_cmdbuffer(DRM_IOCTL_ARGS) 378static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
379{ 379{
380 DRM_DEVICE; 380 DRM_DEVICE;
381 drm_via_cmdbuffer_t cmdbuf; 381 drm_via_cmdbuffer_t cmdbuf;
@@ -665,7 +665,7 @@ static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
665 * User interface to the space and lag functions. 665 * User interface to the space and lag functions.
666 */ 666 */
667 667
668int via_cmdbuf_size(DRM_IOCTL_ARGS) 668static int via_cmdbuf_size(DRM_IOCTL_ARGS)
669{ 669{
670 DRM_DEVICE; 670 DRM_DEVICE;
671 drm_via_cmdbuf_size_t d_siz; 671 drm_via_cmdbuf_size_t d_siz;
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 9d5e027dae0e..b7f17457b424 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -108,7 +108,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
108 int num_desc = 0; 108 int num_desc = 0;
109 int cur_line; 109 int cur_line;
110 dma_addr_t next = 0 | VIA_DMA_DPR_EC; 110 dma_addr_t next = 0 | VIA_DMA_DPR_EC;
111 drm_via_descriptor_t *desc_ptr = 0; 111 drm_via_descriptor_t *desc_ptr = NULL;
112 112
113 if (mode == 1) 113 if (mode == 1)
114 desc_ptr = vsg->desc_pages[cur_descriptor_page]; 114 desc_ptr = vsg->desc_pages[cur_descriptor_page];
@@ -167,7 +167,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
167 */ 167 */
168 168
169 169
170void 170static void
171via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) 171via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
172{ 172{
173 struct page *page; 173 struct page *page;
@@ -581,7 +581,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
581 int ret = 0; 581 int ret = 0;
582 582
583 vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 583 vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
584 vsg->bounce_buffer = 0; 584 vsg->bounce_buffer = NULL;
585 585
586 vsg->state = dr_via_sg_init; 586 vsg->state = dr_via_sg_init;
587 587
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index aad4f99f5405..52bcc7b1ba45 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -110,11 +110,6 @@ extern int via_mem_free(DRM_IOCTL_ARGS);
110extern int via_agp_init(DRM_IOCTL_ARGS); 110extern int via_agp_init(DRM_IOCTL_ARGS);
111extern int via_map_init(DRM_IOCTL_ARGS); 111extern int via_map_init(DRM_IOCTL_ARGS);
112extern int via_decoder_futex(DRM_IOCTL_ARGS); 112extern int via_decoder_futex(DRM_IOCTL_ARGS);
113extern int via_dma_init(DRM_IOCTL_ARGS);
114extern int via_cmdbuffer(DRM_IOCTL_ARGS);
115extern int via_flush_ioctl(DRM_IOCTL_ARGS);
116extern int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
117extern int via_cmdbuf_size(DRM_IOCTL_ARGS);
118extern int via_wait_irq(DRM_IOCTL_ARGS); 113extern int via_wait_irq(DRM_IOCTL_ARGS);
119extern int via_dma_blit_sync( DRM_IOCTL_ARGS ); 114extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
120extern int via_dma_blit( DRM_IOCTL_ARGS ); 115extern int via_dma_blit( DRM_IOCTL_ARGS );
@@ -139,8 +134,6 @@ extern int via_driver_dma_quiescent(drm_device_t * dev);
139extern void via_init_futex(drm_via_private_t * dev_priv); 134extern void via_init_futex(drm_via_private_t * dev_priv);
140extern void via_cleanup_futex(drm_via_private_t * dev_priv); 135extern void via_cleanup_futex(drm_via_private_t * dev_priv);
141extern void via_release_futex(drm_via_private_t * dev_priv, int context); 136extern void via_release_futex(drm_via_private_t * dev_priv, int context);
142extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq,
143 int force_sequence, unsigned int *sequence);
144 137
145extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq); 138extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
146extern void via_init_dmablit(drm_device_t *dev); 139extern void via_init_dmablit(drm_device_t *dev);
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index 56d7e3daea12..6152415644e9 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -190,7 +190,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
190 return ret; 190 return ret;
191} 191}
192 192
193int 193static int
194via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, 194via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
195 unsigned int *sequence) 195 unsigned int *sequence)
196{ 196{
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 3f3ac039f4d9..57539d8f9f7c 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -359,7 +359,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
359 } 359 }
360 } 360 }
361 361
362 schedule_delayed_work(&tty->buf.work, 1); 362 tty_schedule_flip(tty);
363 363
364 info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; 364 info->stat_flags &= ~ESP_STAT_RX_TIMEOUT;
365 release_pio_buffer(pio_buf); 365 release_pio_buffer(pio_buf);
@@ -426,7 +426,7 @@ static inline void receive_chars_dma_done(struct esp_struct *info,
426 } 426 }
427 tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag); 427 tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag);
428 } 428 }
429 schedule_delayed_work(&tty->buf.work, 1); 429 tty_schedule_flip(tty);
430 } 430 }
431 431
432 if (dma_bytes != num_bytes) { 432 if (dma_bytes != num_bytes) {
diff --git a/drivers/char/ip2/i2cmd.c b/drivers/char/ip2/i2cmd.c
index cb8f4198e9a3..e7af647800b6 100644
--- a/drivers/char/ip2/i2cmd.c
+++ b/drivers/char/ip2/i2cmd.c
@@ -139,7 +139,6 @@ static UCHAR ct79[] = { 2, BYP, 0x4F,0 }; // XMIT_NOW
139//static UCHAR ct86[]={ 2, BTH, 0x56,0 }; // RCV_ENABLE 139//static UCHAR ct86[]={ 2, BTH, 0x56,0 }; // RCV_ENABLE
140static UCHAR ct87[] = { 1, BYP, 0x57 }; // HW_TEST 140static UCHAR ct87[] = { 1, BYP, 0x57 }; // HW_TEST
141//static UCHAR ct88[]={ 3, BTH, 0x58,0,0 }; // RCV_THRESHOLD 141//static UCHAR ct88[]={ 3, BTH, 0x58,0,0 }; // RCV_THRESHOLD
142static UCHAR ct89[]={ 1, BYP, 0x59 }; // DSS_NOW
143//static UCHAR ct90[]={ 3, BYP, 0x5A,0,0 }; // Set SILO 142//static UCHAR ct90[]={ 3, BYP, 0x5A,0,0 }; // Set SILO
144//static UCHAR ct91[]={ 2, BYP, 0x5B,0 }; // timed break 143//static UCHAR ct91[]={ 2, BYP, 0x5B,0 }; // timed break
145 144
diff --git a/drivers/char/ip2main.c b/drivers/char/ip2main.c
index 56e93a5a1e24..48fcfba37bfa 100644
--- a/drivers/char/ip2main.c
+++ b/drivers/char/ip2main.c
@@ -2906,65 +2906,16 @@ ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg )
2906 rc = -EINVAL; 2906 rc = -EINVAL;
2907 break; 2907 break;
2908 case 3: // Trace device 2908 case 3: // Trace device
2909 if ( cmd == 1 ) { 2909 /*
2910 rc = put_user(iiSendPendingMail, pIndex++ ); 2910 * akpm: This used to write a whole bunch of function addresses
2911 rc = put_user(i2InitChannels, pIndex++ ); 2911 * to userspace, which generated lots of put_user() warnings.
2912 rc = put_user(i2QueueNeeds, pIndex++ ); 2912 * I killed it all. Just return "success" and don't do
2913 rc = put_user(i2QueueCommands, pIndex++ ); 2913 * anything.
2914 rc = put_user(i2GetStatus, pIndex++ ); 2914 */
2915 rc = put_user(i2Input, pIndex++ ); 2915 if (cmd == 1)
2916 rc = put_user(i2InputFlush, pIndex++ ); 2916 rc = 0;
2917 rc = put_user(i2Output, pIndex++ ); 2917 else
2918 rc = put_user(i2FlushOutput, pIndex++ );
2919 rc = put_user(i2DrainWakeup, pIndex++ );
2920 rc = put_user(i2DrainOutput, pIndex++ );
2921 rc = put_user(i2OutputFree, pIndex++ );
2922 rc = put_user(i2StripFifo, pIndex++ );
2923 rc = put_user(i2StuffFifoBypass, pIndex++ );
2924 rc = put_user(i2StuffFifoFlow, pIndex++ );
2925 rc = put_user(i2StuffFifoInline, pIndex++ );
2926 rc = put_user(i2ServiceBoard, pIndex++ );
2927 rc = put_user(serviceOutgoingFifo, pIndex++ );
2928 // rc = put_user(ip2_init, pIndex++ );
2929 rc = put_user(ip2_init_board, pIndex++ );
2930 rc = put_user(find_eisa_board, pIndex++ );
2931 rc = put_user(set_irq, pIndex++ );
2932 rc = put_user(ip2_interrupt, pIndex++ );
2933 rc = put_user(ip2_poll, pIndex++ );
2934 rc = put_user(service_all_boards, pIndex++ );
2935 rc = put_user(do_input, pIndex++ );
2936 rc = put_user(do_status, pIndex++ );
2937#ifndef IP2DEBUG_OPEN
2938 rc = put_user(0, pIndex++ );
2939#else
2940 rc = put_user(open_sanity_check, pIndex++ );
2941#endif
2942 rc = put_user(ip2_open, pIndex++ );
2943 rc = put_user(ip2_close, pIndex++ );
2944 rc = put_user(ip2_hangup, pIndex++ );
2945 rc = put_user(ip2_write, pIndex++ );
2946 rc = put_user(ip2_putchar, pIndex++ );
2947 rc = put_user(ip2_flush_chars, pIndex++ );
2948 rc = put_user(ip2_write_room, pIndex++ );
2949 rc = put_user(ip2_chars_in_buf, pIndex++ );
2950 rc = put_user(ip2_flush_buffer, pIndex++ );
2951
2952 //rc = put_user(ip2_wait_until_sent, pIndex++ );
2953 rc = put_user(0, pIndex++ );
2954
2955 rc = put_user(ip2_throttle, pIndex++ );
2956 rc = put_user(ip2_unthrottle, pIndex++ );
2957 rc = put_user(ip2_ioctl, pIndex++ );
2958 rc = put_user(0, pIndex++ );
2959 rc = put_user(get_serial_info, pIndex++ );
2960 rc = put_user(set_serial_info, pIndex++ );
2961 rc = put_user(ip2_set_termios, pIndex++ );
2962 rc = put_user(ip2_set_line_discipline, pIndex++ );
2963 rc = put_user(set_params, pIndex++ );
2964 } else {
2965 rc = -EINVAL; 2918 rc = -EINVAL;
2966 }
2967
2968 break; 2919 break;
2969 2920
2970 default: 2921 default:
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 0097f06fa67b..d745004281d0 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -481,7 +481,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
481 } 481 }
482 482
483 if ((addr->channel == IPMI_BMC_CHANNEL) 483 if ((addr->channel == IPMI_BMC_CHANNEL)
484 || (addr->channel >= IPMI_NUM_CHANNELS) 484 || (addr->channel >= IPMI_MAX_CHANNELS)
485 || (addr->channel < 0)) 485 || (addr->channel < 0))
486 return -EINVAL; 486 return -EINVAL;
487 487
@@ -1321,7 +1321,7 @@ static int i_ipmi_request(ipmi_user_t user,
1321 unsigned char ipmb_seq; 1321 unsigned char ipmb_seq;
1322 long seqid; 1322 long seqid;
1323 1323
1324 if (addr->channel >= IPMI_NUM_CHANNELS) { 1324 if (addr->channel >= IPMI_MAX_CHANNELS) {
1325 spin_lock_irqsave(&intf->counter_lock, flags); 1325 spin_lock_irqsave(&intf->counter_lock, flags);
1326 intf->sent_invalid_commands++; 1326 intf->sent_invalid_commands++;
1327 spin_unlock_irqrestore(&intf->counter_lock, flags); 1327 spin_unlock_irqrestore(&intf->counter_lock, flags);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6ed213bd702c..e59b638766ef 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1270,36 +1270,36 @@ static int try_init_port(int intf_num, struct smi_info **new_info)
1270 return 0; 1270 return 0;
1271} 1271}
1272 1272
1273static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset) 1273static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1274{ 1274{
1275 return readb((io->addr)+(offset * io->regspacing)); 1275 return readb((io->addr)+(offset * io->regspacing));
1276} 1276}
1277 1277
1278static void mem_outb(struct si_sm_io *io, unsigned int offset, 1278static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1279 unsigned char b) 1279 unsigned char b)
1280{ 1280{
1281 writeb(b, (io->addr)+(offset * io->regspacing)); 1281 writeb(b, (io->addr)+(offset * io->regspacing));
1282} 1282}
1283 1283
1284static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset) 1284static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1285{ 1285{
1286 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) 1286 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1287 && 0xff; 1287 && 0xff;
1288} 1288}
1289 1289
1290static void mem_outw(struct si_sm_io *io, unsigned int offset, 1290static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1291 unsigned char b) 1291 unsigned char b)
1292{ 1292{
1293 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); 1293 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1294} 1294}
1295 1295
1296static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset) 1296static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1297{ 1297{
1298 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) 1298 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1299 && 0xff; 1299 && 0xff;
1300} 1300}
1301 1301
1302static void mem_outl(struct si_sm_io *io, unsigned int offset, 1302static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1303 unsigned char b) 1303 unsigned char b)
1304{ 1304{
1305 writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); 1305 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
@@ -1349,16 +1349,16 @@ static int mem_setup(struct smi_info *info)
1349 upon the register size. */ 1349 upon the register size. */
1350 switch (info->io.regsize) { 1350 switch (info->io.regsize) {
1351 case 1: 1351 case 1:
1352 info->io.inputb = mem_inb; 1352 info->io.inputb = intf_mem_inb;
1353 info->io.outputb = mem_outb; 1353 info->io.outputb = intf_mem_outb;
1354 break; 1354 break;
1355 case 2: 1355 case 2:
1356 info->io.inputb = mem_inw; 1356 info->io.inputb = intf_mem_inw;
1357 info->io.outputb = mem_outw; 1357 info->io.outputb = intf_mem_outw;
1358 break; 1358 break;
1359 case 4: 1359 case 4:
1360 info->io.inputb = mem_inl; 1360 info->io.inputb = intf_mem_inl;
1361 info->io.outputb = mem_outl; 1361 info->io.outputb = intf_mem_outl;
1362 break; 1362 break;
1363#ifdef readq 1363#ifdef readq
1364 case 8: 1364 case 8:
diff --git a/drivers/char/rio/cirrus.h b/drivers/char/rio/cirrus.h
index 217ff09f2fa1..89bd94eb45be 100644
--- a/drivers/char/rio/cirrus.h
+++ b/drivers/char/rio/cirrus.h
@@ -40,148 +40,6 @@
40#endif 40#endif
41#define _cirrus_h 1 41#define _cirrus_h 1
42 42
43#ifdef RTA
44#define TO_UART RX
45#define TO_DRIVER TX
46#endif
47
48#ifdef HOST
49#define TO_UART TX
50#define TO_DRIVER RX
51#endif
52#ifdef RTA
53/* Miscellaneous defines for CIRRUS addresses and related logic for
54 interrupts etc.
55*/
56#define MAP(a) ((short *)(cirrus_base + (a)))
57#define outp(a,b) (*MAP (a) =(b))
58#define inp(a) ((*MAP (a)) & 0xff)
59#define CIRRUS_FIRST (short*)0x7300
60#define CIRRUS_SECOND (short*)0x7200
61#define CIRRUS_THIRD (short*)0x7100
62#define CIRRUS_FOURTH (short*)0x7000
63#define PORTS_ON_CIRRUS 4
64#define CIRRUS_FIFO_SIZE 12
65#define SPACE 0x20
66#define TAB 0x09
67#define LINE_FEED 0x0a
68#define CARRIAGE_RETURN 0x0d
69#define BACKSPACE 0x08
70#define SPACES_IN_TABS 8
71#define SEND_ESCAPE 0x00
72#define START_BREAK 0x81
73#define TIMER_TICK 0x82
74#define STOP_BREAK 0x83
75#define BASE(a) ((a) < 4 ? (short*)CIRRUS_FIRST : ((a) < 8 ? (short *)CIRRUS_SECOND : ((a) < 12 ? (short*)CIRRUS_THIRD : (short *)CIRRUS_FOURTH)))
76#define txack1 ((short *)0x7104)
77#define rxack1 ((short *)0x7102)
78#define mdack1 ((short *)0x7106)
79#define txack2 ((short *)0x7006)
80#define rxack2 ((short *)0x7004)
81#define mdack2 ((short *)0x7100)
82#define int_latch ((short *) 0x7800)
83#define int_status ((short *) 0x7c00)
84#define tx1_pending 0x20
85#define rx1_pending 0x10
86#define md1_pending 0x40
87#define tx2_pending 0x02
88#define rx2_pending 0x01
89#define md2_pending 0x40
90#define module1_bits 0x07
91#define module1_modern 0x08
92#define module2_bits 0x70
93#define module2_modern 0x80
94#define module_blank 0xf
95#define rs232_d25 0x0
96#define rs232_rj45 0x1
97#define rs422_d25 0x3
98#define parallel 0x5
99
100#define CLK0 0x00
101#define CLK1 0x01
102#define CLK2 0x02
103#define CLK3 0x03
104#define CLK4 0x04
105
106#define CIRRUS_REVC 0x42
107#define CIRRUS_REVE 0x44
108
109#define TURNON 1
110#define TURNOFF 0
111
112/* The list of CIRRUS registers.
113 NB. These registers are relative values on 8 bit boundaries whereas
114 on the RTA's the CIRRUS registers are on word boundaries. Use pointer
115 arithmetic (short *) to obtain the real addresses required */
116#define ccr 0x05 /* Channel Command Register */
117#define ier 0x06 /* Interrupt Enable Register */
118#define cor1 0x08 /* Channel Option Register 1 */
119#define cor2 0x09 /* Channel Option Register 2 */
120#define cor3 0x0a /* Channel Option Register 3 */
121#define cor4 0x1e /* Channel Option Register 4 */
122#define cor5 0x1f /* Channel Option Register 5 */
123
124#define ccsr 0x0b /* Channel Control Status Register */
125#define rdcr 0x0e /* Receive Data Count Register */
126#define tdcr 0x12 /* Transmit Data Count Register */
127#define mcor1 0x15 /* Modem Change Option Register 1 */
128#define mcor2 0x16 /* Modem Change Option Regsiter 2 */
129
130#define livr 0x18 /* Local Interrupt Vector Register */
131#define schr1 0x1a /* Special Character Register 1 */
132#define schr2 0x1b /* Special Character Register 2 */
133#define schr3 0x1c /* Special Character Register 3 */
134#define schr4 0x1d /* Special Character Register 4 */
135
136#define rtr 0x20 /* Receive Timer Register */
137#define rtpr 0x21 /* Receive Timeout Period Register */
138#define lnc 0x24 /* Lnext character */
139
140#define rivr 0x43 /* Receive Interrupt Vector Register */
141#define tivr 0x42 /* Transmit Interrupt Vector Register */
142#define mivr 0x41 /* Modem Interrupt Vector Register */
143#define gfrcr 0x40 /* Global Firmware Revision code Reg */
144#define ricr 0x44 /* Receive Interrupting Channel Reg */
145#define ticr 0x45 /* Transmit Interrupting Channel Reg */
146#define micr 0x46 /* Modem Interrupting Channel Register */
147
148#define gcr 0x4b /* Global configuration register */
149#define misr 0x4c /* Modem interrupt status register */
150
151#define rbusr 0x59
152#define tbusr 0x5a
153#define mbusr 0x5b
154
155#define eoir 0x60 /* End Of Interrupt Register */
156#define rdsr 0x62 /* Receive Data / Status Register */
157#define tdr 0x63 /* Transmit Data Register */
158#define svrr 0x67 /* Service Request Register */
159
160#define car 0x68 /* Channel Access Register */
161#define mir 0x69 /* Modem Interrupt Register */
162#define tir 0x6a /* Transmit Interrupt Register */
163#define rir 0x6b /* Receive Interrupt Register */
164#define msvr1 0x6c /* Modem Signal Value Register 1 */
165#define msvr2 0x6d /* Modem Signal Value Register 2 */
166#define psvr 0x6f /* Printer Signal Value Register */
167
168#define tbpr 0x72 /* Transmit Baud Rate Period Register */
169#define tcor 0x76 /* Transmit Clock Option Register */
170
171#define rbpr 0x78 /* Receive Baud Rate Period Register */
172#define rber 0x7a /* Receive Baud Rate Extension Register */
173#define rcor 0x7c /* Receive Clock Option Register */
174#define ppr 0x7e /* Prescalar Period Register */
175
176/* Misc registers used for forcing the 1400 out of its reset woes */
177#define airl 0x6d
178#define airm 0x6e
179#define airh 0x6f
180#define btcr 0x66
181#define mtcr 0x6c
182#define tber 0x74
183
184#endif /* #ifdef RTA */
185 43
186 44
187/* Bit fields for particular registers */ 45/* Bit fields for particular registers */
diff --git a/drivers/char/rio/defaults.h b/drivers/char/rio/defaults.h
index 5b600c32ac02..d55c2f6a9877 100644
--- a/drivers/char/rio/defaults.h
+++ b/drivers/char/rio/defaults.h
@@ -45,13 +45,6 @@ static char *_rio_defaults_h_sccs = "@(#)defaults.h 1.1";
45#define MILLISECOND (int) (1000/64) /* 15.625 low ticks */ 45#define MILLISECOND (int) (1000/64) /* 15.625 low ticks */
46#define SECOND (int) 15625 /* Low priority ticks */ 46#define SECOND (int) 15625 /* Low priority ticks */
47 47
48#ifdef RTA
49#define RX_LIMIT (ushort) 3
50#endif
51#ifdef HOST
52#define RX_LIMIT (ushort) 1
53#endif
54
55#define LINK_TIMEOUT (int) (POLL_PERIOD / 2) 48#define LINK_TIMEOUT (int) (POLL_PERIOD / 2)
56 49
57 50
diff --git a/drivers/char/rio/link.h b/drivers/char/rio/link.h
index bfba5b0c033e..48d68ca7f825 100644
--- a/drivers/char/rio/link.h
+++ b/drivers/char/rio/link.h
@@ -102,30 +102,14 @@
102/* 102/*
103** LED stuff 103** LED stuff
104*/ 104*/
105#if defined(RTA)
106#define LED_OFF ((ushort) 0) /* LED off */
107#define LED_RED ((ushort) 1) /* LED Red */
108#define LED_GREEN ((ushort) 2) /* LED Green */
109#define LED_ORANGE ((ushort) 4) /* LED Orange */
110#define LED_1TO8_OPEN ((ushort) 1) /* Port 1->8 LED on */
111#define LED_9TO16_OPEN ((ushort) 2) /* Port 9->16 LED on */
112#define LED_SET_COLOUR(colour) (link->led = (colour))
113#define LED_OR_COLOUR(colour) (link->led |= (colour))
114#define LED_TIMEOUT(time) (link->led_timeout = RioTimePlus(RioTime(),(time)))
115#else
116#define LED_SET_COLOUR(colour) 105#define LED_SET_COLOUR(colour)
117#define LED_OR_COLOUR(colour) 106#define LED_OR_COLOUR(colour)
118#define LED_TIMEOUT(time) 107#define LED_TIMEOUT(time)
119#endif /* RTA */
120 108
121struct LPB { 109struct LPB {
122 WORD link_number; /* Link Number */ 110 WORD link_number; /* Link Number */
123 Channel_ptr in_ch; /* Link In Channel */ 111 Channel_ptr in_ch; /* Link In Channel */
124 Channel_ptr out_ch; /* Link Out Channel */ 112 Channel_ptr out_ch; /* Link Out Channel */
125#ifdef RTA
126 uchar stat_led; /* Port open leds */
127 uchar led; /* True, light led! */
128#endif
129 BYTE attached_serial[4]; /* Attached serial number */ 113 BYTE attached_serial[4]; /* Attached serial number */
130 BYTE attached_host_serial[4]; 114 BYTE attached_host_serial[4];
131 /* Serial number of Host who 115 /* Serial number of Host who
@@ -144,30 +128,12 @@ struct LPB {
144 WORD WaitNoBoot; /* Secs to hold off booting */ 128 WORD WaitNoBoot; /* Secs to hold off booting */
145 PKT_ptr add_packet_list; /* Add packets to here */ 129 PKT_ptr add_packet_list; /* Add packets to here */
146 PKT_ptr remove_packet_list; /* Send packets from here */ 130 PKT_ptr remove_packet_list; /* Send packets from here */
147#ifdef RTA
148#ifdef DCIRRUS
149#define QBUFS_PER_REDIRECT (4 / PKTS_PER_BUFFER + 1)
150#else
151#define QBUFS_PER_REDIRECT (8 / PKTS_PER_BUFFER + 1)
152#endif
153 PKT_ptr_ptr rd_add; /* Add a new Packet here */
154 Q_BUF_ptr rd_add_qb; /* Pointer to the add Q buf */
155 PKT_ptr_ptr rd_add_st_qbb; /* Pointer to start of the Q's buf */
156 PKT_ptr_ptr rd_add_end_qbb; /* Pointer to the end of the Q's buf */
157 PKT_ptr_ptr rd_remove; /* Remove a Packet here */
158 Q_BUF_ptr rd_remove_qb; /* Pointer to the remove Q buf */
159 PKT_ptr_ptr rd_remove_st_qbb; /* Pointer to the start of the Q buf */
160 PKT_ptr_ptr rd_remove_end_qbb; /* Pointer to the end of the Q buf */
161 ushort pkts_in_q; /* Packets in queue */
162#endif
163 131
164 Channel_ptr lrt_fail_chan; /* Lrt's failure channel */ 132 Channel_ptr lrt_fail_chan; /* Lrt's failure channel */
165 Channel_ptr ltt_fail_chan; /* Ltt's failure channel */ 133 Channel_ptr ltt_fail_chan; /* Ltt's failure channel */
166 134
167#if defined (HOST) || defined (INKERNEL)
168 /* RUP structure for HOST to driver communications */ 135 /* RUP structure for HOST to driver communications */
169 struct RUP rup; 136 struct RUP rup;
170#endif
171 struct RUP link_rup; /* RUP for the link (POLL, 137 struct RUP link_rup; /* RUP for the link (POLL,
172 topology etc.) */ 138 topology etc.) */
173 WORD attached_link; /* Number of attached link */ 139 WORD attached_link; /* Number of attached link */
diff --git a/drivers/char/rio/list.h b/drivers/char/rio/list.h
index 36aad4c9cb3a..79b853140ae5 100644
--- a/drivers/char/rio/list.h
+++ b/drivers/char/rio/list.h
@@ -44,8 +44,6 @@ static char *_rio_list_h_sccs = "@(#)list.h 1.9";
44 44
45#define PKT_IN_USE 0x1 45#define PKT_IN_USE 0x1
46 46
47#ifdef INKERNEL
48
49#define ZERO_PTR (ushort) 0x8000 47#define ZERO_PTR (ushort) 0x8000
50#define CaD PortP->Caddr 48#define CaD PortP->Caddr
51 49
@@ -54,143 +52,5 @@ static char *_rio_list_h_sccs = "@(#)list.h 1.9";
54** to by the TxAdd pointer has PKT_IN_USE clear in its address. 52** to by the TxAdd pointer has PKT_IN_USE clear in its address.
55*/ 53*/
56 54
57#ifndef linux
58#if defined( MIPS ) && !defined( MIPSEISA )
59/* May the shoes of the Devil dance on your grave for creating this */
60#define can_add_transmit(PacketP,PortP) \
61 (!((uint)(PacketP = (struct PKT *)RIO_PTR(CaD,RINDW(PortP->TxAdd))) \
62 & (PKT_IN_USE<<2)))
63
64#elif defined(MIPSEISA) || defined(nx6000) || \
65 defined(drs6000) || defined(UWsparc)
66
67#define can_add_transmit(PacketP,PortP) \
68 (!((uint)(PacketP = (struct PKT *)RIO_PTR(CaD,RINDW(PortP->TxAdd))) \
69 & PKT_IN_USE))
70
71#else
72#define can_add_transmit(PacketP,PortP) \
73 (!((uint)(PacketP = (struct PKT *)RIO_PTR(CaD,*PortP->TxAdd)) \
74 & PKT_IN_USE))
75#endif
76
77/*
78** To add a packet to the queue, you set the PKT_IN_USE bit in the address,
79** and then move the TxAdd pointer along one position to point to the next
80** packet pointer. You must wrap the pointer from the end back to the start.
81*/
82#if defined(MIPS) || defined(nx6000) || defined(drs6000) || defined(UWsparc)
83# define add_transmit(PortP) \
84 WINDW(PortP->TxAdd,RINDW(PortP->TxAdd) | PKT_IN_USE);\
85 if (PortP->TxAdd == PortP->TxEnd)\
86 PortP->TxAdd = PortP->TxStart;\
87 else\
88 PortP->TxAdd++;\
89 WWORD(PortP->PhbP->tx_add , RIO_OFF(CaD,PortP->TxAdd));
90#elif defined(AIX)
91# define add_transmit(PortP) \
92 {\
93 register ushort *TxAddP = (ushort *)RIO_PTR(Cad,PortP->TxAddO);\
94 WINDW( TxAddP, RINDW( TxAddP ) | PKT_IN_USE );\
95 if (PortP->TxAddO == PortP->TxEndO )\
96 PortP->TxAddO = PortP->TxStartO;\
97 else\
98 PortP->TxAddO += sizeof(ushort);\
99 WWORD(((PHB *)RIO_PTR(Cad,PortP->PhbO))->tx_add , PortP->TxAddO );\
100 }
101#else
102# define add_transmit(PortP) \
103 *PortP->TxAdd |= PKT_IN_USE;\
104 if (PortP->TxAdd == PortP->TxEnd)\
105 PortP->TxAdd = PortP->TxStart;\
106 else\
107 PortP->TxAdd++;\
108 PortP->PhbP->tx_add = RIO_OFF(CaD,PortP->TxAdd);
109#endif
110
111/*
112** can_remove_receive( PacketP, PortP ) returns non-zero if PKT_IN_USE is set
113** for the next packet on the queue. It will also set PacketP to point to the
114** relevant packet, [having cleared the PKT_IN_USE bit]. If PKT_IN_USE is clear,
115** then can_remove_receive() returns 0.
116*/
117#if defined(MIPS) || defined(nx6000) || defined(drs6000) || defined(UWsparc)
118# define can_remove_receive(PacketP,PortP) \
119 ((RINDW(PortP->RxRemove) & PKT_IN_USE) ? \
120 (PacketP=(struct PKT *)RIO_PTR(CaD,(RINDW(PortP->RxRemove) & ~PKT_IN_USE))):0)
121#elif defined(AIX)
122# define can_remove_receive(PacketP,PortP) \
123 ((RINDW((ushort *)RIO_PTR(Cad,PortP->RxRemoveO)) & PKT_IN_USE) ? \
124 (PacketP=(struct PKT *)RIO_PTR(Cad,RINDW((ushort *)RIO_PTR(Cad,PortP->RxRemoveO)) & ~PKT_IN_USE)):0)
125#else
126# define can_remove_receive(PacketP,PortP) \
127 ((*PortP->RxRemove & PKT_IN_USE) ? \
128 (PacketP=(struct PKT *)RIO_PTR(CaD,(*PortP->RxRemove & ~PKT_IN_USE))):0)
129#endif
130
131
132/*
133** Will God see it within his heart to forgive us for this thing that
134** we have created? To remove a packet from the receive queue you clear
135** its PKT_IN_USE bit, and then bump the pointers. Once the pointers
136** get to the end, they must be wrapped back to the start.
137*/
138#if defined(MIPS) || defined(nx6000) || defined(drs6000) || defined(UWsparc)
139# define remove_receive(PortP) \
140 WINDW(PortP->RxRemove, (RINDW(PortP->RxRemove) & ~PKT_IN_USE));\
141 if (PortP->RxRemove == PortP->RxEnd)\
142 PortP->RxRemove = PortP->RxStart;\
143 else\
144 PortP->RxRemove++;\
145 WWORD(PortP->PhbP->rx_remove , RIO_OFF(CaD,PortP->RxRemove));
146#elif defined(AIX)
147# define remove_receive(PortP) \
148 {\
149 register ushort *RxRemoveP = (ushort *)RIO_PTR(Cad,PortP->RxRemoveO);\
150 WINDW( RxRemoveP, RINDW( RxRemoveP ) & ~PKT_IN_USE );\
151 if (PortP->RxRemoveO == PortP->RxEndO)\
152 PortP->RxRemoveO = PortP->RxStartO;\
153 else\
154 PortP->RxRemoveO += sizeof(ushort);\
155 WWORD(((PHB *)RIO_PTR(Cad,PortP->PhbO))->rx_remove, PortP->RxRemoveO );\
156 }
157#else
158# define remove_receive(PortP) \
159 *PortP->RxRemove &= ~PKT_IN_USE;\
160 if (PortP->RxRemove == PortP->RxEnd)\
161 PortP->RxRemove = PortP->RxStart;\
162 else\
163 PortP->RxRemove++;\
164 PortP->PhbP->rx_remove = RIO_OFF(CaD,PortP->RxRemove);
165#endif
166#endif
167
168
169#else /* !IN_KERNEL */
170
171#define ZERO_PTR NULL
172
173
174#ifdef HOST
175/* #define can_remove_transmit(pkt,phb) ((((char*)pkt = (*(char**)(phb->tx_remove))-1) || 1)) && (*phb->u3.s2.tx_remove_ptr & PKT_IN_USE)) */
176#define remove_transmit(phb) *phb->u3.s2.tx_remove_ptr &= ~(ushort)PKT_IN_USE;\
177 if (phb->tx_remove == phb->tx_end)\
178 phb->tx_remove = phb->tx_start;\
179 else\
180 phb->tx_remove++;
181#define can_add_receive(phb) !(*phb->u4.s2.rx_add_ptr & PKT_IN_USE)
182#define add_receive(pkt,phb) *phb->rx_add = pkt;\
183 *phb->u4.s2.rx_add_ptr |= PKT_IN_USE;\
184 if (phb->rx_add == phb->rx_end)\
185 phb->rx_add = phb->rx_start;\
186 else\
187 phb->rx_add++;
188#endif
189#endif
190
191#ifdef RTA
192#define splx(oldspl) if ((oldspl) == 0) spl0()
193#endif
194
195#endif /* ifndef _list.h */ 55#endif /* ifndef _list.h */
196/*********** end of file ***********/ 56/*********** end of file ***********/
diff --git a/drivers/char/rio/parmmap.h b/drivers/char/rio/parmmap.h
index fe4e00567065..e24acc1d1844 100644
--- a/drivers/char/rio/parmmap.h
+++ b/drivers/char/rio/parmmap.h
@@ -78,14 +78,9 @@ struct PARM_MAP {
78 WORD idle_count; /* Idle time counter */ 78 WORD idle_count; /* Idle time counter */
79 WORD busy_count; /* Busy counter */ 79 WORD busy_count; /* Busy counter */
80 WORD idle_control; /* Control Idle Process */ 80 WORD idle_control; /* Control Idle Process */
81#if defined(HOST) || defined(INKERNEL)
82 WORD tx_intr; /* TX interrupt pending */ 81 WORD tx_intr; /* TX interrupt pending */
83 WORD rx_intr; /* RX interrupt pending */ 82 WORD rx_intr; /* RX interrupt pending */
84 WORD rup_intr; /* RUP interrupt pending */ 83 WORD rup_intr; /* RUP interrupt pending */
85#endif
86#if defined(RTA)
87 WORD dying_count; /* Count of processes dead */
88#endif
89}; 84};
90 85
91#endif 86#endif
diff --git a/drivers/char/rio/phb.h b/drivers/char/rio/phb.h
index 3baebf8513af..2663ca0306e2 100644
--- a/drivers/char/rio/phb.h
+++ b/drivers/char/rio/phb.h
@@ -44,17 +44,6 @@
44#endif 44#endif
45 45
46 46
47 /*************************************************
48 * Set the LIMIT values.
49 ************************************************/
50#ifdef RTA
51#define RX_LIMIT (ushort) 3
52#endif
53#ifdef HOST
54#define RX_LIMIT (ushort) 1
55#endif
56
57
58/************************************************* 47/*************************************************
59 * Handshake asserted. Deasserted by the LTT(s) 48 * Handshake asserted. Deasserted by the LTT(s)
60 ************************************************/ 49 ************************************************/
@@ -69,11 +58,7 @@
69/************************************************* 58/*************************************************
70 * Maximum number of PHB's 59 * Maximum number of PHB's
71 ************************************************/ 60 ************************************************/
72#if defined (HOST) || defined (INKERNEL)
73#define MAX_PHB ((ushort) 128) /* range 0-127 */ 61#define MAX_PHB ((ushort) 128) /* range 0-127 */
74#else
75#define MAX_PHB ((ushort) 8) /* range 0-7 */
76#endif
77 62
78/************************************************* 63/*************************************************
79 * Defines for the mode fields 64 * Defines for the mode fields
@@ -139,141 +124,23 @@
139 * the start. The pointer tx_add points to a SPACE to put a Packet. 124 * the start. The pointer tx_add points to a SPACE to put a Packet.
140 * The pointer tx_remove points to the next Packet to remove 125 * The pointer tx_remove points to the next Packet to remove
141 *************************************************************************/ 126 *************************************************************************/
142#ifndef INKERNEL
143#define src_unit u2.s2.unit
144#define src_port u2.s2.port
145#define dest_unit u1.s1.unit
146#define dest_port u1.s1.port
147#endif
148#ifdef HOST
149#define tx_start u3.s1.tx_start_ptr_ptr
150#define tx_add u3.s1.tx_add_ptr_ptr
151#define tx_end u3.s1.tx_end_ptr_ptr
152#define tx_remove u3.s1.tx_remove_ptr_ptr
153#define rx_start u4.s1.rx_start_ptr_ptr
154#define rx_add u4.s1.rx_add_ptr_ptr
155#define rx_end u4.s1.rx_end_ptr_ptr
156#define rx_remove u4.s1.rx_remove_ptr_ptr
157#endif
158typedef struct PHB PHB; 127typedef struct PHB PHB;
159struct PHB { 128struct PHB {
160#ifdef RTA
161 ushort port;
162#endif
163#ifdef INKERNEL
164 WORD source; 129 WORD source;
165#else
166 union {
167 ushort source; /* Complete source */
168 struct {
169 unsigned char unit; /* Source unit */
170 unsigned char port; /* Source port */
171 } s2;
172 } u2;
173#endif
174 WORD handshake; 130 WORD handshake;
175 WORD status; 131 WORD status;
176 NUMBER timeout; /* Maximum of 1.9 seconds */ 132 NUMBER timeout; /* Maximum of 1.9 seconds */
177 WORD link; /* Send down this link */ 133 WORD link; /* Send down this link */
178#ifdef INKERNEL
179 WORD destination; 134 WORD destination;
180#else
181 union {
182 ushort destination; /* Complete destination */
183 struct {
184 unsigned char unit; /* Destination unit */
185 unsigned char port; /* Destination port */
186 } s1;
187 } u1;
188#endif
189#ifdef RTA
190 ushort tx_pkts_added;
191 ushort tx_pkts_removed;
192 Q_BUF_ptr tx_q_start; /* Start of the Q list chain */
193 short num_tx_q_bufs; /* Number of Q buffers in the chain */
194 PKT_ptr_ptr tx_add; /* Add a new Packet here */
195 Q_BUF_ptr tx_add_qb; /* Pointer to the add Q buf */
196 PKT_ptr_ptr tx_add_st_qbb; /* Pointer to start of the Q's buf */
197 PKT_ptr_ptr tx_add_end_qbb; /* Pointer to the end of the Q's buf */
198 PKT_ptr_ptr tx_remove; /* Remove a Packet here */
199 Q_BUF_ptr tx_remove_qb; /* Pointer to the remove Q buf */
200 PKT_ptr_ptr tx_remove_st_qbb; /* Pointer to the start of the Q buf */
201 PKT_ptr_ptr tx_remove_end_qbb; /* Pointer to the end of the Q buf */
202#endif
203#ifdef INKERNEL
204 PKT_ptr_ptr tx_start; 135 PKT_ptr_ptr tx_start;
205 PKT_ptr_ptr tx_end; 136 PKT_ptr_ptr tx_end;
206 PKT_ptr_ptr tx_add; 137 PKT_ptr_ptr tx_add;
207 PKT_ptr_ptr tx_remove; 138 PKT_ptr_ptr tx_remove;
208#endif
209#ifdef HOST
210 union {
211 struct {
212 PKT_ptr_ptr tx_start_ptr_ptr;
213 PKT_ptr_ptr tx_end_ptr_ptr;
214 PKT_ptr_ptr tx_add_ptr_ptr;
215 PKT_ptr_ptr tx_remove_ptr_ptr;
216 } s1;
217 struct {
218 ushort *tx_start_ptr;
219 ushort *tx_end_ptr;
220 ushort *tx_add_ptr;
221 ushort *tx_remove_ptr;
222 } s2;
223 } u3;
224#endif
225 139
226#ifdef RTA
227 ushort rx_pkts_added;
228 ushort rx_pkts_removed;
229 Q_BUF_ptr rx_q_start; /* Start of the Q list chain */
230 short num_rx_q_bufs; /* Number of Q buffers in the chain */
231 PKT_ptr_ptr rx_add; /* Add a new Packet here */
232 Q_BUF_ptr rx_add_qb; /* Pointer to the add Q buf */
233 PKT_ptr_ptr rx_add_st_qbb; /* Pointer to start of the Q's buf */
234 PKT_ptr_ptr rx_add_end_qbb; /* Pointer to the end of the Q's buf */
235 PKT_ptr_ptr rx_remove; /* Remove a Packet here */
236 Q_BUF_ptr rx_remove_qb; /* Pointer to the remove Q buf */
237 PKT_ptr_ptr rx_remove_st_qbb; /* Pointer to the start of the Q buf */
238 PKT_ptr_ptr rx_remove_end_qbb; /* Pointer to the end of the Q buf */
239#endif
240#ifdef INKERNEL
241 PKT_ptr_ptr rx_start; 140 PKT_ptr_ptr rx_start;
242 PKT_ptr_ptr rx_end; 141 PKT_ptr_ptr rx_end;
243 PKT_ptr_ptr rx_add; 142 PKT_ptr_ptr rx_add;
244 PKT_ptr_ptr rx_remove; 143 PKT_ptr_ptr rx_remove;
245#endif
246#ifdef HOST
247 union {
248 struct {
249 PKT_ptr_ptr rx_start_ptr_ptr;
250 PKT_ptr_ptr rx_end_ptr_ptr;
251 PKT_ptr_ptr rx_add_ptr_ptr;
252 PKT_ptr_ptr rx_remove_ptr_ptr;
253 } s1;
254 struct {
255 ushort *rx_start_ptr;
256 ushort *rx_end_ptr;
257 ushort *rx_add_ptr;
258 ushort *rx_remove_ptr;
259 } s2;
260 } u4;
261#endif
262
263#ifdef RTA /* some fields for the remotes */
264 ushort flush_count; /* Count of write flushes */
265 ushort txmode; /* Modes for tx */
266 ushort rxmode; /* Modes for rx */
267 ushort portmode; /* Generic modes */
268 ushort column; /* TAB3 column count */
269 ushort tx_subscript; /* (TX) Subscript into data field */
270 ushort rx_subscript; /* (RX) Subscript into data field */
271 PKT_ptr rx_incomplete; /* Hold an incomplete packet here */
272 ushort modem_bits; /* Modem bits to mask */
273 ushort lastModem; /* Modem control lines. */
274 ushort addr; /* Address for sub commands */
275 ushort MonitorTstate; /* TRUE if monitoring tstop */
276#endif
277 144
278}; 145};
279 146
diff --git a/drivers/char/rio/pkt.h b/drivers/char/rio/pkt.h
index 882fd429ac2e..7011e52e82db 100644
--- a/drivers/char/rio/pkt.h
+++ b/drivers/char/rio/pkt.h
@@ -70,39 +70,12 @@
70#define CONTROL_DATA_WNDW (DATA_WNDW << 8) 70#define CONTROL_DATA_WNDW (DATA_WNDW << 8)
71 71
72struct PKT { 72struct PKT {
73#ifdef INKERNEL
74 BYTE dest_unit; /* Destination Unit Id */ 73 BYTE dest_unit; /* Destination Unit Id */
75 BYTE dest_port; /* Destination POrt */ 74 BYTE dest_port; /* Destination POrt */
76 BYTE src_unit; /* Source Unit Id */ 75 BYTE src_unit; /* Source Unit Id */
77 BYTE src_port; /* Source POrt */ 76 BYTE src_port; /* Source POrt */
78#else
79 union {
80 ushort destination; /* Complete destination */
81 struct {
82 unsigned char unit; /* Destination unit */
83 unsigned char port; /* Destination port */
84 } s1;
85 } u1;
86 union {
87 ushort source; /* Complete source */
88 struct {
89 unsigned char unit; /* Source unit */
90 unsigned char port; /* Source port */
91 } s2;
92 } u2;
93#endif
94#ifdef INKERNEL
95 BYTE len; 77 BYTE len;
96 BYTE control; 78 BYTE control;
97#else
98 union {
99 ushort control;
100 struct {
101 unsigned char len;
102 unsigned char control;
103 } s3;
104 } u3;
105#endif
106 BYTE data[PKT_MAX_DATA_LEN]; 79 BYTE data[PKT_MAX_DATA_LEN];
107 /* Actual data :-) */ 80 /* Actual data :-) */
108 WORD csum; /* C-SUM */ 81 WORD csum; /* C-SUM */
diff --git a/drivers/char/rio/qbuf.h b/drivers/char/rio/qbuf.h
index acd9e8e5307d..391ffc335535 100644
--- a/drivers/char/rio/qbuf.h
+++ b/drivers/char/rio/qbuf.h
@@ -46,11 +46,7 @@ static char *_rio_qbuf_h_sccs = "@(#)qbuf.h 1.1";
46 46
47 47
48 48
49#ifdef HOST
50#define PKTS_PER_BUFFER 1
51#else
52#define PKTS_PER_BUFFER (220 / PKT_LENGTH) 49#define PKTS_PER_BUFFER (220 / PKT_LENGTH)
53#endif
54 50
55typedef struct Q_BUF Q_BUF; 51typedef struct Q_BUF Q_BUF;
56struct Q_BUF { 52struct Q_BUF {
diff --git a/drivers/char/rio/riotypes.h b/drivers/char/rio/riotypes.h
index 9b67e2468bec..46084d5c7e98 100644
--- a/drivers/char/rio/riotypes.h
+++ b/drivers/char/rio/riotypes.h
@@ -43,9 +43,6 @@
43#endif 43#endif
44#endif 44#endif
45 45
46#ifdef INKERNEL
47
48#if !defined(MIPSAT)
49typedef unsigned short NUMBER_ptr; 46typedef unsigned short NUMBER_ptr;
50typedef unsigned short WORD_ptr; 47typedef unsigned short WORD_ptr;
51typedef unsigned short BYTE_ptr; 48typedef unsigned short BYTE_ptr;
@@ -65,69 +62,6 @@ typedef unsigned short RUP_ptr;
65typedef unsigned short short_ptr; 62typedef unsigned short short_ptr;
66typedef unsigned short u_short_ptr; 63typedef unsigned short u_short_ptr;
67typedef unsigned short ushort_ptr; 64typedef unsigned short ushort_ptr;
68#else
69/* MIPSAT types */
70typedef char RIO_POINTER[8];
71typedef RIO_POINTER NUMBER_ptr;
72typedef RIO_POINTER WORD_ptr;
73typedef RIO_POINTER BYTE_ptr;
74typedef RIO_POINTER char_ptr;
75typedef RIO_POINTER Channel_ptr;
76typedef RIO_POINTER FREE_LIST_ptr_ptr;
77typedef RIO_POINTER FREE_LIST_ptr;
78typedef RIO_POINTER LPB_ptr;
79typedef RIO_POINTER Process_ptr;
80typedef RIO_POINTER PHB_ptr;
81typedef RIO_POINTER PKT_ptr;
82typedef RIO_POINTER PKT_ptr_ptr;
83typedef RIO_POINTER Q_BUF_ptr;
84typedef RIO_POINTER Q_BUF_ptr_ptr;
85typedef RIO_POINTER ROUTE_STR_ptr;
86typedef RIO_POINTER RUP_ptr;
87typedef RIO_POINTER short_ptr;
88typedef RIO_POINTER u_short_ptr;
89typedef RIO_POINTER ushort_ptr;
90#endif
91
92#else /* not INKERNEL */
93typedef unsigned char BYTE;
94typedef unsigned short WORD;
95typedef unsigned long DWORD;
96typedef short NUMBER;
97typedef short *NUMBER_ptr;
98typedef unsigned short *WORD_ptr;
99typedef unsigned char *BYTE_ptr;
100typedef unsigned char uchar;
101typedef unsigned short ushort;
102typedef unsigned int uint;
103typedef unsigned long ulong;
104typedef unsigned char u_char;
105typedef unsigned short u_short;
106typedef unsigned int u_int;
107typedef unsigned long u_long;
108typedef unsigned short ERROR;
109typedef unsigned long ID;
110typedef char *char_ptr;
111typedef Channel *Channel_ptr;
112typedef struct FREE_LIST *FREE_LIST_ptr;
113typedef struct FREE_LIST **FREE_LIST_ptr_ptr;
114typedef struct LPB *LPB_ptr;
115typedef struct Process *Process_ptr;
116typedef struct PHB *PHB_ptr;
117typedef struct PKT *PKT_ptr;
118typedef struct PKT **PKT_ptr_ptr;
119typedef struct Q_BUF *Q_BUF_ptr;
120typedef struct Q_BUF **Q_BUF_ptr_ptr;
121typedef struct ROUTE_STR *ROUTE_STR_ptr;
122typedef struct RUP *RUP_ptr;
123typedef short *short_ptr;
124typedef u_short *u_short_ptr;
125typedef ushort *ushort_ptr;
126typedef struct PKT PKT;
127typedef struct LPB LPB;
128typedef struct RUP RUP;
129#endif
130
131 65
132#endif /* __riotypes__ */ 66#endif /* __riotypes__ */
133 67
diff --git a/drivers/char/rio/rup.h b/drivers/char/rio/rup.h
index 8d44fec91dd5..f74f67c6f702 100644
--- a/drivers/char/rio/rup.h
+++ b/drivers/char/rio/rup.h
@@ -43,12 +43,7 @@
43#endif 43#endif
44#endif 44#endif
45 45
46#if defined( HOST ) || defined( INKERNEL )
47#define MAX_RUP ((short) 16) 46#define MAX_RUP ((short) 16)
48#endif
49#ifdef RTA
50#define MAX_RUP ((short) 1)
51#endif
52 47
53#define PKTS_PER_RUP ((short) 2) /* They are always used in pairs */ 48#define PKTS_PER_RUP ((short) 2) /* They are always used in pairs */
54 49
diff --git a/drivers/char/rio/sam.h b/drivers/char/rio/sam.h
index 31494054b213..6f754e19015d 100644
--- a/drivers/char/rio/sam.h
+++ b/drivers/char/rio/sam.h
@@ -43,10 +43,6 @@
43#endif 43#endif
44 44
45 45
46#if !defined( HOST ) && !defined( INKERNEL )
47#define RTA 1
48#endif
49
50#define NUM_FREE_LIST_UNITS 500 46#define NUM_FREE_LIST_UNITS 500
51 47
52#ifndef FALSE 48#ifndef FALSE
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 0949dcef0697..7edc6a4dbdc4 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -433,7 +433,7 @@ static void rp_do_receive(struct r_port *info,
433 count += ToRecv; 433 count += ToRecv;
434 } 434 }
435 /* Push the data up to the tty layer */ 435 /* Push the data up to the tty layer */
436 ld->receive_buf(tty, cbuf, fbuf, count); 436 ld->receive_buf(tty, chead, fhead, count);
437done: 437done:
438 tty_ldisc_deref(ld); 438 tty_ldisc_deref(ld);
439} 439}
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 64bf89cb574f..c2490e270f1f 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -931,7 +931,7 @@ static int sx_set_real_termios (void *ptr)
931 case CS6:sx_write_channel_byte (port, hi_mask, 0x3f);break; 931 case CS6:sx_write_channel_byte (port, hi_mask, 0x3f);break;
932 case CS5:sx_write_channel_byte (port, hi_mask, 0x1f);break; 932 case CS5:sx_write_channel_byte (port, hi_mask, 0x1f);break;
933 default: 933 default:
934 printk (KERN_INFO "sx: Invalid wordsize: %d\n", CFLAG & CSIZE); 934 printk (KERN_INFO "sx: Invalid wordsize: %u\n", CFLAG & CSIZE);
935 break; 935 break;
936 } 936 }
937 937
@@ -958,7 +958,7 @@ static int sx_set_real_termios (void *ptr)
958 } else { 958 } else {
959 set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags); 959 set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags);
960 } 960 }
961 sx_dprintk (SX_DEBUG_TERMIOS, "iflags: %x(%d) ", 961 sx_dprintk (SX_DEBUG_TERMIOS, "iflags: %x(%d) ",
962 port->gs.tty->termios->c_iflag, 962 port->gs.tty->termios->c_iflag,
963 I_OTHER(port->gs.tty)); 963 I_OTHER(port->gs.tty));
964 964
@@ -973,7 +973,7 @@ static int sx_set_real_termios (void *ptr)
973 } else { 973 } else {
974 clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags); 974 clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags);
975 } 975 }
976 sx_dprintk (SX_DEBUG_TERMIOS, "oflags: %x(%d)\n", 976 sx_dprintk (SX_DEBUG_TERMIOS, "oflags: %x(%d)\n",
977 port->gs.tty->termios->c_oflag, 977 port->gs.tty->termios->c_oflag,
978 O_OTHER(port->gs.tty)); 978 O_OTHER(port->gs.tty));
979 /* port->c_dcd = sx_get_CD (port); */ 979 /* port->c_dcd = sx_get_CD (port); */
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index eb8b5be4e249..076e07c1da38 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -253,6 +253,7 @@ static void tty_buffer_free_all(struct tty_struct *tty)
253 253
254static void tty_buffer_init(struct tty_struct *tty) 254static void tty_buffer_init(struct tty_struct *tty)
255{ 255{
256 spin_lock_init(&tty->buf.lock);
256 tty->buf.head = NULL; 257 tty->buf.head = NULL;
257 tty->buf.tail = NULL; 258 tty->buf.tail = NULL;
258 tty->buf.free = NULL; 259 tty->buf.free = NULL;
@@ -266,6 +267,7 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
266 p->used = 0; 267 p->used = 0;
267 p->size = size; 268 p->size = size;
268 p->next = NULL; 269 p->next = NULL;
270 p->active = 0;
269 p->char_buf_ptr = (char *)(p->data); 271 p->char_buf_ptr = (char *)(p->data);
270 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; 272 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
271/* printk("Flip create %p\n", p); */ 273/* printk("Flip create %p\n", p); */
@@ -312,25 +314,36 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
312 314
313int tty_buffer_request_room(struct tty_struct *tty, size_t size) 315int tty_buffer_request_room(struct tty_struct *tty, size_t size)
314{ 316{
315 struct tty_buffer *b = tty->buf.tail, *n; 317 struct tty_buffer *b, *n;
316 int left = 0; 318 int left;
319 unsigned long flags;
320
321 spin_lock_irqsave(&tty->buf.lock, flags);
317 322
318 /* OPTIMISATION: We could keep a per tty "zero" sized buffer to 323 /* OPTIMISATION: We could keep a per tty "zero" sized buffer to
319 remove this conditional if its worth it. This would be invisible 324 remove this conditional if its worth it. This would be invisible
320 to the callers */ 325 to the callers */
321 if(b != NULL) 326 if ((b = tty->buf.tail) != NULL) {
322 left = b->size - b->used; 327 left = b->size - b->used;
323 if(left >= size) 328 b->active = 1;
324 return size; 329 } else
325 /* This is the slow path - looking for new buffers to use */ 330 left = 0;
326 n = tty_buffer_find(tty, size); 331
327 if(n == NULL) 332 if (left < size) {
328 return left; 333 /* This is the slow path - looking for new buffers to use */
329 if(b != NULL) 334 if ((n = tty_buffer_find(tty, size)) != NULL) {
330 b->next = n; 335 if (b != NULL) {
331 else 336 b->next = n;
332 tty->buf.head = n; 337 b->active = 0;
333 tty->buf.tail = n; 338 } else
339 tty->buf.head = n;
340 tty->buf.tail = n;
341 n->active = 1;
342 } else
343 size = left;
344 }
345
346 spin_unlock_irqrestore(&tty->buf.lock, flags);
334 return size; 347 return size;
335} 348}
336 349
@@ -396,10 +409,12 @@ EXPORT_SYMBOL_GPL(tty_insert_flip_string_flags);
396int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) 409int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size)
397{ 410{
398 int space = tty_buffer_request_room(tty, size); 411 int space = tty_buffer_request_room(tty, size);
399 struct tty_buffer *tb = tty->buf.tail; 412 if (likely(space)) {
400 *chars = tb->char_buf_ptr + tb->used; 413 struct tty_buffer *tb = tty->buf.tail;
401 memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); 414 *chars = tb->char_buf_ptr + tb->used;
402 tb->used += space; 415 memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
416 tb->used += space;
417 }
403 return space; 418 return space;
404} 419}
405 420
@@ -416,10 +431,12 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
416int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) 431int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size)
417{ 432{
418 int space = tty_buffer_request_room(tty, size); 433 int space = tty_buffer_request_room(tty, size);
419 struct tty_buffer *tb = tty->buf.tail; 434 if (likely(space)) {
420 *chars = tb->char_buf_ptr + tb->used; 435 struct tty_buffer *tb = tty->buf.tail;
421 *flags = tb->flag_buf_ptr + tb->used; 436 *chars = tb->char_buf_ptr + tb->used;
422 tb->used += space; 437 *flags = tb->flag_buf_ptr + tb->used;
438 tb->used += space;
439 }
423 return space; 440 return space;
424} 441}
425 442
@@ -2747,20 +2764,20 @@ static void flush_to_ldisc(void *private_)
2747 schedule_delayed_work(&tty->buf.work, 1); 2764 schedule_delayed_work(&tty->buf.work, 1);
2748 goto out; 2765 goto out;
2749 } 2766 }
2750 spin_lock_irqsave(&tty->read_lock, flags); 2767 spin_lock_irqsave(&tty->buf.lock, flags);
2751 while((tbuf = tty->buf.head) != NULL) { 2768 while((tbuf = tty->buf.head) != NULL && !tbuf->active) {
2752 tty->buf.head = tbuf->next; 2769 tty->buf.head = tbuf->next;
2753 if (tty->buf.head == NULL) 2770 if (tty->buf.head == NULL)
2754 tty->buf.tail = NULL; 2771 tty->buf.tail = NULL;
2755 spin_unlock_irqrestore(&tty->read_lock, flags); 2772 spin_unlock_irqrestore(&tty->buf.lock, flags);
2756 /* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */ 2773 /* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */
2757 disc->receive_buf(tty, tbuf->char_buf_ptr, 2774 disc->receive_buf(tty, tbuf->char_buf_ptr,
2758 tbuf->flag_buf_ptr, 2775 tbuf->flag_buf_ptr,
2759 tbuf->used); 2776 tbuf->used);
2760 spin_lock_irqsave(&tty->read_lock, flags); 2777 spin_lock_irqsave(&tty->buf.lock, flags);
2761 tty_buffer_free(tty, tbuf); 2778 tty_buffer_free(tty, tbuf);
2762 } 2779 }
2763 spin_unlock_irqrestore(&tty->read_lock, flags); 2780 spin_unlock_irqrestore(&tty->buf.lock, flags);
2764out: 2781out:
2765 tty_ldisc_deref(disc); 2782 tty_ldisc_deref(disc);
2766} 2783}
@@ -2852,6 +2869,12 @@ EXPORT_SYMBOL(tty_get_baud_rate);
2852 2869
2853void tty_flip_buffer_push(struct tty_struct *tty) 2870void tty_flip_buffer_push(struct tty_struct *tty)
2854{ 2871{
2872 unsigned long flags;
2873 spin_lock_irqsave(&tty->buf.lock, flags);
2874 if (tty->buf.tail != NULL)
2875 tty->buf.tail->active = 0;
2876 spin_unlock_irqrestore(&tty->buf.lock, flags);
2877
2855 if (tty->low_latency) 2878 if (tty->low_latency)
2856 flush_to_ldisc((void *) tty); 2879 flush_to_ldisc((void *) tty);
2857 else 2880 else
diff --git a/drivers/char/watchdog/sbc_epx_c3.c b/drivers/char/watchdog/sbc_epx_c3.c
index 951764614ebf..7a4dfb95d087 100644
--- a/drivers/char/watchdog/sbc_epx_c3.c
+++ b/drivers/char/watchdog/sbc_epx_c3.c
@@ -25,6 +25,7 @@
25#include <linux/notifier.h> 25#include <linux/notifier.h>
26#include <linux/reboot.h> 26#include <linux/reboot.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/ioport.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/io.h> 30#include <asm/io.h>
30 31
@@ -181,11 +182,14 @@ static int __init watchdog_init(void)
181{ 182{
182 int ret; 183 int ret;
183 184
185 if (!request_region(EPXC3_WATCHDOG_CTL_REG, 2, "epxc3_watchdog"))
186 return -EBUSY;
187
184 ret = register_reboot_notifier(&epx_c3_notifier); 188 ret = register_reboot_notifier(&epx_c3_notifier);
185 if (ret) { 189 if (ret) {
186 printk(KERN_ERR PFX "cannot register reboot notifier " 190 printk(KERN_ERR PFX "cannot register reboot notifier "
187 "(err=%d)\n", ret); 191 "(err=%d)\n", ret);
188 return ret; 192 goto out;
189 } 193 }
190 194
191 ret = misc_register(&epx_c3_miscdev); 195 ret = misc_register(&epx_c3_miscdev);
@@ -193,18 +197,23 @@ static int __init watchdog_init(void)
193 printk(KERN_ERR PFX "cannot register miscdev on minor=%d " 197 printk(KERN_ERR PFX "cannot register miscdev on minor=%d "
194 "(err=%d)\n", WATCHDOG_MINOR, ret); 198 "(err=%d)\n", WATCHDOG_MINOR, ret);
195 unregister_reboot_notifier(&epx_c3_notifier); 199 unregister_reboot_notifier(&epx_c3_notifier);
196 return ret; 200 goto out;
197 } 201 }
198 202
199 printk(banner); 203 printk(banner);
200 204
201 return 0; 205 return 0;
206
207out:
208 release_region(EPXC3_WATCHDOG_CTL_REG, 2);
209 return ret;
202} 210}
203 211
204static void __exit watchdog_exit(void) 212static void __exit watchdog_exit(void)
205{ 213{
206 misc_deregister(&epx_c3_miscdev); 214 misc_deregister(&epx_c3_miscdev);
207 unregister_reboot_notifier(&epx_c3_notifier); 215 unregister_reboot_notifier(&epx_c3_notifier);
216 release_region(EPXC3_WATCHDOG_CTL_REG, 2);
208} 217}
209 218
210module_init(watchdog_init); 219module_init(watchdog_init);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 4819e7fc00dd..18a455651121 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -11,7 +11,6 @@ menu 'EDAC - error detection and reporting (RAS)'
11config EDAC 11config EDAC
12 tristate "EDAC core system error reporting" 12 tristate "EDAC core system error reporting"
13 depends on X86 13 depends on X86
14 default y
15 help 14 help
16 EDAC is designed to report errors in the core system. 15 EDAC is designed to report errors in the core system.
17 These are low-level errors that are reported in the CPU or 16 These are low-level errors that are reported in the CPU or
@@ -46,7 +45,7 @@ config EDAC_MM_EDAC
46 45
47config EDAC_AMD76X 46config EDAC_AMD76X
48 tristate "AMD 76x (760, 762, 768)" 47 tristate "AMD 76x (760, 762, 768)"
49 depends on EDAC_MM_EDAC && PCI 48 depends on EDAC_MM_EDAC && PCI && X86_32
50 help 49 help
51 Support for error detection and correction on the AMD 76x 50 Support for error detection and correction on the AMD 76x
52 series of chipsets used with the Athlon processor. 51 series of chipsets used with the Athlon processor.
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 770a5a633079..c454ded2b060 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1039,10 +1039,10 @@ MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1039 1039
1040 1040
1041static struct pci_driver e752x_driver = { 1041static struct pci_driver e752x_driver = {
1042 name: BS_MOD_STR, 1042 .name = BS_MOD_STR,
1043 probe: e752x_init_one, 1043 .probe = e752x_init_one,
1044 remove: __devexit_p(e752x_remove_one), 1044 .remove = __devexit_p(e752x_remove_one),
1045 id_table: e752x_pci_tbl, 1045 .id_table = e752x_pci_tbl,
1046}; 1046};
1047 1047
1048 1048
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 4be9bd0a1267..b10ee4698b1d 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -14,7 +14,6 @@
14 14
15 15
16#include <linux/config.h> 16#include <linux/config.h>
17#include <linux/version.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 1c81174595b3..d633081fa4c5 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -52,9 +52,9 @@ config IDE
52 52
53if IDE 53if IDE
54 54
55config IDE_MAX_HWIFS 55config IDE_MAX_HWIFS
56 int "Max IDE interfaces" 56 int "Max IDE interfaces"
57 depends on ALPHA || SUPERH 57 depends on ALPHA || SUPERH || IA64
58 default 4 58 default 4
59 help 59 help
60 This is the maximum number of IDE hardware interfaces that will 60 This is the maximum number of IDE hardware interfaces that will
@@ -162,8 +162,8 @@ config BLK_DEV_IDECS
162 tristate "PCMCIA IDE support" 162 tristate "PCMCIA IDE support"
163 depends on PCMCIA 163 depends on PCMCIA
164 help 164 help
165 Support for outboard IDE disks, tape drives, and CD-ROM drives 165 Support for Compact Flash cards, outboard IDE disks, tape drives,
166 connected through a PCMCIA card. 166 and CD-ROM drives connected through a PCMCIA card.
167 167
168config BLK_DEV_IDECD 168config BLK_DEV_IDECD
169 tristate "Include IDE/ATAPI CDROM support" 169 tristate "Include IDE/ATAPI CDROM support"
@@ -267,7 +267,7 @@ config IDE_TASK_IOCTL
267 help 267 help
268 This is a direct raw access to the media. It is a complex but 268 This is a direct raw access to the media. It is a complex but
269 elegant solution to test and validate the domain of the hardware and 269 elegant solution to test and validate the domain of the hardware and
270 perform below the driver data recover if needed. This is the most 270 perform below the driver data recovery if needed. This is the most
271 basic form of media-forensics. 271 basic form of media-forensics.
272 272
273 If you are unsure, say N here. 273 If you are unsure, say N here.
@@ -525,7 +525,7 @@ config BLK_DEV_CS5520
525 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" 525 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
526 depends on EXPERIMENTAL 526 depends on EXPERIMENTAL
527 help 527 help
528 Include support for PIO tuning an virtual DMA on the Cyrix MediaGX 528 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
529 5510/5520 chipset. This will automatically be detected and 529 5510/5520 chipset. This will automatically be detected and
530 configured if found. 530 configured if found.
531 531
@@ -662,7 +662,7 @@ config PDC202XX_BURST
662 662
663 It was originally designed for the PDC20246/Ultra33, whose BIOS will 663 It was originally designed for the PDC20246/Ultra33, whose BIOS will
664 only setup UDMA on the first two PDC20246 cards. It has also been 664 only setup UDMA on the first two PDC20246 cards. It has also been
665 used succesfully on a PDC20265/Ultra100, allowing use of UDMA modes 665 used successfully on a PDC20265/Ultra100, allowing use of UDMA modes
666 when the PDC20265 BIOS has been disabled (for faster boot up). 666 when the PDC20265 BIOS has been disabled (for faster boot up).
667 667
668 Please read the comments at the top of 668 Please read the comments at the top of
@@ -673,13 +673,6 @@ config PDC202XX_BURST
673config BLK_DEV_PDC202XX_NEW 673config BLK_DEV_PDC202XX_NEW
674 tristate "PROMISE PDC202{68|69|70|71|75|76|77} support" 674 tristate "PROMISE PDC202{68|69|70|71|75|76|77} support"
675 675
676# FIXME - probably wants to be one for old and for new
677config PDC202XX_FORCE
678 bool "Enable controller even if disabled by BIOS"
679 depends on BLK_DEV_PDC202XX_NEW
680 help
681 Enable the PDC202xx controller even if it has been disabled in the BIOS setup.
682
683config BLK_DEV_SVWKS 676config BLK_DEV_SVWKS
684 tristate "ServerWorks OSB4/CSB5/CSB6 chipsets support" 677 tristate "ServerWorks OSB4/CSB5/CSB6 chipsets support"
685 help 678 help
@@ -722,7 +715,7 @@ config BLK_DEV_SIS5513
722config BLK_DEV_SLC90E66 715config BLK_DEV_SLC90E66
723 tristate "SLC90E66 chipset support" 716 tristate "SLC90E66 chipset support"
724 help 717 help
725 This driver ensures (U)DMA support for Victroy66 SouthBridges for 718 This driver ensures (U)DMA support for Victory66 SouthBridges for
726 SMsC with Intel NorthBridges. This is an Ultra66 based chipset. 719 SMsC with Intel NorthBridges. This is an Ultra66 based chipset.
727 The nice thing about it is that you can mix Ultra/DMA/PIO devices 720 The nice thing about it is that you can mix Ultra/DMA/PIO devices
728 and it will handle timing cycles. Since this is an improved 721 and it will handle timing cycles. Since this is an improved
@@ -1060,7 +1053,7 @@ config IDEDMA_IVB
1060 in that mode with an 80c ribbon. 1053 in that mode with an 80c ribbon.
1061 1054
1062 If you are experiencing compatibility or performance problems, you 1055 If you are experiencing compatibility or performance problems, you
1063 MAY try to answering Y here. However, it does not necessarily solve 1056 MAY try to answer Y here. However, it does not necessarily solve
1064 any of your problems, it could even cause more of them. 1057 any of your problems, it could even cause more of them.
1065 1058
1066 It is normally safe to answer Y; however, the default is N. 1059 It is normally safe to answer Y; however, the default is N.
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index ca25f9e3d0f4..6c60a9d2afd8 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive)
776 ide_id_has_flush_cache_ext(id)); 776 ide_id_has_flush_cache_ext(id));
777 777
778 printk(KERN_INFO "%s: cache flushes %ssupported\n", 778 printk(KERN_INFO "%s: cache flushes %ssupported\n",
779 drive->name, barrier ? "" : "not"); 779 drive->name, barrier ? "" : "not ");
780 780
781 if (barrier) { 781 if (barrier) {
782 ordered = QUEUE_ORDERED_DRAIN_FLUSH; 782 ordered = QUEUE_ORDERED_DRAIN_FLUSH;
@@ -889,11 +889,7 @@ static void idedisk_setup (ide_drive_t *drive)
889 if (drive->id_read == 0) 889 if (drive->id_read == 0)
890 return; 890 return;
891 891
892 /* 892 if (drive->removable) {
893 * CompactFlash cards and their brethern look just like hard drives
894 * to us, but they are removable and don't have a doorlock mechanism.
895 */
896 if (drive->removable && !(drive->is_flash)) {
897 /* 893 /*
898 * Removable disks (eg. SYQUEST); ignore 'WD' drives 894 * Removable disks (eg. SYQUEST); ignore 'WD' drives
899 */ 895 */
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8d50df4526a4..c01615dec202 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,8 +55,8 @@
55#include <asm/io.h> 55#include <asm/io.h>
56#include <asm/bitops.h> 56#include <asm/bitops.h>
57 57
58int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate, 58static int __ide_end_request(ide_drive_t *drive, struct request *rq,
59 int nr_sectors) 59 int uptodate, int nr_sectors)
60{ 60{
61 int ret = 1; 61 int ret = 1;
62 62
@@ -91,7 +91,6 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
91 91
92 return ret; 92 return ret;
93} 93}
94EXPORT_SYMBOL(__ide_end_request);
95 94
96/** 95/**
97 * ide_end_request - complete an IDE I/O 96 * ide_end_request - complete an IDE I/O
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index af7af958ab3e..b72dde70840a 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -1243,6 +1243,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1243 */ 1243 */
1244 if (stat == 0xff) 1244 if (stat == 0xff)
1245 return -ENODEV; 1245 return -ENODEV;
1246 touch_softlockup_watchdog();
1246 } 1247 }
1247 return -EBUSY; 1248 return -EBUSY;
1248} 1249}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index e7425546b4b1..427d1c204174 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -125,45 +125,6 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
125} 125}
126 126
127/** 127/**
128 * drive_is_flashcard - check for compact flash
129 * @drive: drive to check
130 *
131 * CompactFlash cards and their brethern pretend to be removable
132 * hard disks, except:
133 * (1) they never have a slave unit, and
134 * (2) they don't have doorlock mechanisms.
135 * This test catches them, and is invoked elsewhere when setting
136 * appropriate config bits.
137 *
138 * FIXME: This treatment is probably applicable for *all* PCMCIA (PC CARD)
139 * devices, so in linux 2.3.x we should change this to just treat all
140 * PCMCIA drives this way, and get rid of the model-name tests below
141 * (too big of an interface change for 2.4.x).
142 * At that time, we might also consider parameterizing the timeouts and
143 * retries, since these are MUCH faster than mechanical drives. -M.Lord
144 */
145
146static inline int drive_is_flashcard (ide_drive_t *drive)
147{
148 struct hd_driveid *id = drive->id;
149
150 if (drive->removable) {
151 if (id->config == 0x848a) return 1; /* CompactFlash */
152 if (!strncmp(id->model, "KODAK ATA_FLASH", 15) /* Kodak */
153 || !strncmp(id->model, "Hitachi CV", 10) /* Hitachi */
154 || !strncmp(id->model, "SunDisk SDCFB", 13) /* old SanDisk */
155 || !strncmp(id->model, "SanDisk SDCFB", 13) /* SanDisk */
156 || !strncmp(id->model, "HAGIWARA HPC", 12) /* Hagiwara */
157 || !strncmp(id->model, "LEXAR ATA_FLASH", 15) /* Lexar */
158 || !strncmp(id->model, "ATA_FLASH", 9)) /* Simple Tech */
159 {
160 return 1; /* yes, it is a flash memory card */
161 }
162 }
163 return 0; /* no, it is not a flash memory card */
164}
165
166/**
167 * do_identify - identify a drive 128 * do_identify - identify a drive
168 * @drive: drive to identify 129 * @drive: drive to identify
169 * @cmd: command used 130 * @cmd: command used
@@ -278,13 +239,17 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
278 /* 239 /*
279 * Not an ATAPI device: looks like a "regular" hard disk 240 * Not an ATAPI device: looks like a "regular" hard disk
280 */ 241 */
281 if (id->config & (1<<7)) 242
243 /*
244 * 0x848a = CompactFlash device
245 * These are *not* removable in Linux definition of the term
246 */
247
248 if ((id->config != 0x848a) && (id->config & (1<<7)))
282 drive->removable = 1; 249 drive->removable = 1;
283 250
284 if (drive_is_flashcard(drive))
285 drive->is_flash = 1;
286 drive->media = ide_disk; 251 drive->media = ide_disk;
287 printk("%s DISK drive\n", (drive->is_flash) ? "CFA" : "ATA" ); 252 printk("%s DISK drive\n", (id->config == 0x848a) ? "CFA" : "ATA" );
288 QUIRK_LIST(drive); 253 QUIRK_LIST(drive);
289 return; 254 return;
290 255
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index afeb02bbb722..b2cc43702f65 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -242,7 +242,6 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
242 drive->name[2] = 'a' + (index * MAX_DRIVES) + unit; 242 drive->name[2] = 'a' + (index * MAX_DRIVES) + unit;
243 drive->max_failures = IDE_DEFAULT_MAX_FAILURES; 243 drive->max_failures = IDE_DEFAULT_MAX_FAILURES;
244 drive->using_dma = 0; 244 drive->using_dma = 0;
245 drive->is_flash = 0;
246 drive->vdma = 0; 245 drive->vdma = 0;
247 INIT_LIST_HEAD(&drive->list); 246 INIT_LIST_HEAD(&drive->list);
248 init_completion(&drive->gendev_rel_comp); 247 init_completion(&drive->gendev_rel_comp);
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index a21b1e11eef4..c743e68c33aa 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -262,6 +262,21 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const ch
262 else 262 else
263 pci_set_drvdata(dev, (void *) aec6xxx_34_base); 263 pci_set_drvdata(dev, (void *) aec6xxx_34_base);
264 264
265 /* These are necessary to get AEC6280 Macintosh cards to work */
266 if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) ||
267 (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)) {
268 u8 reg49h = 0, reg4ah = 0;
269 /* Clear reset and test bits. */
270 pci_read_config_byte(dev, 0x49, &reg49h);
271 pci_write_config_byte(dev, 0x49, reg49h & ~0x30);
272 /* Enable chip interrupt output. */
273 pci_read_config_byte(dev, 0x4a, &reg4ah);
274 pci_write_config_byte(dev, 0x4a, reg4ah & ~0x01);
275 /* Enable burst mode. */
276 pci_read_config_byte(dev, 0x4a, &reg4ah);
277 pci_write_config_byte(dev, 0x4a, reg4ah | 0x80);
278 }
279
265 return dev->irq; 280 return dev->irq;
266} 281}
267 282
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 7b589d948bf9..940bdd4c5784 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1288,6 +1288,10 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1288 goto init_hpt37X_done; 1288 goto init_hpt37X_done;
1289 } 1289 }
1290 } 1290 }
1291 if (!pci_get_drvdata(dev)) {
1292 printk("No Clock Stabilization!!!\n");
1293 return;
1294 }
1291pll_recal: 1295pll_recal:
1292 if (adjust & 1) 1296 if (adjust & 1)
1293 pll -= (adjust >> 1); 1297 pll -= (adjust >> 1);
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 108fda83fea4..38f41b377ff6 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -733,7 +733,7 @@ static void __devinit it8212_disable_raid(struct pci_dev *dev)
733 733
734 pci_write_config_dword(dev,0x4C, 0x02040204); 734 pci_write_config_dword(dev,0x4C, 0x02040204);
735 pci_write_config_byte(dev, 0x42, 0x36); 735 pci_write_config_byte(dev, 0x42, 0x36);
736 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0); 736 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
737} 737}
738 738
739static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const char *name) 739static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const char *name)
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index fe06ebb0e5bf..acd63173199b 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -420,9 +420,6 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
420 .init_hwif = init_hwif_pdc202new, 420 .init_hwif = init_hwif_pdc202new,
421 .channels = 2, 421 .channels = 2,
422 .autodma = AUTODMA, 422 .autodma = AUTODMA,
423#ifndef CONFIG_PDC202XX_FORCE
424 .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
425#endif
426 .bootable = OFF_BOARD, 423 .bootable = OFF_BOARD,
427 },{ /* 3 */ 424 },{ /* 3 */
428 .name = "PDC20271", 425 .name = "PDC20271",
@@ -447,9 +444,6 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
447 .init_hwif = init_hwif_pdc202new, 444 .init_hwif = init_hwif_pdc202new,
448 .channels = 2, 445 .channels = 2,
449 .autodma = AUTODMA, 446 .autodma = AUTODMA,
450#ifndef CONFIG_PDC202XX_FORCE
451 .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
452#endif
453 .bootable = OFF_BOARD, 447 .bootable = OFF_BOARD,
454 },{ /* 6 */ 448 },{ /* 6 */
455 .name = "PDC20277", 449 .name = "PDC20277",
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index ad9d95817f95..6f8f8645b02c 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -786,9 +786,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
786 .init_dma = init_dma_pdc202xx, 786 .init_dma = init_dma_pdc202xx,
787 .channels = 2, 787 .channels = 2,
788 .autodma = AUTODMA, 788 .autodma = AUTODMA,
789#ifndef CONFIG_PDC202XX_FORCE
790 .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
791#endif
792 .bootable = OFF_BOARD, 789 .bootable = OFF_BOARD,
793 .extra = 16, 790 .extra = 16,
794 },{ /* 1 */ 791 },{ /* 1 */
@@ -799,9 +796,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
799 .init_dma = init_dma_pdc202xx, 796 .init_dma = init_dma_pdc202xx,
800 .channels = 2, 797 .channels = 2,
801 .autodma = AUTODMA, 798 .autodma = AUTODMA,
802#ifndef CONFIG_PDC202XX_FORCE
803 .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
804#endif
805 .bootable = OFF_BOARD, 799 .bootable = OFF_BOARD,
806 .extra = 48, 800 .extra = 48,
807 .flags = IDEPCI_FLAG_FORCE_PDC, 801 .flags = IDEPCI_FLAG_FORCE_PDC,
@@ -813,9 +807,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
813 .init_dma = init_dma_pdc202xx, 807 .init_dma = init_dma_pdc202xx,
814 .channels = 2, 808 .channels = 2,
815 .autodma = AUTODMA, 809 .autodma = AUTODMA,
816#ifndef CONFIG_PDC202XX_FORCE
817 .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
818#endif
819 .bootable = OFF_BOARD, 810 .bootable = OFF_BOARD,
820 .extra = 48, 811 .extra = 48,
821 },{ /* 3 */ 812 },{ /* 3 */
@@ -826,9 +817,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
826 .init_dma = init_dma_pdc202xx, 817 .init_dma = init_dma_pdc202xx,
827 .channels = 2, 818 .channels = 2,
828 .autodma = AUTODMA, 819 .autodma = AUTODMA,
829#ifndef CONFIG_PDC202XX_FORCE
830 .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
831#endif
832 .bootable = OFF_BOARD, 820 .bootable = OFF_BOARD,
833 .extra = 48, 821 .extra = 48,
834 .flags = IDEPCI_FLAG_FORCE_PDC, 822 .flags = IDEPCI_FLAG_FORCE_PDC,
@@ -840,9 +828,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
840 .init_dma = init_dma_pdc202xx, 828 .init_dma = init_dma_pdc202xx,
841 .channels = 2, 829 .channels = 2,
842 .autodma = AUTODMA, 830 .autodma = AUTODMA,
843#ifndef CONFIG_PDC202XX_FORCE
844 .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
845#endif
846 .bootable = OFF_BOARD, 831 .bootable = OFF_BOARD,
847 .extra = 48, 832 .extra = 48,
848 } 833 }
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index b3e77df63cef..e9b83e1a3028 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -135,6 +135,7 @@ static u8 piix_ratemask (ide_drive_t *drive)
135 case PCI_DEVICE_ID_INTEL_ICH6_19: 135 case PCI_DEVICE_ID_INTEL_ICH6_19:
136 case PCI_DEVICE_ID_INTEL_ICH7_21: 136 case PCI_DEVICE_ID_INTEL_ICH7_21:
137 case PCI_DEVICE_ID_INTEL_ESB2_18: 137 case PCI_DEVICE_ID_INTEL_ESB2_18:
138 case PCI_DEVICE_ID_INTEL_ICH8_6:
138 mode = 3; 139 mode = 3;
139 break; 140 break;
140 /* UDMA 66 capable */ 141 /* UDMA 66 capable */
@@ -449,6 +450,7 @@ static unsigned int __devinit init_chipset_piix (struct pci_dev *dev, const char
449 case PCI_DEVICE_ID_INTEL_ICH6_19: 450 case PCI_DEVICE_ID_INTEL_ICH6_19:
450 case PCI_DEVICE_ID_INTEL_ICH7_21: 451 case PCI_DEVICE_ID_INTEL_ICH7_21:
451 case PCI_DEVICE_ID_INTEL_ESB2_18: 452 case PCI_DEVICE_ID_INTEL_ESB2_18:
453 case PCI_DEVICE_ID_INTEL_ICH8_6:
452 { 454 {
453 unsigned int extra = 0; 455 unsigned int extra = 0;
454 pci_read_config_dword(dev, 0x54, &extra); 456 pci_read_config_dword(dev, 0x54, &extra);
@@ -575,6 +577,7 @@ static ide_pci_device_t piix_pci_info[] __devinitdata = {
575 /* 21 */ DECLARE_PIIX_DEV("ICH7"), 577 /* 21 */ DECLARE_PIIX_DEV("ICH7"),
576 /* 22 */ DECLARE_PIIX_DEV("ICH4"), 578 /* 22 */ DECLARE_PIIX_DEV("ICH4"),
577 /* 23 */ DECLARE_PIIX_DEV("ESB2"), 579 /* 23 */ DECLARE_PIIX_DEV("ESB2"),
580 /* 24 */ DECLARE_PIIX_DEV("ICH8M"),
578}; 581};
579 582
580/** 583/**
@@ -651,6 +654,7 @@ static struct pci_device_id piix_pci_tbl[] = {
651 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 21}, 654 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 21},
652 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 22}, 655 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 22},
653 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 23}, 656 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 23},
657 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 24},
654 { 0, }, 658 { 0, },
655}; 659};
656MODULE_DEVICE_TABLE(pci, piix_pci_tbl); 660MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 1b85ce166af8..11fe537e2f6f 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -216,7 +216,7 @@ struct Layer1 {
216#define GROUP_TEI 127 216#define GROUP_TEI 127
217#define TEI_SAPI 63 217#define TEI_SAPI 63
218#define CTRL_SAPI 0 218#define CTRL_SAPI 0
219#define PACKET_NOACK 250 219#define PACKET_NOACK 7
220 220
221/* Layer2 Flags */ 221/* Layer2 Flags */
222 222
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
index 3314a5a19854..94c9afb7017c 100644
--- a/drivers/isdn/sc/ioctl.c
+++ b/drivers/isdn/sc/ioctl.c
@@ -71,14 +71,14 @@ int sc_ioctl(int card, scs_ioctl *data)
71 /* 71 /*
72 * Get the SRec from user space 72 * Get the SRec from user space
73 */ 73 */
74 if (copy_from_user(srec, data->dataptr, sizeof(srec))) { 74 if (copy_from_user(srec, data->dataptr, SCIOC_SRECSIZE)) {
75 kfree(rcvmsg); 75 kfree(rcvmsg);
76 kfree(srec); 76 kfree(srec);
77 return -EFAULT; 77 return -EFAULT;
78 } 78 }
79 79
80 status = send_and_receive(card, CMPID, cmReqType2, cmReqClass0, cmReqLoadProc, 80 status = send_and_receive(card, CMPID, cmReqType2, cmReqClass0, cmReqLoadProc,
81 0, sizeof(srec), srec, rcvmsg, SAR_TIMEOUT); 81 0, SCIOC_SRECSIZE, srec, rcvmsg, SAR_TIMEOUT);
82 kfree(rcvmsg); 82 kfree(rcvmsg);
83 kfree(srec); 83 kfree(srec);
84 84
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 74039db846ba..d73779a42417 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -545,7 +545,8 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
545 return 0; 545 return 0;
546 546
547 do { 547 do {
548 *region = find_next_zero_bit((unsigned long *) lc->sync_bits, 548 *region = ext2_find_next_zero_bit(
549 (unsigned long *) lc->sync_bits,
549 lc->region_count, 550 lc->region_count,
550 lc->sync_search); 551 lc->sync_search);
551 lc->sync_search = *region + 1; 552 lc->sync_search = *region + 1;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7145cd150f7b..d05e3125d298 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1024,7 +1024,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1024 rdev-> sb_size = (rdev->sb_size | bmask)+1; 1024 rdev-> sb_size = (rdev->sb_size | bmask)+1;
1025 1025
1026 if (refdev == 0) 1026 if (refdev == 0)
1027 return 1; 1027 ret = 1;
1028 else { 1028 else {
1029 __u64 ev1, ev2; 1029 __u64 ev1, ev2;
1030 struct mdp_superblock_1 *refsb = 1030 struct mdp_superblock_1 *refsb =
@@ -1044,7 +1044,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1044 ev2 = le64_to_cpu(refsb->events); 1044 ev2 = le64_to_cpu(refsb->events);
1045 1045
1046 if (ev1 > ev2) 1046 if (ev1 > ev2)
1047 return 1; 1047 ret = 1;
1048 else
1049 ret = 0;
1048 } 1050 }
1049 if (minor_version) 1051 if (minor_version)
1050 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; 1052 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
@@ -1058,7 +1060,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1058 1060
1059 if (le32_to_cpu(sb->size) > rdev->size*2) 1061 if (le32_to_cpu(sb->size) > rdev->size*2)
1060 return -EINVAL; 1062 return -EINVAL;
1061 return 0; 1063 return ret;
1062} 1064}
1063 1065
1064static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) 1066static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
@@ -1081,7 +1083,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1081 mddev->size = le64_to_cpu(sb->size)/2; 1083 mddev->size = le64_to_cpu(sb->size)/2;
1082 mddev->events = le64_to_cpu(sb->events); 1084 mddev->events = le64_to_cpu(sb->events);
1083 mddev->bitmap_offset = 0; 1085 mddev->bitmap_offset = 0;
1084 mddev->default_bitmap_offset = 1024; 1086 mddev->default_bitmap_offset = 1024 >> 9;
1085 1087
1086 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1088 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1087 memcpy(mddev->uuid, sb->set_uuid, 16); 1089 memcpy(mddev->uuid, sb->set_uuid, 16);
@@ -1161,6 +1163,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1161 1163
1162 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); 1164 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
1163 1165
1166 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1167 sb->size = cpu_to_le64(mddev->size<<1);
1168
1164 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1169 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1165 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1170 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1166 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1171 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2686,14 +2691,6 @@ static int do_md_stop(mddev_t * mddev, int ro)
2686 set_disk_ro(disk, 1); 2691 set_disk_ro(disk, 1);
2687 } 2692 }
2688 2693
2689 bitmap_destroy(mddev);
2690 if (mddev->bitmap_file) {
2691 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
2692 fput(mddev->bitmap_file);
2693 mddev->bitmap_file = NULL;
2694 }
2695 mddev->bitmap_offset = 0;
2696
2697 /* 2694 /*
2698 * Free resources if final stop 2695 * Free resources if final stop
2699 */ 2696 */
@@ -2703,6 +2700,14 @@ static int do_md_stop(mddev_t * mddev, int ro)
2703 struct gendisk *disk; 2700 struct gendisk *disk;
2704 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 2701 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
2705 2702
2703 bitmap_destroy(mddev);
2704 if (mddev->bitmap_file) {
2705 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
2706 fput(mddev->bitmap_file);
2707 mddev->bitmap_file = NULL;
2708 }
2709 mddev->bitmap_offset = 0;
2710
2706 ITERATE_RDEV(mddev,rdev,tmp) 2711 ITERATE_RDEV(mddev,rdev,tmp)
2707 if (rdev->raid_disk >= 0) { 2712 if (rdev->raid_disk >= 0) {
2708 char nm[20]; 2713 char nm[20];
@@ -2939,6 +2944,8 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
2939 info.ctime = mddev->ctime; 2944 info.ctime = mddev->ctime;
2940 info.level = mddev->level; 2945 info.level = mddev->level;
2941 info.size = mddev->size; 2946 info.size = mddev->size;
2947 if (info.size != mddev->size) /* overflow */
2948 info.size = -1;
2942 info.nr_disks = nr; 2949 info.nr_disks = nr;
2943 info.raid_disks = mddev->raid_disks; 2950 info.raid_disks = mddev->raid_disks;
2944 info.md_minor = mddev->md_minor; 2951 info.md_minor = mddev->md_minor;
@@ -3465,7 +3472,7 @@ static int update_size(mddev_t *mddev, unsigned long size)
3465 bdev = bdget_disk(mddev->gendisk, 0); 3472 bdev = bdget_disk(mddev->gendisk, 0);
3466 if (bdev) { 3473 if (bdev) {
3467 mutex_lock(&bdev->bd_inode->i_mutex); 3474 mutex_lock(&bdev->bd_inode->i_mutex);
3468 i_size_write(bdev->bd_inode, mddev->array_size << 10); 3475 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
3469 mutex_unlock(&bdev->bd_inode->i_mutex); 3476 mutex_unlock(&bdev->bd_inode->i_mutex);
3470 bdput(bdev); 3477 bdput(bdev);
3471 } 3478 }
@@ -3485,17 +3492,6 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
3485 if (mddev->sync_thread) 3492 if (mddev->sync_thread)
3486 return -EBUSY; 3493 return -EBUSY;
3487 rv = mddev->pers->reshape(mddev, raid_disks); 3494 rv = mddev->pers->reshape(mddev, raid_disks);
3488 if (!rv) {
3489 struct block_device *bdev;
3490
3491 bdev = bdget_disk(mddev->gendisk, 0);
3492 if (bdev) {
3493 mutex_lock(&bdev->bd_inode->i_mutex);
3494 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3495 mutex_unlock(&bdev->bd_inode->i_mutex);
3496 bdput(bdev);
3497 }
3498 }
3499 return rv; 3495 return rv;
3500} 3496}
3501 3497
@@ -3531,7 +3527,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
3531 ) 3527 )
3532 return -EINVAL; 3528 return -EINVAL;
3533 /* Check there is only one change */ 3529 /* Check there is only one change */
3534 if (mddev->size != info->size) cnt++; 3530 if (info->size >= 0 && mddev->size != info->size) cnt++;
3535 if (mddev->raid_disks != info->raid_disks) cnt++; 3531 if (mddev->raid_disks != info->raid_disks) cnt++;
3536 if (mddev->layout != info->layout) cnt++; 3532 if (mddev->layout != info->layout) cnt++;
3537 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; 3533 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
@@ -3548,7 +3544,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
3548 else 3544 else
3549 return mddev->pers->reconfig(mddev, info->layout, -1); 3545 return mddev->pers->reconfig(mddev, info->layout, -1);
3550 } 3546 }
3551 if (mddev->size != info->size) 3547 if (info->size >= 0 && mddev->size != info->size)
3552 rv = update_size(mddev, info->size); 3548 rv = update_size(mddev, info->size);
3553 3549
3554 if (mddev->raid_disks != info->raid_disks) 3550 if (mddev->raid_disks != info->raid_disks)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d03f99cf4b7d..678f4dbbea1d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -372,7 +372,7 @@ out_free_conf:
372 kfree(conf); 372 kfree(conf);
373 mddev->private = NULL; 373 mddev->private = NULL;
374out: 374out:
375 return 1; 375 return -ENOMEM;
376} 376}
377 377
378static int raid0_stop (mddev_t *mddev) 378static int raid0_stop (mddev_t *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9130d051b474..ab90a6d12020 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -565,6 +565,8 @@ rb_out:
565 565
566 if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL) 566 if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
567 atomic_inc(&conf->mirrors[disk].rdev->nr_pending); 567 atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
568 else
569 disk = -1;
568 rcu_read_unlock(); 570 rcu_read_unlock();
569 571
570 return disk; 572 return disk;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 25976bfb6f9c..2dba305daf3c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -350,7 +350,8 @@ static void shrink_stripes(raid5_conf_t *conf)
350 while (drop_one_stripe(conf)) 350 while (drop_one_stripe(conf))
351 ; 351 ;
352 352
353 kmem_cache_destroy(conf->slab_cache); 353 if (conf->slab_cache)
354 kmem_cache_destroy(conf->slab_cache);
354 conf->slab_cache = NULL; 355 conf->slab_cache = NULL;
355} 356}
356 357
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index f618a53b98be..cd477ebf2ee4 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -115,7 +115,7 @@ static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
115 list_add_tail(&sh->lru, &conf->inactive_list); 115 list_add_tail(&sh->lru, &conf->inactive_list);
116 atomic_dec(&conf->active_stripes); 116 atomic_dec(&conf->active_stripes);
117 if (!conf->inactive_blocked || 117 if (!conf->inactive_blocked ||
118 atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4)) 118 atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
119 wake_up(&conf->wait_for_stripe); 119 wake_up(&conf->wait_for_stripe);
120 } 120 }
121 } 121 }
@@ -273,7 +273,8 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
273 conf->inactive_blocked = 1; 273 conf->inactive_blocked = 1;
274 wait_event_lock_irq(conf->wait_for_stripe, 274 wait_event_lock_irq(conf->wait_for_stripe,
275 !list_empty(&conf->inactive_list) && 275 !list_empty(&conf->inactive_list) &&
276 (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4) 276 (atomic_read(&conf->active_stripes)
277 < (conf->max_nr_stripes *3/4)
277 || !conf->inactive_blocked), 278 || !conf->inactive_blocked),
278 conf->device_lock, 279 conf->device_lock,
279 unplug_slaves(conf->mddev); 280 unplug_slaves(conf->mddev);
@@ -302,9 +303,31 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
302 return sh; 303 return sh;
303} 304}
304 305
305static int grow_stripes(raid6_conf_t *conf, int num) 306static int grow_one_stripe(raid6_conf_t *conf)
306{ 307{
307 struct stripe_head *sh; 308 struct stripe_head *sh;
309 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
310 if (!sh)
311 return 0;
312 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
313 sh->raid_conf = conf;
314 spin_lock_init(&sh->lock);
315
316 if (grow_buffers(sh, conf->raid_disks)) {
317 shrink_buffers(sh, conf->raid_disks);
318 kmem_cache_free(conf->slab_cache, sh);
319 return 0;
320 }
321 /* we just created an active stripe so... */
322 atomic_set(&sh->count, 1);
323 atomic_inc(&conf->active_stripes);
324 INIT_LIST_HEAD(&sh->lru);
325 release_stripe(sh);
326 return 1;
327}
328
329static int grow_stripes(raid6_conf_t *conf, int num)
330{
308 kmem_cache_t *sc; 331 kmem_cache_t *sc;
309 int devs = conf->raid_disks; 332 int devs = conf->raid_disks;
310 333
@@ -316,45 +339,35 @@ static int grow_stripes(raid6_conf_t *conf, int num)
316 if (!sc) 339 if (!sc)
317 return 1; 340 return 1;
318 conf->slab_cache = sc; 341 conf->slab_cache = sc;
319 while (num--) { 342 while (num--)
320 sh = kmem_cache_alloc(sc, GFP_KERNEL); 343 if (!grow_one_stripe(conf))
321 if (!sh)
322 return 1;
323 memset(sh, 0, sizeof(*sh) + (devs-1)*sizeof(struct r5dev));
324 sh->raid_conf = conf;
325 spin_lock_init(&sh->lock);
326
327 if (grow_buffers(sh, conf->raid_disks)) {
328 shrink_buffers(sh, conf->raid_disks);
329 kmem_cache_free(sc, sh);
330 return 1; 344 return 1;
331 }
332 /* we just created an active stripe so... */
333 atomic_set(&sh->count, 1);
334 atomic_inc(&conf->active_stripes);
335 INIT_LIST_HEAD(&sh->lru);
336 release_stripe(sh);
337 }
338 return 0; 345 return 0;
339} 346}
340 347
341static void shrink_stripes(raid6_conf_t *conf) 348static int drop_one_stripe(raid6_conf_t *conf)
342{ 349{
343 struct stripe_head *sh; 350 struct stripe_head *sh;
351 spin_lock_irq(&conf->device_lock);
352 sh = get_free_stripe(conf);
353 spin_unlock_irq(&conf->device_lock);
354 if (!sh)
355 return 0;
356 if (atomic_read(&sh->count))
357 BUG();
358 shrink_buffers(sh, conf->raid_disks);
359 kmem_cache_free(conf->slab_cache, sh);
360 atomic_dec(&conf->active_stripes);
361 return 1;
362}
344 363
345 while (1) { 364static void shrink_stripes(raid6_conf_t *conf)
346 spin_lock_irq(&conf->device_lock); 365{
347 sh = get_free_stripe(conf); 366 while (drop_one_stripe(conf))
348 spin_unlock_irq(&conf->device_lock); 367 ;
349 if (!sh) 368
350 break; 369 if (conf->slab_cache)
351 if (atomic_read(&sh->count)) 370 kmem_cache_destroy(conf->slab_cache);
352 BUG();
353 shrink_buffers(sh, conf->raid_disks);
354 kmem_cache_free(conf->slab_cache, sh);
355 atomic_dec(&conf->active_stripes);
356 }
357 kmem_cache_destroy(conf->slab_cache);
358 conf->slab_cache = NULL; 371 conf->slab_cache = NULL;
359} 372}
360 373
@@ -1912,6 +1925,74 @@ static void raid6d (mddev_t *mddev)
1912 PRINTK("--- raid6d inactive\n"); 1925 PRINTK("--- raid6d inactive\n");
1913} 1926}
1914 1927
1928static ssize_t
1929raid6_show_stripe_cache_size(mddev_t *mddev, char *page)
1930{
1931 raid6_conf_t *conf = mddev_to_conf(mddev);
1932 if (conf)
1933 return sprintf(page, "%d\n", conf->max_nr_stripes);
1934 else
1935 return 0;
1936}
1937
1938static ssize_t
1939raid6_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
1940{
1941 raid6_conf_t *conf = mddev_to_conf(mddev);
1942 char *end;
1943 int new;
1944 if (len >= PAGE_SIZE)
1945 return -EINVAL;
1946 if (!conf)
1947 return -ENODEV;
1948
1949 new = simple_strtoul(page, &end, 10);
1950 if (!*page || (*end && *end != '\n') )
1951 return -EINVAL;
1952 if (new <= 16 || new > 32768)
1953 return -EINVAL;
1954 while (new < conf->max_nr_stripes) {
1955 if (drop_one_stripe(conf))
1956 conf->max_nr_stripes--;
1957 else
1958 break;
1959 }
1960 while (new > conf->max_nr_stripes) {
1961 if (grow_one_stripe(conf))
1962 conf->max_nr_stripes++;
1963 else break;
1964 }
1965 return len;
1966}
1967
1968static struct md_sysfs_entry
1969raid6_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
1970 raid6_show_stripe_cache_size,
1971 raid6_store_stripe_cache_size);
1972
1973static ssize_t
1974stripe_cache_active_show(mddev_t *mddev, char *page)
1975{
1976 raid6_conf_t *conf = mddev_to_conf(mddev);
1977 if (conf)
1978 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
1979 else
1980 return 0;
1981}
1982
1983static struct md_sysfs_entry
1984raid6_stripecache_active = __ATTR_RO(stripe_cache_active);
1985
1986static struct attribute *raid6_attrs[] = {
1987 &raid6_stripecache_size.attr,
1988 &raid6_stripecache_active.attr,
1989 NULL,
1990};
1991static struct attribute_group raid6_attrs_group = {
1992 .name = NULL,
1993 .attrs = raid6_attrs,
1994};
1995
1915static int run(mddev_t *mddev) 1996static int run(mddev_t *mddev)
1916{ 1997{
1917 raid6_conf_t *conf; 1998 raid6_conf_t *conf;
@@ -2095,6 +2176,7 @@ static int stop (mddev_t *mddev)
2095 shrink_stripes(conf); 2176 shrink_stripes(conf);
2096 kfree(conf->stripe_hashtbl); 2177 kfree(conf->stripe_hashtbl);
2097 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2178 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2179 sysfs_remove_group(&mddev->kobj, &raid6_attrs_group);
2098 kfree(conf); 2180 kfree(conf);
2099 mddev->private = NULL; 2181 mddev->private = NULL;
2100 return 0; 2182 return 0;
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index 90628562851e..184974cc734d 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -60,4 +60,7 @@ extern void i2o_iop_remove(struct i2o_controller *);
60#define I2O_IN_PORT 0x40 60#define I2O_IN_PORT 0x40
61#define I2O_OUT_PORT 0x44 61#define I2O_OUT_PORT 0x44
62 62
63/* Motorola/Freescale specific register offset */
64#define I2O_MOTOROLA_PORT_OFFSET 0x10400
65
63#define I2O_IRQ_OUTBOUND_POST 0x00000008 66#define I2O_IRQ_OUTBOUND_POST 0x00000008
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index f9e5a23697a1..c08ddac3717d 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -732,7 +732,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
732 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid); 732 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
733 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt)); 733 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
734 734
735 if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT)) 735 if (!i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
736 status = SUCCESS; 736 status = SUCCESS;
737 737
738 return status; 738 return status;
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index d698d7709c31..4f1515cae5dc 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -88,6 +88,11 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
88 struct device *dev = &pdev->dev; 88 struct device *dev = &pdev->dev;
89 int i; 89 int i;
90 90
91 if (pci_request_regions(pdev, OSM_DESCRIPTION)) {
92 printk(KERN_ERR "%s: device already claimed\n", c->name);
93 return -ENODEV;
94 }
95
91 for (i = 0; i < 6; i++) { 96 for (i = 0; i < 6; i++) {
92 /* Skip I/O spaces */ 97 /* Skip I/O spaces */
93 if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) { 98 if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
@@ -163,6 +168,24 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
163 c->in_port = c->base.virt + I2O_IN_PORT; 168 c->in_port = c->base.virt + I2O_IN_PORT;
164 c->out_port = c->base.virt + I2O_OUT_PORT; 169 c->out_port = c->base.virt + I2O_OUT_PORT;
165 170
171 /* Motorola/Freescale chip does not follow spec */
172 if (pdev->vendor == PCI_VENDOR_ID_MOTOROLA && pdev->device == 0x18c0) {
173 /* Check if CPU is enabled */
174 if (be32_to_cpu(readl(c->base.virt + 0x10000)) & 0x10000000) {
175 printk(KERN_INFO "%s: MPC82XX needs CPU running to "
176 "service I2O.\n", c->name);
177 i2o_pci_free(c);
178 return -ENODEV;
179 } else {
180 c->irq_status += I2O_MOTOROLA_PORT_OFFSET;
181 c->irq_mask += I2O_MOTOROLA_PORT_OFFSET;
182 c->in_port += I2O_MOTOROLA_PORT_OFFSET;
183 c->out_port += I2O_MOTOROLA_PORT_OFFSET;
184 printk(KERN_INFO "%s: MPC82XX workarounds activated.\n",
185 c->name);
186 }
187 }
188
166 if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) { 189 if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) {
167 i2o_pci_free(c); 190 i2o_pci_free(c);
168 return -ENOMEM; 191 return -ENOMEM;
@@ -298,7 +321,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
298 struct i2o_controller *c; 321 struct i2o_controller *c;
299 int rc; 322 int rc;
300 struct pci_dev *i960 = NULL; 323 struct pci_dev *i960 = NULL;
301 int pci_dev_busy = 0; 324 int enabled = pdev->is_enabled;
302 325
303 printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); 326 printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
304 327
@@ -308,16 +331,12 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
308 return -ENODEV; 331 return -ENODEV;
309 } 332 }
310 333
311 if ((rc = pci_enable_device(pdev))) { 334 if (!enabled)
312 printk(KERN_WARNING "i2o: couldn't enable device %s\n", 335 if ((rc = pci_enable_device(pdev))) {
313 pci_name(pdev)); 336 printk(KERN_WARNING "i2o: couldn't enable device %s\n",
314 return rc; 337 pci_name(pdev));
315 } 338 return rc;
316 339 }
317 if (pci_request_regions(pdev, OSM_DESCRIPTION)) {
318 printk(KERN_ERR "i2o: device already claimed\n");
319 return -ENODEV;
320 }
321 340
322 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 341 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
323 printk(KERN_WARNING "i2o: no suitable DMA found for %s\n", 342 printk(KERN_WARNING "i2o: no suitable DMA found for %s\n",
@@ -395,9 +414,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
395 414
396 if ((rc = i2o_pci_alloc(c))) { 415 if ((rc = i2o_pci_alloc(c))) {
397 printk(KERN_ERR "%s: DMA / IO allocation for I2O controller " 416 printk(KERN_ERR "%s: DMA / IO allocation for I2O controller "
398 " failed\n", c->name); 417 "failed\n", c->name);
399 if (rc == -ENODEV)
400 pci_dev_busy = 1;
401 goto free_controller; 418 goto free_controller;
402 } 419 }
403 420
@@ -425,7 +442,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
425 i2o_iop_free(c); 442 i2o_iop_free(c);
426 443
427 disable: 444 disable:
428 if (!pci_dev_busy) 445 if (!enabled)
429 pci_disable_device(pdev); 446 pci_disable_device(pdev);
430 447
431 return rc; 448 return rc;
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index aaf04638054e..227c39a7c1b4 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -194,7 +194,7 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
194 194
195 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); 195 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
196 196
197 switch(cmd->flags) { 197 switch (mmc_rsp_type(cmd->flags)) {
198 case MMC_RSP_R1: 198 case MMC_RSP_R1:
199 mmccmd |= SD_CMD_RT_1; 199 mmccmd |= SD_CMD_RT_1;
200 break; 200 break;
@@ -483,34 +483,35 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
483 cmd = mrq->cmd; 483 cmd = mrq->cmd;
484 cmd->error = MMC_ERR_NONE; 484 cmd->error = MMC_ERR_NONE;
485 485
486 if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT) { 486 if (cmd->flags & MMC_RSP_PRESENT) {
487 487 if (cmd->flags & MMC_RSP_136) {
488 /* Techincally, we should be getting all 48 bits of the response 488 u32 r[4];
489 * (SD_RESP1 + SD_RESP2), but because our response omits the CRC, 489 int i;
490 * our data ends up being shifted 8 bits to the right. In this case, 490
491 * that means that the OSR data starts at bit 31, so we can just 491 r[0] = au_readl(host->iobase + SD_RESP3);
492 * read RESP0 and return that 492 r[1] = au_readl(host->iobase + SD_RESP2);
493 */ 493 r[2] = au_readl(host->iobase + SD_RESP1);
494 494 r[3] = au_readl(host->iobase + SD_RESP0);
495 cmd->resp[0] = au_readl(host->iobase + SD_RESP0); 495
496 } 496 /* The CRC is omitted from the response, so really
497 else if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_LONG) { 497 * we only got 120 bytes, but the engine expects
498 u32 r[4]; 498 * 128 bits, so we have to shift things up
499 int i; 499 */
500 500
501 r[0] = au_readl(host->iobase + SD_RESP3); 501 for(i = 0; i < 4; i++) {
502 r[1] = au_readl(host->iobase + SD_RESP2); 502 cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
503 r[2] = au_readl(host->iobase + SD_RESP1); 503 if (i != 3)
504 r[3] = au_readl(host->iobase + SD_RESP0); 504 cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
505 505 }
506 /* The CRC is omitted from the response, so really we only got 506 } else {
507 * 120 bytes, but the engine expects 128 bits, so we have to shift 507 /* Techincally, we should be getting all 48 bits of
508 * things up 508 * the response (SD_RESP1 + SD_RESP2), but because
509 */ 509 * our response omits the CRC, our data ends up
510 510 * being shifted 8 bits to the right. In this case,
511 for(i = 0; i < 4; i++) { 511 * that means that the OSR data starts at bit 31,
512 cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8; 512 * so we can just read RESP0 and return that
513 if (i != 3) cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24; 513 */
514 cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
514 } 515 }
515 } 516 }
516 517
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index bfca5c176e88..1888060c5e0c 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -211,7 +211,7 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
211 211
212 appcmd.opcode = MMC_APP_CMD; 212 appcmd.opcode = MMC_APP_CMD;
213 appcmd.arg = rca << 16; 213 appcmd.arg = rca << 16;
214 appcmd.flags = MMC_RSP_R1; 214 appcmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
215 appcmd.retries = 0; 215 appcmd.retries = 0;
216 memset(appcmd.resp, 0, sizeof(appcmd.resp)); 216 memset(appcmd.resp, 0, sizeof(appcmd.resp));
217 appcmd.data = NULL; 217 appcmd.data = NULL;
@@ -331,7 +331,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
331 331
332 cmd.opcode = MMC_SELECT_CARD; 332 cmd.opcode = MMC_SELECT_CARD;
333 cmd.arg = card->rca << 16; 333 cmd.arg = card->rca << 16;
334 cmd.flags = MMC_RSP_R1; 334 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
335 335
336 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 336 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
337 if (err != MMC_ERR_NONE) 337 if (err != MMC_ERR_NONE)
@@ -358,7 +358,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
358 struct mmc_command cmd; 358 struct mmc_command cmd;
359 cmd.opcode = SD_APP_SET_BUS_WIDTH; 359 cmd.opcode = SD_APP_SET_BUS_WIDTH;
360 cmd.arg = SD_BUS_WIDTH_4; 360 cmd.arg = SD_BUS_WIDTH_4;
361 cmd.flags = MMC_RSP_R1; 361 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
362 362
363 err = mmc_wait_for_app_cmd(host, card->rca, &cmd, 363 err = mmc_wait_for_app_cmd(host, card->rca, &cmd,
364 CMD_RETRIES); 364 CMD_RETRIES);
@@ -386,7 +386,7 @@ static void mmc_deselect_cards(struct mmc_host *host)
386 386
387 cmd.opcode = MMC_SELECT_CARD; 387 cmd.opcode = MMC_SELECT_CARD;
388 cmd.arg = 0; 388 cmd.arg = 0;
389 cmd.flags = MMC_RSP_NONE; 389 cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
390 390
391 mmc_wait_for_cmd(host, &cmd, 0); 391 mmc_wait_for_cmd(host, &cmd, 0);
392 } 392 }
@@ -677,7 +677,7 @@ static void mmc_idle_cards(struct mmc_host *host)
677 677
678 cmd.opcode = MMC_GO_IDLE_STATE; 678 cmd.opcode = MMC_GO_IDLE_STATE;
679 cmd.arg = 0; 679 cmd.arg = 0;
680 cmd.flags = MMC_RSP_NONE; 680 cmd.flags = MMC_RSP_NONE | MMC_CMD_BC;
681 681
682 mmc_wait_for_cmd(host, &cmd, 0); 682 mmc_wait_for_cmd(host, &cmd, 0);
683 683
@@ -738,7 +738,7 @@ static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
738 738
739 cmd.opcode = MMC_SEND_OP_COND; 739 cmd.opcode = MMC_SEND_OP_COND;
740 cmd.arg = ocr; 740 cmd.arg = ocr;
741 cmd.flags = MMC_RSP_R3; 741 cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR;
742 742
743 for (i = 100; i; i--) { 743 for (i = 100; i; i--) {
744 err = mmc_wait_for_cmd(host, &cmd, 0); 744 err = mmc_wait_for_cmd(host, &cmd, 0);
@@ -766,7 +766,7 @@ static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
766 766
767 cmd.opcode = SD_APP_OP_COND; 767 cmd.opcode = SD_APP_OP_COND;
768 cmd.arg = ocr; 768 cmd.arg = ocr;
769 cmd.flags = MMC_RSP_R3; 769 cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR;
770 770
771 for (i = 100; i; i--) { 771 for (i = 100; i; i--) {
772 err = mmc_wait_for_app_cmd(host, 0, &cmd, CMD_RETRIES); 772 err = mmc_wait_for_app_cmd(host, 0, &cmd, CMD_RETRIES);
@@ -805,7 +805,7 @@ static void mmc_discover_cards(struct mmc_host *host)
805 805
806 cmd.opcode = MMC_ALL_SEND_CID; 806 cmd.opcode = MMC_ALL_SEND_CID;
807 cmd.arg = 0; 807 cmd.arg = 0;
808 cmd.flags = MMC_RSP_R2; 808 cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
809 809
810 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 810 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
811 if (err == MMC_ERR_TIMEOUT) { 811 if (err == MMC_ERR_TIMEOUT) {
@@ -835,7 +835,7 @@ static void mmc_discover_cards(struct mmc_host *host)
835 835
836 cmd.opcode = SD_SEND_RELATIVE_ADDR; 836 cmd.opcode = SD_SEND_RELATIVE_ADDR;
837 cmd.arg = 0; 837 cmd.arg = 0;
838 cmd.flags = MMC_RSP_R6; 838 cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
839 839
840 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 840 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
841 if (err != MMC_ERR_NONE) 841 if (err != MMC_ERR_NONE)
@@ -856,7 +856,7 @@ static void mmc_discover_cards(struct mmc_host *host)
856 } else { 856 } else {
857 cmd.opcode = MMC_SET_RELATIVE_ADDR; 857 cmd.opcode = MMC_SET_RELATIVE_ADDR;
858 cmd.arg = card->rca << 16; 858 cmd.arg = card->rca << 16;
859 cmd.flags = MMC_RSP_R1; 859 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
860 860
861 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 861 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
862 if (err != MMC_ERR_NONE) 862 if (err != MMC_ERR_NONE)
@@ -878,7 +878,7 @@ static void mmc_read_csds(struct mmc_host *host)
878 878
879 cmd.opcode = MMC_SEND_CSD; 879 cmd.opcode = MMC_SEND_CSD;
880 cmd.arg = card->rca << 16; 880 cmd.arg = card->rca << 16;
881 cmd.flags = MMC_RSP_R2; 881 cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
882 882
883 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 883 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
884 if (err != MMC_ERR_NONE) { 884 if (err != MMC_ERR_NONE) {
@@ -920,7 +920,7 @@ static void mmc_read_scrs(struct mmc_host *host)
920 920
921 cmd.opcode = MMC_APP_CMD; 921 cmd.opcode = MMC_APP_CMD;
922 cmd.arg = card->rca << 16; 922 cmd.arg = card->rca << 16;
923 cmd.flags = MMC_RSP_R1; 923 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
924 924
925 err = mmc_wait_for_cmd(host, &cmd, 0); 925 err = mmc_wait_for_cmd(host, &cmd, 0);
926 if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) { 926 if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) {
@@ -932,7 +932,7 @@ static void mmc_read_scrs(struct mmc_host *host)
932 932
933 cmd.opcode = SD_APP_SEND_SCR; 933 cmd.opcode = SD_APP_SEND_SCR;
934 cmd.arg = 0; 934 cmd.arg = 0;
935 cmd.flags = MMC_RSP_R1; 935 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
936 936
937 memset(&data, 0, sizeof(struct mmc_data)); 937 memset(&data, 0, sizeof(struct mmc_data));
938 938
@@ -1003,7 +1003,7 @@ static void mmc_check_cards(struct mmc_host *host)
1003 1003
1004 cmd.opcode = MMC_SEND_STATUS; 1004 cmd.opcode = MMC_SEND_STATUS;
1005 cmd.arg = card->rca << 16; 1005 cmd.arg = card->rca << 16;
1006 cmd.flags = MMC_RSP_R1; 1006 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1007 1007
1008 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 1008 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
1009 if (err == MMC_ERR_NONE) 1009 if (err == MMC_ERR_NONE)
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 5b014c370e80..8eb2a2ede64b 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -171,14 +171,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
171 brq.mrq.data = &brq.data; 171 brq.mrq.data = &brq.data;
172 172
173 brq.cmd.arg = req->sector << 9; 173 brq.cmd.arg = req->sector << 9;
174 brq.cmd.flags = MMC_RSP_R1; 174 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
175 brq.data.timeout_ns = card->csd.tacc_ns * 10; 175 brq.data.timeout_ns = card->csd.tacc_ns * 10;
176 brq.data.timeout_clks = card->csd.tacc_clks * 10; 176 brq.data.timeout_clks = card->csd.tacc_clks * 10;
177 brq.data.blksz_bits = md->block_bits; 177 brq.data.blksz_bits = md->block_bits;
178 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); 178 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
179 brq.stop.opcode = MMC_STOP_TRANSMISSION; 179 brq.stop.opcode = MMC_STOP_TRANSMISSION;
180 brq.stop.arg = 0; 180 brq.stop.arg = 0;
181 brq.stop.flags = MMC_RSP_R1B; 181 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
182 182
183 if (rq_data_dir(req) == READ) { 183 if (rq_data_dir(req) == READ) {
184 brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK; 184 brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
@@ -223,7 +223,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
223 223
224 cmd.opcode = MMC_SEND_STATUS; 224 cmd.opcode = MMC_SEND_STATUS;
225 cmd.arg = card->rca << 16; 225 cmd.arg = card->rca << 16;
226 cmd.flags = MMC_RSP_R1; 226 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
227 err = mmc_wait_for_cmd(card->host, &cmd, 5); 227 err = mmc_wait_for_cmd(card->host, &cmd, 5);
228 if (err) { 228 if (err) {
229 printk(KERN_ERR "%s: error %d requesting status\n", 229 printk(KERN_ERR "%s: error %d requesting status\n",
@@ -430,7 +430,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
430 mmc_card_claim_host(card); 430 mmc_card_claim_host(card);
431 cmd.opcode = MMC_SET_BLOCKLEN; 431 cmd.opcode = MMC_SET_BLOCKLEN;
432 cmd.arg = 1 << md->block_bits; 432 cmd.arg = 1 << md->block_bits;
433 cmd.flags = MMC_RSP_R1; 433 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
434 err = mmc_wait_for_cmd(card->host, &cmd, 5); 434 err = mmc_wait_for_cmd(card->host, &cmd, 5);
435 mmc_card_release_host(card); 435 mmc_card_release_host(card);
436 436
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 634ef53e85a5..37ee7f8dc82f 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -124,15 +124,10 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
124 } 124 }
125 125
126 c |= cmd->opcode | MCI_CPSM_ENABLE; 126 c |= cmd->opcode | MCI_CPSM_ENABLE;
127 switch (cmd->flags & MMC_RSP_MASK) { 127 if (cmd->flags & MMC_RSP_PRESENT) {
128 case MMC_RSP_NONE: 128 if (cmd->flags & MMC_RSP_136)
129 default: 129 c |= MCI_CPSM_LONGRSP;
130 break;
131 case MMC_RSP_LONG:
132 c |= MCI_CPSM_LONGRSP;
133 case MMC_RSP_SHORT:
134 c |= MCI_CPSM_RESPONSE; 130 c |= MCI_CPSM_RESPONSE;
135 break;
136 } 131 }
137 if (/*interrupt*/0) 132 if (/*interrupt*/0)
138 c |= MCI_CPSM_INTERRUPT; 133 c |= MCI_CPSM_INTERRUPT;
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index ee8f8a0420d1..285d7d068097 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -178,14 +178,15 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd,
178 if (cmd->flags & MMC_RSP_BUSY) 178 if (cmd->flags & MMC_RSP_BUSY)
179 cmdat |= CMDAT_BUSY; 179 cmdat |= CMDAT_BUSY;
180 180
181 switch (cmd->flags & (MMC_RSP_MASK | MMC_RSP_CRC)) { 181#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
182 case MMC_RSP_SHORT | MMC_RSP_CRC: 182 switch (RSP_TYPE(mmc_resp_type(cmd))) {
183 case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6 */
183 cmdat |= CMDAT_RESP_SHORT; 184 cmdat |= CMDAT_RESP_SHORT;
184 break; 185 break;
185 case MMC_RSP_SHORT: 186 case RSP_TYPE(MMC_RSP_R3):
186 cmdat |= CMDAT_RESP_R3; 187 cmdat |= CMDAT_RESP_R3;
187 break; 188 break;
188 case MMC_RSP_LONG | MMC_RSP_CRC: 189 case RSP_TYPE(MMC_RSP_R2):
189 cmdat |= CMDAT_RESP_R2; 190 cmdat |= CMDAT_RESP_R2;
190 break; 191 break;
191 default: 192 default:
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index f25757625361..3be397d436fa 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -459,7 +459,7 @@ static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
459 /* 459 /*
460 * Do we expect a reply? 460 * Do we expect a reply?
461 */ 461 */
462 if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE) { 462 if (cmd->flags & MMC_RSP_PRESENT) {
463 /* 463 /*
464 * Read back status. 464 * Read back status.
465 */ 465 */
@@ -476,10 +476,10 @@ static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
476 cmd->error = MMC_ERR_BADCRC; 476 cmd->error = MMC_ERR_BADCRC;
477 /* All ok */ 477 /* All ok */
478 else { 478 else {
479 if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT) 479 if (cmd->flags & MMC_RSP_136)
480 wbsd_get_short_reply(host, cmd);
481 else
482 wbsd_get_long_reply(host, cmd); 480 wbsd_get_long_reply(host, cmd);
481 else
482 wbsd_get_short_reply(host, cmd);
483 } 483 }
484 } 484 }
485 485
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 701620b6baed..8b3784e2de89 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -110,8 +110,9 @@ static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const voi
110{ 110{
111 while (len > 0) { 111 while (len > 0) {
112 map_word d; 112 map_word d;
113 d.x[0] = *((uint32_t*)from)++; 113 d.x[0] = *((uint32_t*)from);
114 dc21285_write32(map, d, to); 114 dc21285_write32(map, d, to);
115 from += 4;
115 to += 4; 116 to += 4;
116 len -= 4; 117 len -= 4;
117 } 118 }
@@ -121,8 +122,9 @@ static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const voi
121{ 122{
122 while (len > 0) { 123 while (len > 0) {
123 map_word d; 124 map_word d;
124 d.x[0] = *((uint16_t*)from)++; 125 d.x[0] = *((uint16_t*)from);
125 dc21285_write16(map, d, to); 126 dc21285_write16(map, d, to);
127 from += 2;
126 to += 2; 128 to += 2;
127 len -= 2; 129 len -= 2;
128 } 130 }
@@ -131,8 +133,9 @@ static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const voi
131static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len) 133static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
132{ 134{
133 map_word d; 135 map_word d;
134 d.x[0] = *((uint8_t*)from)++; 136 d.x[0] = *((uint8_t*)from);
135 dc21285_write8(map, d, to); 137 dc21285_write8(map, d, to);
138 from++;
136 to++; 139 to++;
137 len--; 140 len--;
138} 141}
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 7488ee7f7caf..7f47124f118d 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -753,9 +753,11 @@ enum tx_desc_status {
753enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 }; 753enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
754 754
755struct vortex_extra_stats { 755struct vortex_extra_stats {
756 unsigned long tx_deferred; 756 unsigned long tx_deferred;
757 unsigned long tx_multiple_collisions; 757 unsigned long tx_max_collisions;
758 unsigned long rx_bad_ssd; 758 unsigned long tx_multiple_collisions;
759 unsigned long tx_single_collisions;
760 unsigned long rx_bad_ssd;
759}; 761};
760 762
761struct vortex_private { 763struct vortex_private {
@@ -863,12 +865,14 @@ static struct {
863 const char str[ETH_GSTRING_LEN]; 865 const char str[ETH_GSTRING_LEN];
864} ethtool_stats_keys[] = { 866} ethtool_stats_keys[] = {
865 { "tx_deferred" }, 867 { "tx_deferred" },
868 { "tx_max_collisions" },
866 { "tx_multiple_collisions" }, 869 { "tx_multiple_collisions" },
870 { "tx_single_collisions" },
867 { "rx_bad_ssd" }, 871 { "rx_bad_ssd" },
868}; 872};
869 873
870/* number of ETHTOOL_GSTATS u64's */ 874/* number of ETHTOOL_GSTATS u64's */
871#define VORTEX_NUM_STATS 3 875#define VORTEX_NUM_STATS 5
872 876
873static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, 877static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
874 int chip_idx, int card_idx); 878 int chip_idx, int card_idx);
@@ -2108,9 +2112,12 @@ vortex_error(struct net_device *dev, int status)
2108 iowrite8(0, ioaddr + TxStatus); 2112 iowrite8(0, ioaddr + TxStatus);
2109 if (tx_status & 0x30) { /* txJabber or txUnderrun */ 2113 if (tx_status & 0x30) { /* txJabber or txUnderrun */
2110 do_tx_reset = 1; 2114 do_tx_reset = 1;
2111 } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */ 2115 } else if (tx_status & 0x08) { /* maxCollisions */
2112 do_tx_reset = 1; 2116 vp->xstats.tx_max_collisions++;
2113 reset_mask = 0x0108; /* Reset interface logic, but not download logic */ 2117 if (vp->drv_flags & MAX_COLLISION_RESET) {
2118 do_tx_reset = 1;
2119 reset_mask = 0x0108; /* Reset interface logic, but not download logic */
2120 }
2114 } else { /* Merely re-enable the transmitter. */ 2121 } else { /* Merely re-enable the transmitter. */
2115 iowrite16(TxEnable, ioaddr + EL3_CMD); 2122 iowrite16(TxEnable, ioaddr + EL3_CMD);
2116 } 2123 }
@@ -2926,7 +2933,6 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
2926 EL3WINDOW(6); 2933 EL3WINDOW(6);
2927 vp->stats.tx_carrier_errors += ioread8(ioaddr + 0); 2934 vp->stats.tx_carrier_errors += ioread8(ioaddr + 0);
2928 vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); 2935 vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
2929 vp->stats.collisions += ioread8(ioaddr + 3);
2930 vp->stats.tx_window_errors += ioread8(ioaddr + 4); 2936 vp->stats.tx_window_errors += ioread8(ioaddr + 4);
2931 vp->stats.rx_fifo_errors += ioread8(ioaddr + 5); 2937 vp->stats.rx_fifo_errors += ioread8(ioaddr + 5);
2932 vp->stats.tx_packets += ioread8(ioaddr + 6); 2938 vp->stats.tx_packets += ioread8(ioaddr + 6);
@@ -2939,10 +2945,15 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
2939 vp->stats.tx_bytes += ioread16(ioaddr + 12); 2945 vp->stats.tx_bytes += ioread16(ioaddr + 12);
2940 /* Extra stats for get_ethtool_stats() */ 2946 /* Extra stats for get_ethtool_stats() */
2941 vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); 2947 vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
2948 vp->xstats.tx_single_collisions += ioread8(ioaddr + 3);
2942 vp->xstats.tx_deferred += ioread8(ioaddr + 8); 2949 vp->xstats.tx_deferred += ioread8(ioaddr + 8);
2943 EL3WINDOW(4); 2950 EL3WINDOW(4);
2944 vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12); 2951 vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
2945 2952
2953 vp->stats.collisions = vp->xstats.tx_multiple_collisions
2954 + vp->xstats.tx_single_collisions
2955 + vp->xstats.tx_max_collisions;
2956
2946 { 2957 {
2947 u8 up = ioread8(ioaddr + 13); 2958 u8 up = ioread8(ioaddr + 13);
2948 vp->stats.rx_bytes += (up & 0x0f) << 16; 2959 vp->stats.rx_bytes += (up & 0x0f) << 16;
@@ -3036,8 +3047,10 @@ static void vortex_get_ethtool_stats(struct net_device *dev,
3036 spin_unlock_irqrestore(&vp->lock, flags); 3047 spin_unlock_irqrestore(&vp->lock, flags);
3037 3048
3038 data[0] = vp->xstats.tx_deferred; 3049 data[0] = vp->xstats.tx_deferred;
3039 data[1] = vp->xstats.tx_multiple_collisions; 3050 data[1] = vp->xstats.tx_max_collisions;
3040 data[2] = vp->xstats.rx_bad_ssd; 3051 data[2] = vp->xstats.tx_multiple_collisions;
3052 data[3] = vp->xstats.tx_single_collisions;
3053 data[4] = vp->xstats.rx_bad_ssd;
3041} 3054}
3042 3055
3043 3056
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 1c6d328165bb..0245e40b51a1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1610,6 +1610,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1610 } 1610 }
1611 else if (!pskb_may_pull(skb, skb->len)) 1611 else if (!pskb_may_pull(skb, skb->len))
1612 goto err; 1612 goto err;
1613 else
1614 skb->ip_summed = CHECKSUM_NONE;
1613 1615
1614 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); 1616 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
1615 if (len <= 0) { 1617 if (len <= 0) {
@@ -1690,6 +1692,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1690 kfree_skb(skb); 1692 kfree_skb(skb);
1691 } else { 1693 } else {
1692 skb_pull(skb, 2); /* chop off protocol */ 1694 skb_pull(skb, 2); /* chop off protocol */
1695 skb_postpull_rcsum(skb, skb->data - 2, 2);
1693 skb->dev = ppp->dev; 1696 skb->dev = ppp->dev;
1694 skb->protocol = htons(npindex_to_ethertype[npi]); 1697 skb->protocol = htons(npindex_to_ethertype[npi]);
1695 skb->mac.raw = skb->data; 1698 skb->mac.raw = skb->data;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f2d1dafde087..e7dc653d5bd6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.48" 72#define DRV_MODULE_VERSION "3.49"
73#define DRV_MODULE_RELDATE "Jan 16, 2006" 73#define DRV_MODULE_RELDATE "Feb 2, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -3482,6 +3482,17 @@ static void tg3_reset_task(void *_data)
3482 struct tg3 *tp = _data; 3482 struct tg3 *tp = _data;
3483 unsigned int restart_timer; 3483 unsigned int restart_timer;
3484 3484
3485 tg3_full_lock(tp, 0);
3486 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3487
3488 if (!netif_running(tp->dev)) {
3489 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3490 tg3_full_unlock(tp);
3491 return;
3492 }
3493
3494 tg3_full_unlock(tp);
3495
3485 tg3_netif_stop(tp); 3496 tg3_netif_stop(tp);
3486 3497
3487 tg3_full_lock(tp, 1); 3498 tg3_full_lock(tp, 1);
@@ -3494,10 +3505,12 @@ static void tg3_reset_task(void *_data)
3494 3505
3495 tg3_netif_start(tp); 3506 tg3_netif_start(tp);
3496 3507
3497 tg3_full_unlock(tp);
3498
3499 if (restart_timer) 3508 if (restart_timer)
3500 mod_timer(&tp->timer, jiffies + 1); 3509 mod_timer(&tp->timer, jiffies + 1);
3510
3511 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3512
3513 tg3_full_unlock(tp);
3501} 3514}
3502 3515
3503static void tg3_tx_timeout(struct net_device *dev) 3516static void tg3_tx_timeout(struct net_device *dev)
@@ -6786,6 +6799,13 @@ static int tg3_close(struct net_device *dev)
6786{ 6799{
6787 struct tg3 *tp = netdev_priv(dev); 6800 struct tg3 *tp = netdev_priv(dev);
6788 6801
6802 /* Calling flush_scheduled_work() may deadlock because
6803 * linkwatch_event() may be on the workqueue and it will try to get
6804 * the rtnl_lock which we are holding.
6805 */
6806 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
6807 msleep(1);
6808
6789 netif_stop_queue(dev); 6809 netif_stop_queue(dev);
6790 6810
6791 del_timer_sync(&tp->timer); 6811 del_timer_sync(&tp->timer);
@@ -10880,6 +10900,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
10880 if (dev) { 10900 if (dev) {
10881 struct tg3 *tp = netdev_priv(dev); 10901 struct tg3 *tp = netdev_priv(dev);
10882 10902
10903 flush_scheduled_work();
10883 unregister_netdev(dev); 10904 unregister_netdev(dev);
10884 if (tp->regs) { 10905 if (tp->regs) {
10885 iounmap(tp->regs); 10906 iounmap(tp->regs);
@@ -10901,6 +10922,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10901 if (!netif_running(dev)) 10922 if (!netif_running(dev))
10902 return 0; 10923 return 0;
10903 10924
10925 flush_scheduled_work();
10904 tg3_netif_stop(tp); 10926 tg3_netif_stop(tp);
10905 10927
10906 del_timer_sync(&tp->timer); 10928 del_timer_sync(&tp->timer);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index e8243305f0e8..7f4b7f6ac40d 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2162,6 +2162,7 @@ struct tg3 {
2162#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 2162#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
2163#define TG3_FLAG_10_100_ONLY 0x01000000 2163#define TG3_FLAG_10_100_ONLY 0x01000000
2164#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 2164#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2165#define TG3_FLAG_IN_RESET_TASK 0x04000000
2165#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 2166#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
2166#define TG3_FLAG_GOT_SERDES_FLOWCTL 0x20000000 2167#define TG3_FLAG_GOT_SERDES_FLOWCTL 0x20000000
2167#define TG3_FLAG_SPLIT_MODE 0x40000000 2168#define TG3_FLAG_SPLIT_MODE 0x40000000
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 9e0229f7e25f..f46e8438e0d2 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1423,7 +1423,7 @@ static void __init ccio_init_resources(struct ioc *ioc)
1423 struct resource *res = ioc->mmio_region; 1423 struct resource *res = ioc->mmio_region;
1424 char *name = kmalloc(14, GFP_KERNEL); 1424 char *name = kmalloc(14, GFP_KERNEL);
1425 1425
1426 sprintf(name, "GSC Bus [%d/]", ioc->hw_path); 1426 snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
1427 1427
1428 ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low); 1428 ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
1429 ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv); 1429 ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
@@ -1557,12 +1557,11 @@ static int ccio_probe(struct parisc_device *dev)
1557 int i; 1557 int i;
1558 struct ioc *ioc, **ioc_p = &ioc_list; 1558 struct ioc *ioc, **ioc_p = &ioc_list;
1559 1559
1560 ioc = kmalloc(sizeof(struct ioc), GFP_KERNEL); 1560 ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
1561 if (ioc == NULL) { 1561 if (ioc == NULL) {
1562 printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); 1562 printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
1563 return 1; 1563 return 1;
1564 } 1564 }
1565 memset(ioc, 0, sizeof(struct ioc));
1566 1565
1567 ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn"; 1566 ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
1568 1567
@@ -1578,7 +1577,7 @@ static int ccio_probe(struct parisc_device *dev)
1578 ccio_ioc_init(ioc); 1577 ccio_ioc_init(ioc);
1579 ccio_init_resources(ioc); 1578 ccio_init_resources(ioc);
1580 hppa_dma_ops = &ccio_ops; 1579 hppa_dma_ops = &ccio_ops;
1581 dev->dev.platform_data = kmalloc(sizeof(struct pci_hba_data), GFP_KERNEL); 1580 dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL);
1582 1581
1583 /* if this fails, no I/O cards will work, so may as well bug */ 1582 /* if this fails, no I/O cards will work, so may as well bug */
1584 BUG_ON(dev->dev.platform_data == NULL); 1583 BUG_ON(dev->dev.platform_data == NULL);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 216d1d859326..3d1a7f98c676 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -989,14 +989,12 @@ static int __init dino_probe(struct parisc_device *dev)
989*/ 989*/
990 } 990 }
991 991
992 dino_dev = kmalloc(sizeof(struct dino_device), GFP_KERNEL); 992 dino_dev = kzalloc(sizeof(struct dino_device), GFP_KERNEL);
993 if (!dino_dev) { 993 if (!dino_dev) {
994 printk("dino_init_chip - couldn't alloc dino_device\n"); 994 printk("dino_init_chip - couldn't alloc dino_device\n");
995 return 1; 995 return 1;
996 } 996 }
997 997
998 memset(dino_dev, 0, sizeof(struct dino_device));
999
1000 dino_dev->hba.dev = dev; 998 dino_dev->hba.dev = dev;
1001 dino_dev->hba.base_addr = ioremap(hpa, 4096); 999 dino_dev->hba.base_addr = ioremap(hpa, 4096);
1002 dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */ 1000 dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 5edf93f80757..07dc2b6d4e93 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -60,12 +60,11 @@ static int hppb_probe(struct parisc_device *dev)
60 } 60 }
61 61
62 if(card->hpa) { 62 if(card->hpa) {
63 card->next = kmalloc(sizeof(struct hppb_card), GFP_KERNEL); 63 card->next = kzalloc(sizeof(struct hppb_card), GFP_KERNEL);
64 if(!card->next) { 64 if(!card->next) {
65 printk(KERN_ERR "HP-PB: Unable to allocate memory.\n"); 65 printk(KERN_ERR "HP-PB: Unable to allocate memory.\n");
66 return 1; 66 return 1;
67 } 67 }
68 memset(card->next, '\0', sizeof(struct hppb_card));
69 card = card->next; 68 card = card->next;
70 } 69 }
71 printk(KERN_INFO "Found GeckoBoa at 0x%lx\n", dev->hpa.start); 70 printk(KERN_INFO "Found GeckoBoa at 0x%lx\n", dev->hpa.start);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 19657efa8dc3..8d7a36392eb8 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -873,28 +873,24 @@ void *iosapic_register(unsigned long hpa)
873 return NULL; 873 return NULL;
874 } 874 }
875 875
876 isi = (struct iosapic_info *)kmalloc(sizeof(struct iosapic_info), GFP_KERNEL); 876 isi = (struct iosapic_info *)kzalloc(sizeof(struct iosapic_info), GFP_KERNEL);
877 if (!isi) { 877 if (!isi) {
878 BUG(); 878 BUG();
879 return NULL; 879 return NULL;
880 } 880 }
881 881
882 memset(isi, 0, sizeof(struct iosapic_info));
883
884 isi->addr = ioremap(hpa, 4096); 882 isi->addr = ioremap(hpa, 4096);
885 isi->isi_hpa = hpa; 883 isi->isi_hpa = hpa;
886 isi->isi_version = iosapic_rd_version(isi); 884 isi->isi_version = iosapic_rd_version(isi);
887 isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1; 885 isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
888 886
889 vip = isi->isi_vector = (struct vector_info *) 887 vip = isi->isi_vector = (struct vector_info *)
890 kmalloc(sizeof(struct vector_info) * isi->isi_num_vectors, GFP_KERNEL); 888 kzalloc(sizeof(struct vector_info) * isi->isi_num_vectors, GFP_KERNEL);
891 if (vip == NULL) { 889 if (vip == NULL) {
892 kfree(isi); 890 kfree(isi);
893 return NULL; 891 return NULL;
894 } 892 }
895 893
896 memset(vip, 0, sizeof(struct vector_info) * isi->isi_num_vectors);
897
898 for (cnt=0; cnt < isi->isi_num_vectors; cnt++, vip++) { 894 for (cnt=0; cnt < isi->isi_num_vectors; cnt++, vip++) {
899 vip->irqline = (unsigned char) cnt; 895 vip->irqline = (unsigned char) cnt;
900 vip->iosapic = isi; 896 vip->iosapic = isi;
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
index 2b3ba1dcf332..cb3d28176129 100644
--- a/drivers/parisc/lasi.c
+++ b/drivers/parisc/lasi.c
@@ -166,11 +166,12 @@ static void lasi_power_off(void)
166int __init 166int __init
167lasi_init_chip(struct parisc_device *dev) 167lasi_init_chip(struct parisc_device *dev)
168{ 168{
169 extern void (*chassis_power_off)(void);
169 struct gsc_asic *lasi; 170 struct gsc_asic *lasi;
170 struct gsc_irq gsc_irq; 171 struct gsc_irq gsc_irq;
171 int ret; 172 int ret;
172 173
173 lasi = kmalloc(sizeof(*lasi), GFP_KERNEL); 174 lasi = kzalloc(sizeof(*lasi), GFP_KERNEL);
174 if (!lasi) 175 if (!lasi)
175 return -ENOMEM; 176 return -ENOMEM;
176 177
@@ -222,7 +223,7 @@ lasi_init_chip(struct parisc_device *dev)
222 * ensure that only the first LASI (the one controlling the power off) 223 * ensure that only the first LASI (the one controlling the power off)
223 * should set the HPA here */ 224 * should set the HPA here */
224 lasi_power_off_hpa = lasi->hpa; 225 lasi_power_off_hpa = lasi->hpa;
225 pm_power_off = lasi_power_off; 226 chassis_power_off = lasi_power_off;
226 227
227 return ret; 228 return ret;
228} 229}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index cbae8c8963fa..e8a2a4a852f5 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1565,7 +1565,7 @@ lba_driver_probe(struct parisc_device *dev)
1565 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) { 1565 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
1566 func_class &= 0xff; 1566 func_class &= 0xff;
1567 version = kmalloc(6, GFP_KERNEL); 1567 version = kmalloc(6, GFP_KERNEL);
1568 sprintf(version,"TR%d.%d",(func_class >> 4),(func_class & 0xf)); 1568 snprintf(version, 6, "TR%d.%d",(func_class >> 4),(func_class & 0xf));
1569 /* We could use one printk for both Elroy and Mercury, 1569 /* We could use one printk for both Elroy and Mercury,
1570 * but for the mask for func_class. 1570 * but for the mask for func_class.
1571 */ 1571 */
@@ -1586,14 +1586,12 @@ lba_driver_probe(struct parisc_device *dev)
1586 ** have an IRT entry will get NULL back from iosapic code. 1586 ** have an IRT entry will get NULL back from iosapic code.
1587 */ 1587 */
1588 1588
1589 lba_dev = kmalloc(sizeof(struct lba_device), GFP_KERNEL); 1589 lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
1590 if (!lba_dev) { 1590 if (!lba_dev) {
1591 printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n"); 1591 printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
1592 return(1); 1592 return(1);
1593 } 1593 }
1594 1594
1595 memset(lba_dev, 0, sizeof(struct lba_device));
1596
1597 1595
1598 /* ---------- First : initialize data we already have --------- */ 1596 /* ---------- First : initialize data we already have --------- */
1599 1597
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 42a3c54e8e6c..a28e17898fbd 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Interfaces to retrieve and set PDC Stable options (firmware) 2 * Interfaces to retrieve and set PDC Stable options (firmware)
3 * 3 *
4 * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org> 4 * Copyright (C) 2005-2006 Thibaut VARENE <varenet@parisc-linux.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -26,11 +26,19 @@
26 * 26 *
27 * Since locations between 96 and 192 are the various paths, most (if not 27 * Since locations between 96 and 192 are the various paths, most (if not
28 * all) PA-RISC machines should have them. Anyway, for safety reasons, the 28 * all) PA-RISC machines should have them. Anyway, for safety reasons, the
29 * following code can deal with only 96 bytes of Stable Storage, and all 29 * following code can deal with just 96 bytes of Stable Storage, and all
30 * sizes between 96 and 192 bytes (provided they are multiple of struct 30 * sizes between 96 and 192 bytes (provided they are multiple of struct
31 * device_path size, eg: 128, 160 and 192) to provide full information. 31 * device_path size, eg: 128, 160 and 192) to provide full information.
32 * The code makes no use of data above 192 bytes. One last word: there's one 32 * The code makes no use of data above 192 bytes. One last word: there's one
33 * path we can always count on: the primary path. 33 * path we can always count on: the primary path.
34 *
35 * The current policy wrt file permissions is:
36 * - write: root only
37 * - read: (reading triggers PDC calls) ? root only : everyone
38 * The rationale is that PDC calls could hog (DoS) the machine.
39 *
40 * TODO:
41 * - timer/fastsize write calls
34 */ 42 */
35 43
36#undef PDCS_DEBUG 44#undef PDCS_DEBUG
@@ -50,13 +58,15 @@
50#include <linux/kobject.h> 58#include <linux/kobject.h>
51#include <linux/device.h> 59#include <linux/device.h>
52#include <linux/errno.h> 60#include <linux/errno.h>
61#include <linux/spinlock.h>
53 62
54#include <asm/pdc.h> 63#include <asm/pdc.h>
55#include <asm/page.h> 64#include <asm/page.h>
56#include <asm/uaccess.h> 65#include <asm/uaccess.h>
57#include <asm/hardware.h> 66#include <asm/hardware.h>
58 67
59#define PDCS_VERSION "0.10" 68#define PDCS_VERSION "0.22"
69#define PDCS_PREFIX "PDC Stable Storage"
60 70
61#define PDCS_ADDR_PPRI 0x00 71#define PDCS_ADDR_PPRI 0x00
62#define PDCS_ADDR_OSID 0x40 72#define PDCS_ADDR_OSID 0x40
@@ -70,10 +80,12 @@ MODULE_DESCRIPTION("sysfs interface to HP PDC Stable Storage data");
70MODULE_LICENSE("GPL"); 80MODULE_LICENSE("GPL");
71MODULE_VERSION(PDCS_VERSION); 81MODULE_VERSION(PDCS_VERSION);
72 82
83/* holds Stable Storage size. Initialized once and for all, no lock needed */
73static unsigned long pdcs_size __read_mostly; 84static unsigned long pdcs_size __read_mostly;
74 85
75/* This struct defines what we need to deal with a parisc pdc path entry */ 86/* This struct defines what we need to deal with a parisc pdc path entry */
76struct pdcspath_entry { 87struct pdcspath_entry {
88 rwlock_t rw_lock; /* to protect path entry access */
77 short ready; /* entry record is valid if != 0 */ 89 short ready; /* entry record is valid if != 0 */
78 unsigned long addr; /* entry address in stable storage */ 90 unsigned long addr; /* entry address in stable storage */
79 char *name; /* entry name */ 91 char *name; /* entry name */
@@ -121,6 +133,8 @@ struct pdcspath_attribute paths_attr_##_name = { \
121 * content of the stable storage WRT various paths in these structs. We read 133 * content of the stable storage WRT various paths in these structs. We read
122 * these structs when reading the files, and we will write to these structs when 134 * these structs when reading the files, and we will write to these structs when
123 * writing to the files, and only then write them back to the Stable Storage. 135 * writing to the files, and only then write them back to the Stable Storage.
136 *
137 * This function expects to be called with @entry->rw_lock write-hold.
124 */ 138 */
125static int 139static int
126pdcspath_fetch(struct pdcspath_entry *entry) 140pdcspath_fetch(struct pdcspath_entry *entry)
@@ -160,14 +174,15 @@ pdcspath_fetch(struct pdcspath_entry *entry)
160 * pointer, from which it'll find out the corresponding hardware path. 174 * pointer, from which it'll find out the corresponding hardware path.
161 * For now we do not handle the case where there's an error in writing to the 175 * For now we do not handle the case where there's an error in writing to the
162 * Stable Storage area, so you'd better not mess up the data :P 176 * Stable Storage area, so you'd better not mess up the data :P
177 *
178 * This function expects to be called with @entry->rw_lock write-hold.
163 */ 179 */
164static int 180static void
165pdcspath_store(struct pdcspath_entry *entry) 181pdcspath_store(struct pdcspath_entry *entry)
166{ 182{
167 struct device_path *devpath; 183 struct device_path *devpath;
168 184
169 if (!entry) 185 BUG_ON(!entry);
170 return -EINVAL;
171 186
172 devpath = &entry->devpath; 187 devpath = &entry->devpath;
173 188
@@ -176,10 +191,8 @@ pdcspath_store(struct pdcspath_entry *entry)
176 First case, we don't have a preset hwpath... */ 191 First case, we don't have a preset hwpath... */
177 if (!entry->ready) { 192 if (!entry->ready) {
178 /* ...but we have a device, map it */ 193 /* ...but we have a device, map it */
179 if (entry->dev) 194 BUG_ON(!entry->dev);
180 device_to_hwpath(entry->dev, (struct hardware_path *)devpath); 195 device_to_hwpath(entry->dev, (struct hardware_path *)devpath);
181 else
182 return -EINVAL;
183 } 196 }
184 /* else, we expect the provided hwpath to be valid. */ 197 /* else, we expect the provided hwpath to be valid. */
185 198
@@ -191,15 +204,13 @@ pdcspath_store(struct pdcspath_entry *entry)
191 printk(KERN_ERR "%s: an error occured when writing to PDC.\n" 204 printk(KERN_ERR "%s: an error occured when writing to PDC.\n"
192 "It is likely that the Stable Storage data has been corrupted.\n" 205 "It is likely that the Stable Storage data has been corrupted.\n"
193 "Please check it carefully upon next reboot.\n", __func__); 206 "Please check it carefully upon next reboot.\n", __func__);
194 return -EIO; 207 WARN_ON(1);
195 } 208 }
196 209
197 /* kobject is already registered */ 210 /* kobject is already registered */
198 entry->ready = 2; 211 entry->ready = 2;
199 212
200 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev); 213 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev);
201
202 return 0;
203} 214}
204 215
205/** 216/**
@@ -214,14 +225,17 @@ pdcspath_hwpath_read(struct pdcspath_entry *entry, char *buf)
214{ 225{
215 char *out = buf; 226 char *out = buf;
216 struct device_path *devpath; 227 struct device_path *devpath;
217 unsigned short i; 228 short i;
218 229
219 if (!entry || !buf) 230 if (!entry || !buf)
220 return -EINVAL; 231 return -EINVAL;
221 232
233 read_lock(&entry->rw_lock);
222 devpath = &entry->devpath; 234 devpath = &entry->devpath;
235 i = entry->ready;
236 read_unlock(&entry->rw_lock);
223 237
224 if (!entry->ready) 238 if (!i) /* entry is not ready */
225 return -ENODATA; 239 return -ENODATA;
226 240
227 for (i = 0; i < 6; i++) { 241 for (i = 0; i < 6; i++) {
@@ -242,7 +256,7 @@ pdcspath_hwpath_read(struct pdcspath_entry *entry, char *buf)
242 * 256 *
243 * We will call this function to change the current hardware path. 257 * We will call this function to change the current hardware path.
244 * Hardware paths are to be given '/'-delimited, without brackets. 258 * Hardware paths are to be given '/'-delimited, without brackets.
245 * We take care to make sure that the provided path actually maps to an existing 259 * We make sure that the provided path actually maps to an existing
246 * device, BUT nothing would prevent some foolish user to set the path to some 260 * device, BUT nothing would prevent some foolish user to set the path to some
247 * PCI bridge or even a CPU... 261 * PCI bridge or even a CPU...
248 * A better work around would be to make sure we are at the end of a device tree 262 * A better work around would be to make sure we are at the end of a device tree
@@ -298,17 +312,19 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
298 } 312 }
299 313
300 /* So far so good, let's get in deep */ 314 /* So far so good, let's get in deep */
315 write_lock(&entry->rw_lock);
301 entry->ready = 0; 316 entry->ready = 0;
302 entry->dev = dev; 317 entry->dev = dev;
303 318
304 /* Now, dive in. Write back to the hardware */ 319 /* Now, dive in. Write back to the hardware */
305 WARN_ON(pdcspath_store(entry)); /* this warn should *NEVER* happen */ 320 pdcspath_store(entry);
306 321
307 /* Update the symlink to the real device */ 322 /* Update the symlink to the real device */
308 sysfs_remove_link(&entry->kobj, "device"); 323 sysfs_remove_link(&entry->kobj, "device");
309 sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); 324 sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
325 write_unlock(&entry->rw_lock);
310 326
311 printk(KERN_INFO "PDC Stable Storage: changed \"%s\" path to \"%s\"\n", 327 printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n",
312 entry->name, buf); 328 entry->name, buf);
313 329
314 return count; 330 return count;
@@ -326,14 +342,17 @@ pdcspath_layer_read(struct pdcspath_entry *entry, char *buf)
326{ 342{
327 char *out = buf; 343 char *out = buf;
328 struct device_path *devpath; 344 struct device_path *devpath;
329 unsigned short i; 345 short i;
330 346
331 if (!entry || !buf) 347 if (!entry || !buf)
332 return -EINVAL; 348 return -EINVAL;
333 349
350 read_lock(&entry->rw_lock);
334 devpath = &entry->devpath; 351 devpath = &entry->devpath;
352 i = entry->ready;
353 read_unlock(&entry->rw_lock);
335 354
336 if (!entry->ready) 355 if (!i) /* entry is not ready */
337 return -ENODATA; 356 return -ENODATA;
338 357
339 for (i = 0; devpath->layers[i] && (likely(i < 6)); i++) 358 for (i = 0; devpath->layers[i] && (likely(i < 6)); i++)
@@ -388,15 +407,17 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count
388 } 407 }
389 408
390 /* So far so good, let's get in deep */ 409 /* So far so good, let's get in deep */
410 write_lock(&entry->rw_lock);
391 411
392 /* First, overwrite the current layers with the new ones, not touching 412 /* First, overwrite the current layers with the new ones, not touching
393 the hardware path. */ 413 the hardware path. */
394 memcpy(&entry->devpath.layers, &layers, sizeof(layers)); 414 memcpy(&entry->devpath.layers, &layers, sizeof(layers));
395 415
396 /* Now, dive in. Write back to the hardware */ 416 /* Now, dive in. Write back to the hardware */
397 WARN_ON(pdcspath_store(entry)); /* this warn should *NEVER* happen */ 417 pdcspath_store(entry);
418 write_unlock(&entry->rw_lock);
398 419
399 printk(KERN_INFO "PDC Stable Storage: changed \"%s\" layers to \"%s\"\n", 420 printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" layers to \"%s\"\n",
400 entry->name, buf); 421 entry->name, buf);
401 422
402 return count; 423 return count;
@@ -415,9 +436,6 @@ pdcspath_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
415 struct pdcspath_attribute *pdcs_attr = to_pdcspath_attribute(attr); 436 struct pdcspath_attribute *pdcs_attr = to_pdcspath_attribute(attr);
416 ssize_t ret = 0; 437 ssize_t ret = 0;
417 438
418 if (!capable(CAP_SYS_ADMIN))
419 return -EACCES;
420
421 if (pdcs_attr->show) 439 if (pdcs_attr->show)
422 ret = pdcs_attr->show(entry, buf); 440 ret = pdcs_attr->show(entry, buf);
423 441
@@ -454,8 +472,8 @@ static struct sysfs_ops pdcspath_attr_ops = {
454}; 472};
455 473
456/* These are the two attributes of any PDC path. */ 474/* These are the two attributes of any PDC path. */
457static PATHS_ATTR(hwpath, 0600, pdcspath_hwpath_read, pdcspath_hwpath_write); 475static PATHS_ATTR(hwpath, 0644, pdcspath_hwpath_read, pdcspath_hwpath_write);
458static PATHS_ATTR(layer, 0600, pdcspath_layer_read, pdcspath_layer_write); 476static PATHS_ATTR(layer, 0644, pdcspath_layer_read, pdcspath_layer_write);
459 477
460static struct attribute *paths_subsys_attrs[] = { 478static struct attribute *paths_subsys_attrs[] = {
461 &paths_attr_hwpath.attr, 479 &paths_attr_hwpath.attr,
@@ -484,36 +502,119 @@ static struct pdcspath_entry *pdcspath_entries[] = {
484 NULL, 502 NULL,
485}; 503};
486 504
505
506/* For more insight of what's going on here, refer to PDC Procedures doc,
507 * Section PDC_STABLE */
508
487/** 509/**
488 * pdcs_info_read - Pretty printing of the remaining useful data. 510 * pdcs_size_read - Stable Storage size output.
489 * @entry: An allocated and populated subsytem struct. We don't use it tho. 511 * @entry: An allocated and populated subsytem struct. We don't use it tho.
490 * @buf: The output buffer to write to. 512 * @buf: The output buffer to write to.
491 *
492 * We will call this function to format the output of the 'info' attribute file.
493 * Please refer to PDC Procedures documentation, section PDC_STABLE to get a
494 * better insight of what we're doing here.
495 */ 513 */
496static ssize_t 514static ssize_t
497pdcs_info_read(struct subsystem *entry, char *buf) 515pdcs_size_read(struct subsystem *entry, char *buf)
498{ 516{
499 char *out = buf; 517 char *out = buf;
500 __u32 result;
501 struct device_path devpath;
502 char *tmpstr = NULL;
503 518
504 if (!entry || !buf) 519 if (!entry || !buf)
505 return -EINVAL; 520 return -EINVAL;
506 521
507 /* show the size of the stable storage */ 522 /* show the size of the stable storage */
508 out += sprintf(out, "Stable Storage size: %ld bytes\n", pdcs_size); 523 out += sprintf(out, "%ld\n", pdcs_size);
509 524
510 /* deal with flags */ 525 return out - buf;
511 if (pdc_stable_read(PDCS_ADDR_PPRI, &devpath, sizeof(devpath)) != PDC_OK) 526}
512 return -EIO; 527
528/**
529 * pdcs_auto_read - Stable Storage autoboot/search flag output.
530 * @entry: An allocated and populated subsytem struct. We don't use it tho.
531 * @buf: The output buffer to write to.
532 * @knob: The PF_AUTOBOOT or PF_AUTOSEARCH flag
533 */
534static ssize_t
535pdcs_auto_read(struct subsystem *entry, char *buf, int knob)
536{
537 char *out = buf;
538 struct pdcspath_entry *pathentry;
513 539
514 out += sprintf(out, "Autoboot: %s\n", (devpath.flags & PF_AUTOBOOT) ? "On" : "Off"); 540 if (!entry || !buf)
515 out += sprintf(out, "Autosearch: %s\n", (devpath.flags & PF_AUTOSEARCH) ? "On" : "Off"); 541 return -EINVAL;
516 out += sprintf(out, "Timer: %u s\n", (devpath.flags & PF_TIMER) ? (1 << (devpath.flags & PF_TIMER)) : 0); 542
543 /* Current flags are stored in primary boot path entry */
544 pathentry = &pdcspath_entry_primary;
545
546 read_lock(&pathentry->rw_lock);
547 out += sprintf(out, "%s\n", (pathentry->devpath.flags & knob) ?
548 "On" : "Off");
549 read_unlock(&pathentry->rw_lock);
550
551 return out - buf;
552}
553
554/**
555 * pdcs_autoboot_read - Stable Storage autoboot flag output.
556 * @entry: An allocated and populated subsytem struct. We don't use it tho.
557 * @buf: The output buffer to write to.
558 */
559static inline ssize_t
560pdcs_autoboot_read(struct subsystem *entry, char *buf)
561{
562 return pdcs_auto_read(entry, buf, PF_AUTOBOOT);
563}
564
565/**
566 * pdcs_autosearch_read - Stable Storage autoboot flag output.
567 * @entry: An allocated and populated subsytem struct. We don't use it tho.
568 * @buf: The output buffer to write to.
569 */
570static inline ssize_t
571pdcs_autosearch_read(struct subsystem *entry, char *buf)
572{
573 return pdcs_auto_read(entry, buf, PF_AUTOSEARCH);
574}
575
576/**
577 * pdcs_timer_read - Stable Storage timer count output (in seconds).
578 * @entry: An allocated and populated subsytem struct. We don't use it tho.
579 * @buf: The output buffer to write to.
580 *
581 * The value of the timer field correponds to a number of seconds in powers of 2.
582 */
583static ssize_t
584pdcs_timer_read(struct subsystem *entry, char *buf)
585{
586 char *out = buf;
587 struct pdcspath_entry *pathentry;
588
589 if (!entry || !buf)
590 return -EINVAL;
591
592 /* Current flags are stored in primary boot path entry */
593 pathentry = &pdcspath_entry_primary;
594
595 /* print the timer value in seconds */
596 read_lock(&pathentry->rw_lock);
597 out += sprintf(out, "%u\n", (pathentry->devpath.flags & PF_TIMER) ?
598 (1 << (pathentry->devpath.flags & PF_TIMER)) : 0);
599 read_unlock(&pathentry->rw_lock);
600
601 return out - buf;
602}
603
604/**
605 * pdcs_osid_read - Stable Storage OS ID register output.
606 * @entry: An allocated and populated subsytem struct. We don't use it tho.
607 * @buf: The output buffer to write to.
608 */
609static ssize_t
610pdcs_osid_read(struct subsystem *entry, char *buf)
611{
612 char *out = buf;
613 __u32 result;
614 char *tmpstr = NULL;
615
616 if (!entry || !buf)
617 return -EINVAL;
517 618
518 /* get OSID */ 619 /* get OSID */
519 if (pdc_stable_read(PDCS_ADDR_OSID, &result, sizeof(result)) != PDC_OK) 620 if (pdc_stable_read(PDCS_ADDR_OSID, &result, sizeof(result)) != PDC_OK)
@@ -529,13 +630,31 @@ pdcs_info_read(struct subsystem *entry, char *buf)
529 case 0x0005: tmpstr = "Novell Netware dependent data"; break; 630 case 0x0005: tmpstr = "Novell Netware dependent data"; break;
530 default: tmpstr = "Unknown"; break; 631 default: tmpstr = "Unknown"; break;
531 } 632 }
532 out += sprintf(out, "OS ID: %s (0x%.4x)\n", tmpstr, (result >> 16)); 633 out += sprintf(out, "%s (0x%.4x)\n", tmpstr, (result >> 16));
634
635 return out - buf;
636}
637
638/**
639 * pdcs_fastsize_read - Stable Storage FastSize register output.
640 * @entry: An allocated and populated subsytem struct. We don't use it tho.
641 * @buf: The output buffer to write to.
642 *
643 * This register holds the amount of system RAM to be tested during boot sequence.
644 */
645static ssize_t
646pdcs_fastsize_read(struct subsystem *entry, char *buf)
647{
648 char *out = buf;
649 __u32 result;
650
651 if (!entry || !buf)
652 return -EINVAL;
533 653
534 /* get fast-size */ 654 /* get fast-size */
535 if (pdc_stable_read(PDCS_ADDR_FSIZ, &result, sizeof(result)) != PDC_OK) 655 if (pdc_stable_read(PDCS_ADDR_FSIZ, &result, sizeof(result)) != PDC_OK)
536 return -EIO; 656 return -EIO;
537 657
538 out += sprintf(out, "Memory tested: ");
539 if ((result & 0x0F) < 0x0E) 658 if ((result & 0x0F) < 0x0E)
540 out += sprintf(out, "%d kB", (1<<(result & 0x0F))*256); 659 out += sprintf(out, "%d kB", (1<<(result & 0x0F))*256);
541 else 660 else
@@ -546,22 +665,18 @@ pdcs_info_read(struct subsystem *entry, char *buf)
546} 665}
547 666
548/** 667/**
549 * pdcs_info_write - This function handles boot flag modifying. 668 * pdcs_auto_write - This function handles autoboot/search flag modifying.
550 * @entry: An allocated and populated subsytem struct. We don't use it tho. 669 * @entry: An allocated and populated subsytem struct. We don't use it tho.
551 * @buf: The input buffer to read from. 670 * @buf: The input buffer to read from.
552 * @count: The number of bytes to be read. 671 * @count: The number of bytes to be read.
672 * @knob: The PF_AUTOBOOT or PF_AUTOSEARCH flag
553 * 673 *
554 * We will call this function to change the current boot flags. 674 * We will call this function to change the current autoboot flag.
555 * We expect a precise syntax: 675 * We expect a precise syntax:
556 * \"n n\" (n == 0 or 1) to toggle respectively AutoBoot and AutoSearch 676 * \"n\" (n == 0 or 1) to toggle AutoBoot Off or On
557 *
558 * As of now there is no incentive on my side to provide more "knobs" to that
559 * interface, since modifying the rest of the data is pretty meaningless when
560 * the machine is running and for the expected use of that facility, such as
561 * PALO setting up the boot disk when installing a Linux distribution...
562 */ 677 */
563static ssize_t 678static ssize_t
564pdcs_info_write(struct subsystem *entry, const char *buf, size_t count) 679pdcs_auto_write(struct subsystem *entry, const char *buf, size_t count, int knob)
565{ 680{
566 struct pdcspath_entry *pathentry; 681 struct pdcspath_entry *pathentry;
567 unsigned char flags; 682 unsigned char flags;
@@ -582,7 +697,9 @@ pdcs_info_write(struct subsystem *entry, const char *buf, size_t count)
582 pathentry = &pdcspath_entry_primary; 697 pathentry = &pdcspath_entry_primary;
583 698
584 /* Be nice to the existing flag record */ 699 /* Be nice to the existing flag record */
700 read_lock(&pathentry->rw_lock);
585 flags = pathentry->devpath.flags; 701 flags = pathentry->devpath.flags;
702 read_unlock(&pathentry->rw_lock);
586 703
587 DPRINTK("%s: flags before: 0x%X\n", __func__, flags); 704 DPRINTK("%s: flags before: 0x%X\n", __func__, flags);
588 705
@@ -595,50 +712,85 @@ pdcs_info_write(struct subsystem *entry, const char *buf, size_t count)
595 if ((c != 0) && (c != 1)) 712 if ((c != 0) && (c != 1))
596 goto parse_error; 713 goto parse_error;
597 if (c == 0) 714 if (c == 0)
598 flags &= ~PF_AUTOBOOT; 715 flags &= ~knob;
599 else 716 else
600 flags |= PF_AUTOBOOT; 717 flags |= knob;
601
602 if (*temp++ != ' ')
603 goto parse_error;
604
605 c = *temp++ - '0';
606 if ((c != 0) && (c != 1))
607 goto parse_error;
608 if (c == 0)
609 flags &= ~PF_AUTOSEARCH;
610 else
611 flags |= PF_AUTOSEARCH;
612 718
613 DPRINTK("%s: flags after: 0x%X\n", __func__, flags); 719 DPRINTK("%s: flags after: 0x%X\n", __func__, flags);
614 720
615 /* So far so good, let's get in deep */ 721 /* So far so good, let's get in deep */
722 write_lock(&pathentry->rw_lock);
616 723
617 /* Change the path entry flags first */ 724 /* Change the path entry flags first */
618 pathentry->devpath.flags = flags; 725 pathentry->devpath.flags = flags;
619 726
620 /* Now, dive in. Write back to the hardware */ 727 /* Now, dive in. Write back to the hardware */
621 WARN_ON(pdcspath_store(pathentry)); /* this warn should *NEVER* happen */ 728 pdcspath_store(pathentry);
729 write_unlock(&pathentry->rw_lock);
622 730
623 printk(KERN_INFO "PDC Stable Storage: changed flags to \"%s\"\n", buf); 731 printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" to \"%s\"\n",
732 (knob & PF_AUTOBOOT) ? "autoboot" : "autosearch",
733 (flags & knob) ? "On" : "Off");
624 734
625 return count; 735 return count;
626 736
627parse_error: 737parse_error:
628 printk(KERN_WARNING "%s: Parse error: expect \"n n\" (n == 0 or 1) for AB and AS\n", __func__); 738 printk(KERN_WARNING "%s: Parse error: expect \"n\" (n == 0 or 1)\n", __func__);
629 return -EINVAL; 739 return -EINVAL;
630} 740}
631 741
632/* The last attribute (the 'root' one actually) with all remaining data. */ 742/**
633static PDCS_ATTR(info, 0600, pdcs_info_read, pdcs_info_write); 743 * pdcs_autoboot_write - This function handles autoboot flag modifying.
744 * @entry: An allocated and populated subsytem struct. We don't use it tho.
745 * @buf: The input buffer to read from.
746 * @count: The number of bytes to be read.
747 *
748 * We will call this function to change the current boot flags.
749 * We expect a precise syntax:
750 * \"n\" (n == 0 or 1) to toggle AutoSearch Off or On
751 */
752static inline ssize_t
753pdcs_autoboot_write(struct subsystem *entry, const char *buf, size_t count)
754{
755 return pdcs_auto_write(entry, buf, count, PF_AUTOBOOT);
756}
757
758/**
759 * pdcs_autosearch_write - This function handles autosearch flag modifying.
760 * @entry: An allocated and populated subsytem struct. We don't use it tho.
761 * @buf: The input buffer to read from.
762 * @count: The number of bytes to be read.
763 *
764 * We will call this function to change the current boot flags.
765 * We expect a precise syntax:
766 * \"n\" (n == 0 or 1) to toggle AutoSearch Off or On
767 */
768static inline ssize_t
769pdcs_autosearch_write(struct subsystem *entry, const char *buf, size_t count)
770{
771 return pdcs_auto_write(entry, buf, count, PF_AUTOSEARCH);
772}
773
774/* The remaining attributes. */
775static PDCS_ATTR(size, 0444, pdcs_size_read, NULL);
776static PDCS_ATTR(autoboot, 0644, pdcs_autoboot_read, pdcs_autoboot_write);
777static PDCS_ATTR(autosearch, 0644, pdcs_autosearch_read, pdcs_autosearch_write);
778static PDCS_ATTR(timer, 0444, pdcs_timer_read, NULL);
779static PDCS_ATTR(osid, 0400, pdcs_osid_read, NULL);
780static PDCS_ATTR(fastsize, 0400, pdcs_fastsize_read, NULL);
634 781
635static struct subsys_attribute *pdcs_subsys_attrs[] = { 782static struct subsys_attribute *pdcs_subsys_attrs[] = {
636 &pdcs_attr_info, 783 &pdcs_attr_size,
637 NULL, /* maybe more in the future? */ 784 &pdcs_attr_autoboot,
785 &pdcs_attr_autosearch,
786 &pdcs_attr_timer,
787 &pdcs_attr_osid,
788 &pdcs_attr_fastsize,
789 NULL,
638}; 790};
639 791
640static decl_subsys(paths, &ktype_pdcspath, NULL); 792static decl_subsys(paths, &ktype_pdcspath, NULL);
641static decl_subsys(pdc, NULL, NULL); 793static decl_subsys(stable, NULL, NULL);
642 794
643/** 795/**
644 * pdcs_register_pathentries - Prepares path entries kobjects for sysfs usage. 796 * pdcs_register_pathentries - Prepares path entries kobjects for sysfs usage.
@@ -656,8 +808,16 @@ pdcs_register_pathentries(void)
656 struct pdcspath_entry *entry; 808 struct pdcspath_entry *entry;
657 int err; 809 int err;
658 810
811 /* Initialize the entries rw_lock before anything else */
812 for (i = 0; (entry = pdcspath_entries[i]); i++)
813 rwlock_init(&entry->rw_lock);
814
659 for (i = 0; (entry = pdcspath_entries[i]); i++) { 815 for (i = 0; (entry = pdcspath_entries[i]); i++) {
660 if (pdcspath_fetch(entry) < 0) 816 write_lock(&entry->rw_lock);
817 err = pdcspath_fetch(entry);
818 write_unlock(&entry->rw_lock);
819
820 if (err < 0)
661 continue; 821 continue;
662 822
663 if ((err = kobject_set_name(&entry->kobj, "%s", entry->name))) 823 if ((err = kobject_set_name(&entry->kobj, "%s", entry->name)))
@@ -667,13 +827,14 @@ pdcs_register_pathentries(void)
667 return err; 827 return err;
668 828
669 /* kobject is now registered */ 829 /* kobject is now registered */
830 write_lock(&entry->rw_lock);
670 entry->ready = 2; 831 entry->ready = 2;
671 832
672 if (!entry->dev)
673 continue;
674
675 /* Add a nice symlink to the real device */ 833 /* Add a nice symlink to the real device */
676 sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); 834 if (entry->dev)
835 sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
836
837 write_unlock(&entry->rw_lock);
677 } 838 }
678 839
679 return 0; 840 return 0;
@@ -688,14 +849,17 @@ pdcs_unregister_pathentries(void)
688 unsigned short i; 849 unsigned short i;
689 struct pdcspath_entry *entry; 850 struct pdcspath_entry *entry;
690 851
691 for (i = 0; (entry = pdcspath_entries[i]); i++) 852 for (i = 0; (entry = pdcspath_entries[i]); i++) {
853 read_lock(&entry->rw_lock);
692 if (entry->ready >= 2) 854 if (entry->ready >= 2)
693 kobject_unregister(&entry->kobj); 855 kobject_unregister(&entry->kobj);
856 read_unlock(&entry->rw_lock);
857 }
694} 858}
695 859
696/* 860/*
697 * For now we register the pdc subsystem with the firmware subsystem 861 * For now we register the stable subsystem with the firmware subsystem
698 * and the paths subsystem with the pdc subsystem 862 * and the paths subsystem with the stable subsystem
699 */ 863 */
700static int __init 864static int __init
701pdc_stable_init(void) 865pdc_stable_init(void)
@@ -707,19 +871,23 @@ pdc_stable_init(void)
707 if (pdc_stable_get_size(&pdcs_size) != PDC_OK) 871 if (pdc_stable_get_size(&pdcs_size) != PDC_OK)
708 return -ENODEV; 872 return -ENODEV;
709 873
710 printk(KERN_INFO "PDC Stable Storage facility v%s\n", PDCS_VERSION); 874 /* make sure we have enough data */
875 if (pdcs_size < 96)
876 return -ENODATA;
877
878 printk(KERN_INFO PDCS_PREFIX " facility v%s\n", PDCS_VERSION);
711 879
712 /* For now we'll register the pdc subsys within this driver */ 880 /* For now we'll register the stable subsys within this driver */
713 if ((rc = firmware_register(&pdc_subsys))) 881 if ((rc = firmware_register(&stable_subsys)))
714 goto fail_firmreg; 882 goto fail_firmreg;
715 883
716 /* Don't forget the info entry */ 884 /* Don't forget the root entries */
717 for (i = 0; (attr = pdcs_subsys_attrs[i]) && !error; i++) 885 for (i = 0; (attr = pdcs_subsys_attrs[i]) && !error; i++)
718 if (attr->show) 886 if (attr->show)
719 error = subsys_create_file(&pdc_subsys, attr); 887 error = subsys_create_file(&stable_subsys, attr);
720 888
721 /* register the paths subsys as a subsystem of pdc subsys */ 889 /* register the paths subsys as a subsystem of stable subsys */
722 kset_set_kset_s(&paths_subsys, pdc_subsys); 890 kset_set_kset_s(&paths_subsys, stable_subsys);
723 if ((rc= subsystem_register(&paths_subsys))) 891 if ((rc= subsystem_register(&paths_subsys)))
724 goto fail_subsysreg; 892 goto fail_subsysreg;
725 893
@@ -734,10 +902,10 @@ fail_pdcsreg:
734 subsystem_unregister(&paths_subsys); 902 subsystem_unregister(&paths_subsys);
735 903
736fail_subsysreg: 904fail_subsysreg:
737 firmware_unregister(&pdc_subsys); 905 firmware_unregister(&stable_subsys);
738 906
739fail_firmreg: 907fail_firmreg:
740 printk(KERN_INFO "PDC Stable Storage bailing out\n"); 908 printk(KERN_INFO PDCS_PREFIX " bailing out\n");
741 return rc; 909 return rc;
742} 910}
743 911
@@ -747,7 +915,7 @@ pdc_stable_exit(void)
747 pdcs_unregister_pathentries(); 915 pdcs_unregister_pathentries();
748 subsystem_unregister(&paths_subsys); 916 subsystem_unregister(&paths_subsys);
749 917
750 firmware_unregister(&pdc_subsys); 918 firmware_unregister(&stable_subsys);
751} 919}
752 920
753 921
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index c85653f315aa..52f265e97729 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -2064,14 +2064,13 @@ sba_driver_callback(struct parisc_device *dev)
2064 printk(KERN_INFO "%s found %s at 0x%lx\n", 2064 printk(KERN_INFO "%s found %s at 0x%lx\n",
2065 MODULE_NAME, version, dev->hpa.start); 2065 MODULE_NAME, version, dev->hpa.start);
2066 2066
2067 sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL); 2067 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
2068 if (!sba_dev) { 2068 if (!sba_dev) {
2069 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n"); 2069 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
2070 return -ENOMEM; 2070 return -ENOMEM;
2071 } 2071 }
2072 2072
2073 parisc_set_drvdata(dev, sba_dev); 2073 parisc_set_drvdata(dev, sba_dev);
2074 memset(sba_dev, 0, sizeof(struct sba_device));
2075 2074
2076 for(i=0; i<MAX_IOC; i++) 2075 for(i=0; i<MAX_IOC; i++)
2077 spin_lock_init(&(sba_dev->ioc[i].res_lock)); 2076 spin_lock_init(&(sba_dev->ioc[i].res_lock));
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index d14888e149bb..ba971fecd0d8 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -89,6 +89,9 @@ static struct superio_device sio_dev;
89#define DBG_INIT(x...) 89#define DBG_INIT(x...)
90#endif 90#endif
91 91
92#define SUPERIO "SuperIO"
93#define PFX SUPERIO ": "
94
92static irqreturn_t 95static irqreturn_t
93superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs) 96superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs)
94{ 97{
@@ -117,7 +120,7 @@ superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs)
117 local_irq = results & 0x0f; 120 local_irq = results & 0x0f;
118 121
119 if (local_irq == 2 || local_irq > 7) { 122 if (local_irq == 2 || local_irq > 7) {
120 printk(KERN_ERR "SuperIO: slave interrupted!\n"); 123 printk(KERN_ERR PFX "slave interrupted!\n");
121 return IRQ_HANDLED; 124 return IRQ_HANDLED;
122 } 125 }
123 126
@@ -128,7 +131,7 @@ superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs)
128 outb(OCW3_ISR,IC_PIC1+0); 131 outb(OCW3_ISR,IC_PIC1+0);
129 results = inb(IC_PIC1+0); 132 results = inb(IC_PIC1+0);
130 if ((results & 0x80) == 0) { /* if ISR7 not set: spurious */ 133 if ((results & 0x80) == 0) { /* if ISR7 not set: spurious */
131 printk(KERN_WARNING "SuperIO: spurious interrupt!\n"); 134 printk(KERN_WARNING PFX "spurious interrupt!\n");
132 return IRQ_HANDLED; 135 return IRQ_HANDLED;
133 } 136 }
134 } 137 }
@@ -163,27 +166,27 @@ superio_init(struct pci_dev *pcidev)
163 /* ...then properly fixup the USB to point at suckyio PIC */ 166 /* ...then properly fixup the USB to point at suckyio PIC */
164 sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev); 167 sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev);
165 168
166 printk(KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i) \n", 169 printk(KERN_INFO PFX "Found NS87560 Legacy I/O device at %s (IRQ %i) \n",
167 pci_name(pdev), pdev->irq); 170 pci_name(pdev), pdev->irq);
168 171
169 pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base); 172 pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base);
170 sio->sp1_base &= ~1; 173 sio->sp1_base &= ~1;
171 printk (KERN_INFO "SuperIO: Serial port 1 at 0x%x\n", sio->sp1_base); 174 printk(KERN_INFO PFX "Serial port 1 at 0x%x\n", sio->sp1_base);
172 175
173 pci_read_config_dword (pdev, SIO_SP2BAR, &sio->sp2_base); 176 pci_read_config_dword (pdev, SIO_SP2BAR, &sio->sp2_base);
174 sio->sp2_base &= ~1; 177 sio->sp2_base &= ~1;
175 printk (KERN_INFO "SuperIO: Serial port 2 at 0x%x\n", sio->sp2_base); 178 printk(KERN_INFO PFX "Serial port 2 at 0x%x\n", sio->sp2_base);
176 179
177 pci_read_config_dword (pdev, SIO_PPBAR, &sio->pp_base); 180 pci_read_config_dword (pdev, SIO_PPBAR, &sio->pp_base);
178 sio->pp_base &= ~1; 181 sio->pp_base &= ~1;
179 printk (KERN_INFO "SuperIO: Parallel port at 0x%x\n", sio->pp_base); 182 printk(KERN_INFO PFX "Parallel port at 0x%x\n", sio->pp_base);
180 183
181 pci_read_config_dword (pdev, SIO_FDCBAR, &sio->fdc_base); 184 pci_read_config_dword (pdev, SIO_FDCBAR, &sio->fdc_base);
182 sio->fdc_base &= ~1; 185 sio->fdc_base &= ~1;
183 printk (KERN_INFO "SuperIO: Floppy controller at 0x%x\n", sio->fdc_base); 186 printk(KERN_INFO PFX "Floppy controller at 0x%x\n", sio->fdc_base);
184 pci_read_config_dword (pdev, SIO_ACPIBAR, &sio->acpi_base); 187 pci_read_config_dword (pdev, SIO_ACPIBAR, &sio->acpi_base);
185 sio->acpi_base &= ~1; 188 sio->acpi_base &= ~1;
186 printk (KERN_INFO "SuperIO: ACPI at 0x%x\n", sio->acpi_base); 189 printk(KERN_INFO PFX "ACPI at 0x%x\n", sio->acpi_base);
187 190
188 request_region (IC_PIC1, 0x1f, "pic1"); 191 request_region (IC_PIC1, 0x1f, "pic1");
189 request_region (IC_PIC2, 0x1f, "pic2"); 192 request_region (IC_PIC2, 0x1f, "pic2");
@@ -263,14 +266,14 @@ superio_init(struct pci_dev *pcidev)
263 /* Setup USB power regulation */ 266 /* Setup USB power regulation */
264 outb(1, sio->acpi_base + USB_REG_CR); 267 outb(1, sio->acpi_base + USB_REG_CR);
265 if (inb(sio->acpi_base + USB_REG_CR) & 1) 268 if (inb(sio->acpi_base + USB_REG_CR) & 1)
266 printk(KERN_INFO "SuperIO: USB regulator enabled\n"); 269 printk(KERN_INFO PFX "USB regulator enabled\n");
267 else 270 else
268 printk(KERN_ERR "USB regulator not initialized!\n"); 271 printk(KERN_ERR PFX "USB regulator not initialized!\n");
269 272
270 if (request_irq(pdev->irq, superio_interrupt, SA_INTERRUPT, 273 if (request_irq(pdev->irq, superio_interrupt, SA_INTERRUPT,
271 "SuperIO", (void *)sio)) { 274 SUPERIO, (void *)sio)) {
272 275
273 printk(KERN_ERR "SuperIO: could not get irq\n"); 276 printk(KERN_ERR PFX "could not get irq\n");
274 BUG(); 277 BUG();
275 return; 278 return;
276 } 279 }
@@ -284,7 +287,7 @@ static void superio_disable_irq(unsigned int irq)
284 u8 r8; 287 u8 r8;
285 288
286 if ((irq < 1) || (irq == 2) || (irq > 7)) { 289 if ((irq < 1) || (irq == 2) || (irq > 7)) {
287 printk(KERN_ERR "SuperIO: Illegal irq number.\n"); 290 printk(KERN_ERR PFX "Illegal irq number.\n");
288 BUG(); 291 BUG();
289 return; 292 return;
290 } 293 }
@@ -301,7 +304,7 @@ static void superio_enable_irq(unsigned int irq)
301 u8 r8; 304 u8 r8;
302 305
303 if ((irq < 1) || (irq == 2) || (irq > 7)) { 306 if ((irq < 1) || (irq == 2) || (irq > 7)) {
304 printk(KERN_ERR "SuperIO: Illegal irq number (%d).\n", irq); 307 printk(KERN_ERR PFX "Illegal irq number (%d).\n", irq);
305 BUG(); 308 BUG();
306 return; 309 return;
307 } 310 }
@@ -319,7 +322,7 @@ static unsigned int superio_startup_irq(unsigned int irq)
319} 322}
320 323
321static struct hw_interrupt_type superio_interrupt_type = { 324static struct hw_interrupt_type superio_interrupt_type = {
322 .typename = "SuperIO", 325 .typename = SUPERIO,
323 .startup = superio_startup_irq, 326 .startup = superio_startup_irq,
324 .shutdown = superio_disable_irq, 327 .shutdown = superio_disable_irq,
325 .enable = superio_enable_irq, 328 .enable = superio_enable_irq,
@@ -413,7 +416,7 @@ static void __devinit superio_serial_init(void)
413 416
414 retval = early_serial_setup(&serial[0]); 417 retval = early_serial_setup(&serial[0]);
415 if (retval < 0) { 418 if (retval < 0) {
416 printk(KERN_WARNING "SuperIO: Register Serial #0 failed.\n"); 419 printk(KERN_WARNING PFX "Register Serial #0 failed.\n");
417 return; 420 return;
418 } 421 }
419 422
@@ -423,7 +426,7 @@ static void __devinit superio_serial_init(void)
423 retval = early_serial_setup(&serial[1]); 426 retval = early_serial_setup(&serial[1]);
424 427
425 if (retval < 0) 428 if (retval < 0)
426 printk(KERN_WARNING "SuperIO: Register Serial #1 failed.\n"); 429 printk(KERN_WARNING PFX "Register Serial #1 failed.\n");
427#endif /* CONFIG_SERIAL_8250 */ 430#endif /* CONFIG_SERIAL_8250 */
428} 431}
429 432
@@ -437,7 +440,7 @@ static void __devinit superio_parport_init(void)
437 PARPORT_DMA_NONE /* dma */, 440 PARPORT_DMA_NONE /* dma */,
438 NULL /*struct pci_dev* */) ) 441 NULL /*struct pci_dev* */) )
439 442
440 printk(KERN_WARNING "SuperIO: Probing parallel port failed.\n"); 443 printk(KERN_WARNING PFX "Probing parallel port failed.\n");
441#endif /* CONFIG_PARPORT_PC */ 444#endif /* CONFIG_PARPORT_PC */
442} 445}
443 446
@@ -499,7 +502,7 @@ static struct pci_device_id superio_tbl[] = {
499}; 502};
500 503
501static struct pci_driver superio_driver = { 504static struct pci_driver superio_driver = {
502 .name = "SuperIO", 505 .name = SUPERIO,
503 .id_table = superio_tbl, 506 .id_table = superio_tbl,
504 .probe = superio_probe, 507 .probe = superio_probe,
505}; 508};
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c
index 17dce2adf7fe..813c2c24ab1e 100644
--- a/drivers/parisc/wax.c
+++ b/drivers/parisc/wax.c
@@ -76,7 +76,7 @@ wax_init_chip(struct parisc_device *dev)
76 struct gsc_irq gsc_irq; 76 struct gsc_irq gsc_irq;
77 int ret; 77 int ret;
78 78
79 wax = kmalloc(sizeof(*wax), GFP_KERNEL); 79 wax = kzalloc(sizeof(*wax), GFP_KERNEL);
80 if (!wax) 80 if (!wax)
81 return -ENOMEM; 81 return -ENOMEM;
82 82
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index f605dea57224..f63c387976cf 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -90,6 +90,15 @@ config PARPORT_ARC
90 depends on ARM && PARPORT 90 depends on ARM && PARPORT
91 select PARPORT_NOT_PC 91 select PARPORT_NOT_PC
92 92
93config PARPORT_IP32
94 tristate "SGI IP32 builtin port (EXPERIMENTAL)"
95 depends on SGI_IP32 && PARPORT && EXPERIMENTAL
96 select PARPORT_NOT_PC
97 help
98 Say Y here if you need support for the parallel port on
99 SGI O2 machines. This code is also available as a module (say M),
100 called parport_ip32. If in doubt, saying N is the safe plan.
101
93config PARPORT_AMIGA 102config PARPORT_AMIGA
94 tristate "Amiga builtin port" 103 tristate "Amiga builtin port"
95 depends on AMIGA && PARPORT 104 depends on AMIGA && PARPORT
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile
index 5372212bb9d9..a19de35f8de2 100644
--- a/drivers/parport/Makefile
+++ b/drivers/parport/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_PARPORT_MFC3) += parport_mfc3.o
17obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o 17obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o
18obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o 18obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o
19obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o 19obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o
20obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 5b887ba5aaf9..690b239ad3a7 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -61,10 +61,10 @@ static void timeout_waiting_on_port (unsigned long cookie)
61 * set to zero, it returns immediately. 61 * set to zero, it returns immediately.
62 * 62 *
63 * If an interrupt occurs before the timeout period elapses, this 63 * If an interrupt occurs before the timeout period elapses, this
64 * function returns one immediately. If it times out, it returns 64 * function returns zero immediately. If it times out, it returns
65 * a value greater than zero. An error code less than zero 65 * one. An error code less than zero indicates an error (most
66 * indicates an error (most likely a pending signal), and the 66 * likely a pending signal), and the calling code should finish
67 * calling code should finish what it's doing as soon as it can. 67 * what it's doing as soon as it can.
68 */ 68 */
69 69
70int parport_wait_event (struct parport *port, signed long timeout) 70int parport_wait_event (struct parport *port, signed long timeout)
@@ -110,7 +110,7 @@ int parport_wait_event (struct parport *port, signed long timeout)
110 * 110 *
111 * If the status lines take on the desired values before the 111 * If the status lines take on the desired values before the
112 * timeout period elapses, parport_poll_peripheral() returns zero 112 * timeout period elapses, parport_poll_peripheral() returns zero
113 * immediately. A zero return value greater than zero indicates 113 * immediately. A return value greater than zero indicates
114 * a timeout. An error code (less than zero) indicates an error, 114 * a timeout. An error code (less than zero) indicates an error,
115 * most likely a signal that arrived, and the caller should 115 * most likely a signal that arrived, and the caller should
116 * finish what it is doing as soon as possible. 116 * finish what it is doing as soon as possible.
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index fde29a75f888..1de52d9febf9 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -249,7 +249,7 @@ struct parport *__devinit parport_gsc_probe_port (unsigned long base,
249 struct parport tmp; 249 struct parport tmp;
250 struct parport *p = &tmp; 250 struct parport *p = &tmp;
251 251
252 priv = kmalloc (sizeof (struct parport_gsc_private), GFP_KERNEL); 252 priv = kzalloc (sizeof (struct parport_gsc_private), GFP_KERNEL);
253 if (!priv) { 253 if (!priv) {
254 printk (KERN_DEBUG "parport (0x%lx): no memory!\n", base); 254 printk (KERN_DEBUG "parport (0x%lx): no memory!\n", base);
255 return NULL; 255 return NULL;
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
new file mode 100644
index 000000000000..46e06e596d73
--- /dev/null
+++ b/drivers/parport/parport_ip32.c
@@ -0,0 +1,2253 @@
1/* Low-level parallel port routines for built-in port on SGI IP32
2 *
3 * Author: Arnaud Giersch <arnaud.giersch@free.fr>
4 *
5 * Based on parport_pc.c by
6 * Phil Blundell, Tim Waugh, Jose Renau, David Campbell,
7 * Andrea Arcangeli, et al.
8 *
9 * Thanks to Ilya A. Volynets-Evenbakh for his help.
10 *
11 * Copyright (C) 2005, 2006 Arnaud Giersch.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 2 of the License, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but WITHOUT
19 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21 * more details.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc., 59
25 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */
27
28/* Current status:
29 *
30 * Basic SPP and PS2 modes are supported.
31 * Support for parallel port IRQ is present.
32 * Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are
33 * supported.
34 * SPP/ECP FIFO can be driven in PIO or DMA mode. PIO mode can work with
35 * or without interrupt support.
36 *
37 * Hardware ECP mode is not fully implemented (ecp_read_data and
38 * ecp_write_addr are actually missing).
39 *
40 * To do:
41 *
42 * Fully implement ECP mode.
43 * EPP and ECP mode need to be tested. I currently do not own any
44 * peripheral supporting these extended mode, and cannot test them.
45 * If DMA mode works well, decide if support for PIO FIFO modes should be
46 * dropped.
47 * Use the io{read,write} family functions when they become available in
48 * the linux-mips.org tree. Note: the MIPS specific functions readsb()
49 * and writesb() are to be translated by ioread8_rep() and iowrite8_rep()
50 * respectively.
51 */
52
53/* The built-in parallel port on the SGI 02 workstation (a.k.a. IP32) is an
54 * IEEE 1284 parallel port driven by a Texas Instrument TL16PIR552PH chip[1].
55 * This chip supports SPP, bidirectional, EPP and ECP modes. It has a 16 byte
56 * FIFO buffer and supports DMA transfers.
57 *
58 * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html
59 *
60 * Theoretically, we could simply use the parport_pc module. It is however
61 * not so simple. The parport_pc code assumes that the parallel port
62 * registers are port-mapped. On the O2, they are memory-mapped.
63 * Furthermore, each register is replicated on 256 consecutive addresses (as
64 * it is for the built-in serial ports on the same chip).
65 */
66
67/*--- Some configuration defines ---------------------------------------*/
68
69/* DEBUG_PARPORT_IP32
70 * 0 disable debug
71 * 1 standard level: pr_debug1 is enabled
72 * 2 parport_ip32_dump_state is enabled
73 * >=3 verbose level: pr_debug is enabled
74 */
75#if !defined(DEBUG_PARPORT_IP32)
76# define DEBUG_PARPORT_IP32 0 /* 0 (disabled) for production */
77#endif
78
79/*----------------------------------------------------------------------*/
80
81/* Setup DEBUG macros. This is done before any includes, just in case we
82 * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3.
83 */
84#if DEBUG_PARPORT_IP32 == 1
85# warning DEBUG_PARPORT_IP32 == 1
86#elif DEBUG_PARPORT_IP32 == 2
87# warning DEBUG_PARPORT_IP32 == 2
88#elif DEBUG_PARPORT_IP32 >= 3
89# warning DEBUG_PARPORT_IP32 >= 3
90# if !defined(DEBUG)
91# define DEBUG /* enable pr_debug() in kernel.h */
92# endif
93#endif
94
95#include <linux/completion.h>
96#include <linux/delay.h>
97#include <linux/dma-mapping.h>
98#include <linux/err.h>
99#include <linux/init.h>
100#include <linux/interrupt.h>
101#include <linux/jiffies.h>
102#include <linux/kernel.h>
103#include <linux/module.h>
104#include <linux/parport.h>
105#include <linux/sched.h>
106#include <linux/spinlock.h>
107#include <linux/stddef.h>
108#include <linux/types.h>
109#include <asm/io.h>
110#include <asm/ip32/ip32_ints.h>
111#include <asm/ip32/mace.h>
112
113/*--- Global variables -------------------------------------------------*/
114
115/* Verbose probing on by default for debugging. */
116#if DEBUG_PARPORT_IP32 >= 1
117# define DEFAULT_VERBOSE_PROBING 1
118#else
119# define DEFAULT_VERBOSE_PROBING 0
120#endif
121
122/* Default prefix for printk */
123#define PPIP32 "parport_ip32: "
124
125/*
126 * These are the module parameters:
127 * @features: bit mask of features to enable/disable
128 * (all enabled by default)
129 * @verbose_probing: log chit-chat during initialization
130 */
131#define PARPORT_IP32_ENABLE_IRQ (1U << 0)
132#define PARPORT_IP32_ENABLE_DMA (1U << 1)
133#define PARPORT_IP32_ENABLE_SPP (1U << 2)
134#define PARPORT_IP32_ENABLE_EPP (1U << 3)
135#define PARPORT_IP32_ENABLE_ECP (1U << 4)
136static unsigned int features = ~0U;
137static int verbose_probing = DEFAULT_VERBOSE_PROBING;
138
139/* We do not support more than one port. */
140static struct parport *this_port = NULL;
141
142/* Timing constants for FIFO modes. */
143#define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */
144#define FIFO_POLLING_INTERVAL 50 /* microseconds */
145
146/*--- I/O register definitions -----------------------------------------*/
147
/**
 * struct parport_ip32_regs - virtual addresses of parallel port registers
 * @data:	Data Register
 * @dsr:	Device Status Register
 * @dcr:	Device Control Register
 * @eppAddr:	EPP Address Register
 * @eppData0:	EPP Data Register 0
 * @eppData1:	EPP Data Register 1
 * @eppData2:	EPP Data Register 2
 * @eppData3:	EPP Data Register 3
 * @ecpAFifo:	ECP Address FIFO
 * @fifo:	General FIFO register.  The same address is used for:
 *		- cFifo, the Parallel Port DATA FIFO
 *		- ecpDFifo, the ECP Data FIFO
 *		- tFifo, the ECP Test FIFO
 * @cnfgA:	Configuration Register A
 * @cnfgB:	Configuration Register B
 * @ecr:	Extended Control Register
 *
 * On the O2 these registers are memory-mapped, so each field holds an
 * __iomem address accessed with readb()/writeb() rather than a port number.
 */
struct parport_ip32_regs {
	void __iomem *data;
	void __iomem *dsr;
	void __iomem *dcr;
	void __iomem *eppAddr;
	void __iomem *eppData0;
	void __iomem *eppData1;
	void __iomem *eppData2;
	void __iomem *eppData3;
	void __iomem *ecpAFifo;
	void __iomem *fifo;
	void __iomem *cnfgA;
	void __iomem *cnfgB;
	void __iomem *ecr;
};
182
183/* Device Status Register */
184#define DSR_nBUSY (1U << 7) /* PARPORT_STATUS_BUSY */
185#define DSR_nACK (1U << 6) /* PARPORT_STATUS_ACK */
186#define DSR_PERROR (1U << 5) /* PARPORT_STATUS_PAPEROUT */
187#define DSR_SELECT (1U << 4) /* PARPORT_STATUS_SELECT */
188#define DSR_nFAULT (1U << 3) /* PARPORT_STATUS_ERROR */
189#define DSR_nPRINT (1U << 2) /* specific to TL16PIR552 */
190/* #define DSR_reserved (1U << 1) */
191#define DSR_TIMEOUT (1U << 0) /* EPP timeout */
192
193/* Device Control Register */
194/* #define DCR_reserved (1U << 7) | (1U << 6) */
195#define DCR_DIR (1U << 5) /* direction */
196#define DCR_IRQ (1U << 4) /* interrupt on nAck */
197#define DCR_SELECT (1U << 3) /* PARPORT_CONTROL_SELECT */
198#define DCR_nINIT (1U << 2) /* PARPORT_CONTROL_INIT */
199#define DCR_AUTOFD (1U << 1) /* PARPORT_CONTROL_AUTOFD */
200#define DCR_STROBE (1U << 0) /* PARPORT_CONTROL_STROBE */
201
202/* ECP Configuration Register A */
203#define CNFGA_IRQ (1U << 7)
204#define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4))
205#define CNFGA_ID_SHIFT 4
206#define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT)
207#define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT)
208#define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT)
209/* #define CNFGA_reserved (1U << 3) */
210#define CNFGA_nBYTEINTRANS (1U << 2)
211#define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0))
212
213/* ECP Configuration Register B */
214#define CNFGB_COMPRESS (1U << 7)
215#define CNFGB_INTRVAL (1U << 6)
216#define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3))
217#define CNFGB_IRQ_SHIFT 3
218#define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0))
219#define CNFGB_DMA_SHIFT 0
220
221/* Extended Control Register */
222#define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5))
223#define ECR_MODE_SHIFT 5
224#define ECR_MODE_SPP (00U << ECR_MODE_SHIFT)
225#define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT)
226#define ECR_MODE_PPF (02U << ECR_MODE_SHIFT)
227#define ECR_MODE_ECP (03U << ECR_MODE_SHIFT)
228#define ECR_MODE_EPP (04U << ECR_MODE_SHIFT)
229/* #define ECR_MODE_reserved (05U << ECR_MODE_SHIFT) */
230#define ECR_MODE_TST (06U << ECR_MODE_SHIFT)
231#define ECR_MODE_CFG (07U << ECR_MODE_SHIFT)
232#define ECR_nERRINTR (1U << 4)
233#define ECR_DMAEN (1U << 3)
234#define ECR_SERVINTR (1U << 2)
235#define ECR_F_FULL (1U << 1)
236#define ECR_F_EMPTY (1U << 0)
237
238/*--- Private data -----------------------------------------------------*/
239
/**
 * enum parport_ip32_irq_mode - operation mode of interrupt handler
 * @PARPORT_IP32_IRQ_FWD:	forward interrupt to the upper parport layer
 * @PARPORT_IP32_IRQ_HERE:	interrupt is handled locally
 *
 * The current mode is read from &parport_ip32_private.irq_mode by
 * parport_ip32_interrupt() to decide how each interrupt is dispatched.
 */
enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE };
246
/**
 * struct parport_ip32_private - private stuff for &struct parport
 * @regs:		register addresses
 * @dcr_cache:		cached contents of DCR; kept in sync by
 *			__parport_ip32_write_control() so the DCR never has
 *			to be read back from hardware
 * @dcr_writable:	bit mask of writable DCR bits
 * @pword:		number of bytes per PWord
 * @fifo_depth:		number of PWords that FIFO will hold
 * @readIntrThreshold:	minimum number of PWords we can read
 *			if we get an interrupt
 * @writeIntrThreshold:	minimum number of PWords we can write
 *			if we get an interrupt
 * @irq_mode:		operation mode of interrupt handler for this port
 * @irq_complete:	mutex used to wait for an interrupt to occur
 */
struct parport_ip32_private {
	struct parport_ip32_regs regs;
	unsigned int dcr_cache;
	unsigned int dcr_writable;
	unsigned int pword;
	unsigned int fifo_depth;
	unsigned int readIntrThreshold;
	unsigned int writeIntrThreshold;
	enum parport_ip32_irq_mode irq_mode;
	struct completion irq_complete;
};
272
273/*--- Debug code -------------------------------------------------------*/
274
275/*
276 * pr_debug1 - print debug messages
277 *
278 * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1
279 */
280#if DEBUG_PARPORT_IP32 >= 1
281# define pr_debug1(...) printk(KERN_DEBUG __VA_ARGS__)
282#else /* DEBUG_PARPORT_IP32 < 1 */
283# define pr_debug1(...) do { } while (0)
284#endif
285
286/*
287 * pr_trace, pr_trace1 - trace function calls
288 * @p: pointer to &struct parport
289 * @fmt: printk format string
290 * @...: parameters for format string
291 *
292 * Macros used to trace function calls. The given string is formatted after
293 * function name. pr_trace() uses pr_debug(), and pr_trace1() uses
294 * pr_debug1(). __pr_trace() is the low-level macro and is not to be used
295 * directly.
296 */
297#define __pr_trace(pr, p, fmt, ...) \
298 pr("%s: %s" fmt "\n", \
299 ({ const struct parport *__p = (p); \
300 __p ? __p->name : "parport_ip32"; }), \
301 __func__ , ##__VA_ARGS__)
302#define pr_trace(p, fmt, ...) __pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
303#define pr_trace1(p, fmt, ...) __pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
304
305/*
306 * __pr_probe, pr_probe - print message if @verbose_probing is true
307 * @p: pointer to &struct parport
308 * @fmt: printk format string
309 * @...: parameters for format string
310 *
311 * For new lines, use pr_probe(). Use __pr_probe() for continued lines.
312 */
313#define __pr_probe(...) \
314 do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
315#define pr_probe(p, fmt, ...) \
316 __pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
317
/*
 * parport_ip32_dump_state - print register status of parport
 * @p:			pointer to &struct parport
 * @str:		string to add in message
 * @show_ecp_config:	shall we dump ECP configuration registers too?
 *
 * This function is only here for debugging purpose, and should be used with
 * care.  Reading the parallel port registers may have undesired side effects.
 * Especially if @show_ecp_config is true, the parallel port is reset: the
 * ECR is cycled through PS2 and CFG modes to reach cnfgA/cnfgB.
 * This function is only defined if %DEBUG_PARPORT_IP32 >= 2.
 */
#if DEBUG_PARPORT_IP32 >= 2
static void parport_ip32_dump_state(struct parport *p, char *str,
				    unsigned int show_ecp_config)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int i;

	printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
	{
		/* Decode ECR: current mode name plus individual flag bits. */
		static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF",
						     "ECP", "EPP", "???",
						     "TST", "CFG"};
		unsigned int ecr = readb(priv->regs.ecr);
		printk(KERN_DEBUG PPIP32 "    ecr=0x%02x", ecr);
		printk(" %s",
		       ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
		if (ecr & ECR_nERRINTR)
			printk(",nErrIntrEn");
		if (ecr & ECR_DMAEN)
			printk(",dmaEn");
		if (ecr & ECR_SERVINTR)
			printk(",serviceIntr");
		if (ecr & ECR_F_FULL)
			printk(",f_full");
		if (ecr & ECR_F_EMPTY)
			printk(",f_empty");
		printk("\n");
	}
	if (show_ecp_config) {
		/* cnfgA/cnfgB are only reachable in CFG mode; go through
		 * PS2 mode on the way in and out, then restore the old ECR. */
		unsigned int oecr, cnfgA, cnfgB;
		oecr = readb(priv->regs.ecr);
		writeb(ECR_MODE_PS2, priv->regs.ecr);
		writeb(ECR_MODE_CFG, priv->regs.ecr);
		cnfgA = readb(priv->regs.cnfgA);
		cnfgB = readb(priv->regs.cnfgB);
		writeb(ECR_MODE_PS2, priv->regs.ecr);
		writeb(oecr, priv->regs.ecr);
		printk(KERN_DEBUG PPIP32 "    cnfgA=0x%02x", cnfgA);
		printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
		switch (cnfgA & CNFGA_ID_MASK) {
		case CNFGA_ID_8:
			printk(",8 bits");
			break;
		case CNFGA_ID_16:
			printk(",16 bits");
			break;
		case CNFGA_ID_32:
			printk(",32 bits");
			break;
		default:
			printk(",unknown ID");
			break;
		}
		if (!(cnfgA & CNFGA_nBYTEINTRANS))
			printk(",ByteInTrans");
		if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
			printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
			       ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
		printk("\n");
		printk(KERN_DEBUG PPIP32 "    cnfgB=0x%02x", cnfgB);
		printk(" irq=%u,dma=%u",
		       (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
		       (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
		printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
		if (cnfgB & CNFGB_COMPRESS)
			printk(",compress");
		printk("\n");
	}
	/* Dump the DCR twice: hardware contents (i == 0) and the cached
	 * soft copy (i == 1), so mismatches are visible in the log. */
	for (i = 0; i < 2; i++) {
		unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
		printk(KERN_DEBUG PPIP32 "    dcr(%s)=0x%02x",
		       i ? "soft" : "hard", dcr);
		printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
		if (dcr & DCR_IRQ)
			printk(",ackIntEn");
		if (!(dcr & DCR_SELECT))
			printk(",nSelectIn");
		if (dcr & DCR_nINIT)
			printk(",nInit");
		if (!(dcr & DCR_AUTOFD))
			printk(",nAutoFD");
		if (!(dcr & DCR_STROBE))
			printk(",nStrobe");
		printk("\n");
	}
/* sep emits a space before the first flag and a comma before the rest. */
#define sep (f++ ? ',' : ' ')
	{
		unsigned int f = 0;
		unsigned int dsr = readb(priv->regs.dsr);
		printk(KERN_DEBUG PPIP32 "    dsr=0x%02x", dsr);
		if (!(dsr & DSR_nBUSY))
			printk("%cBusy", sep);
		if (dsr & DSR_nACK)
			printk("%cnAck", sep);
		if (dsr & DSR_PERROR)
			printk("%cPError", sep);
		if (dsr & DSR_SELECT)
			printk("%cSelect", sep);
		if (dsr & DSR_nFAULT)
			printk("%cnFault", sep);
		if (!(dsr & DSR_nPRINT))
			printk("%c(Print)", sep);
		if (dsr & DSR_TIMEOUT)
			printk("%cTimeout", sep);
		printk("\n");
	}
#undef sep
}
#else /* DEBUG_PARPORT_IP32 < 2 */
#define parport_ip32_dump_state(...)	do { } while (0)
#endif
440
441/*
442 * CHECK_EXTRA_BITS - track and log extra bits
443 * @p: pointer to &struct parport
444 * @b: byte to inspect
445 * @m: bit mask of authorized bits
446 *
447 * This is used to track and log extra bits that should not be there in
448 * parport_ip32_write_control() and parport_ip32_frob_control(). It is only
449 * defined if %DEBUG_PARPORT_IP32 >= 1.
450 */
451#if DEBUG_PARPORT_IP32 >= 1
452#define CHECK_EXTRA_BITS(p, b, m) \
453 do { \
454 unsigned int __b = (b), __m = (m); \
455 if (__b & ~__m) \
456 pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \
457 "0x%02x/0x%02x\n", \
458 (p)->name, __func__, #b, __b, __m); \
459 } while (0)
460#else /* DEBUG_PARPORT_IP32 < 1 */
461#define CHECK_EXTRA_BITS(...) do { } while (0)
462#endif
463
464/*--- IP32 parallel port DMA operations --------------------------------*/
465
/**
 * struct parport_ip32_dma_data - private data needed for DMA operation
 * @dir:	DMA direction (from or to device)
 * @buf:	buffer physical address
 * @len:	buffer length
 * @next:	address of next bytes to DMA transfer
 * @left:	number of bytes remaining
 * @ctx:	next context to write (0: context_a; 1: context_b)
 * @irq_on:	are the DMA IRQs currently enabled?
 * @lock:	spinlock to protect access to the structure
 */
struct parport_ip32_dma_data {
	enum dma_data_direction		dir;
	dma_addr_t			buf;
	dma_addr_t			next;
	size_t				len;
	size_t				left;
	unsigned int			ctx;
	unsigned int			irq_on;
	spinlock_t			lock;
};
/* A single global instance is enough: this driver supports only one port
 * (see @this_port above). */
static struct parport_ip32_dma_data parport_ip32_dma;
488
/**
 * parport_ip32_dma_setup_context - setup next DMA context
 * @limit:	maximum data size for the context
 *
 * The alignment constraints must be verified in caller function, and the
 * parameter @limit must be set accordingly.
 *
 * Programs the next MACE DMA context register (context_a and context_b are
 * used alternately, tracked by @parport_ip32_dma.ctx) with the address and
 * length of the next chunk, and advances the transfer bookkeeping.  Called
 * both at transfer start and from the context-done interrupt handler.
 */
static void parport_ip32_dma_setup_context(unsigned int limit)
{
	unsigned long flags;

	spin_lock_irqsave(&parport_ip32_dma.lock, flags);
	if (parport_ip32_dma.left > 0) {
		/* Note: ctxreg is "volatile" here only because
		 * mace->perif.ctrl.parport.context_a and context_b are
		 * "volatile". */
		volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
			&mace->perif.ctrl.parport.context_a :
			&mace->perif.ctrl.parport.context_b;
		u64 count;
		u64 ctxval;
		if (parport_ip32_dma.left <= limit) {
			/* Final chunk: tag it so the controller knows the
			 * transfer ends with this context. */
			count = parport_ip32_dma.left;
			ctxval = MACEPAR_CONTEXT_LASTFLAG;
		} else {
			count = limit;
			ctxval = 0;
		}

		pr_trace(NULL,
			 "(%u): 0x%04x:0x%04x, %u -> %u%s",
			 limit,
			 (unsigned int)parport_ip32_dma.buf,
			 (unsigned int)parport_ip32_dma.next,
			 (unsigned int)count,
			 parport_ip32_dma.ctx, ctxval ? "*" : "");

		/* The DATALEN field encodes count - 1. */
		ctxval |= parport_ip32_dma.next &
			MACEPAR_CONTEXT_BASEADDR_MASK;
		ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &
			MACEPAR_CONTEXT_DATALEN_MASK;
		writeq(ctxval, ctxreg);
		parport_ip32_dma.next += count;
		parport_ip32_dma.left -= count;
		parport_ip32_dma.ctx ^= 1U;
	}
	/* If there is nothing more to send, disable IRQs to avoid to
	 * face an IRQ storm which can lock the machine.  Disable them
	 * only once. */
	if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
		pr_debug(PPIP32 "IRQ off (ctx)\n");
		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
}
546
547/**
548 * parport_ip32_dma_interrupt - DMA interrupt handler
549 * @irq: interrupt number
550 * @dev_id: unused
551 * @regs: pointer to &struct pt_regs
552 */
553static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id,
554 struct pt_regs *regs)
555{
556 if (parport_ip32_dma.left)
557 pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
558 parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
559 return IRQ_HANDLED;
560}
561
#if DEBUG_PARPORT_IP32
/* Debug-only handler: the purpose of this IRQ is unknown (see the FIXME at
 * the request_irq() call site); simply trace its occurrence. */
static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id,
					       struct pt_regs *regs)
{
	pr_trace1(NULL, "(%d)", irq);
	return IRQ_HANDLED;
}
#endif
570
/**
 * parport_ip32_dma_start - begins a DMA transfer
 * @dir:	DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @addr:	pointer to data buffer
 * @count:	buffer size
 *
 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
 * correctly balanced.
 *
 * Resets the MACE DMA controller, maps @addr for DMA, programs the first
 * two contexts, and enables the engine.  Always returns zero.
 */
static int parport_ip32_dma_start(enum dma_data_direction dir,
				  void *addr, size_t count)
{
	unsigned int limit;
	u64 ctrl;

	pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);

	/* FIXME - add support for DMA_FROM_DEVICE.  In this case, buffer must
	 * be 64 bytes aligned. */
	BUG_ON(dir != DMA_TO_DEVICE);

	/* Reset DMA controller */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	/* DMA IRQs should normally be enabled */
	if (!parport_ip32_dma.irq_on) {
		WARN_ON(1);
		enable_irq(MACEISA_PAR_CTXA_IRQ);
		enable_irq(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 1;
	}

	/* Prepare DMA pointers */
	/* NOTE(review): the dma_map_single() result is not checked for a
	 * mapping failure — confirm this cannot fail on this platform. */
	parport_ip32_dma.dir = dir;
	parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir);
	parport_ip32_dma.len = count;
	parport_ip32_dma.next = parport_ip32_dma.buf;
	parport_ip32_dma.left = parport_ip32_dma.len;
	parport_ip32_dma.ctx = 0;

	/* Setup DMA direction and first two contexts */
	ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* Single transfer should not cross a 4K page boundary */
	limit = MACEPAR_CONTEXT_DATA_BOUND -
		(parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
	parport_ip32_dma_setup_context(limit);
	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);

	/* Real start of DMA transfer */
	ctrl |= MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	return 0;
}
627
/**
 * parport_ip32_dma_stop - ends a running DMA transfer
 *
 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
 * correctly balanced.
 *
 * Quiesces the DMA IRQs, halts the engine, computes the number of bytes
 * that were never transferred (stored in @parport_ip32_dma.left for
 * parport_ip32_dma_get_residue()), then resets the controller, re-enables
 * the IRQs and unmaps the buffer.
 */
static void parport_ip32_dma_stop(void)
{
	u64 ctx_a;
	u64 ctx_b;
	u64 ctrl;
	u64 diag;
	size_t res[2];	/* {[0] = res_a, [1] = res_b} */

	pr_trace(NULL, "()");

	/* Disable IRQs */
	spin_lock_irq(&parport_ip32_dma.lock);
	if (parport_ip32_dma.irq_on) {
		pr_debug(PPIP32 "IRQ off (stop)\n");
		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irq(&parport_ip32_dma.lock);
	/* Force IRQ synchronization, even if the IRQs were disabled
	 * elsewhere. */
	synchronize_irq(MACEISA_PAR_CTXA_IRQ);
	synchronize_irq(MACEISA_PAR_CTXB_IRQ);

	/* Stop DMA transfer */
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	/* Adjust residue (parport_ip32_dma.left) */
	ctx_a = readq(&mace->perif.ctrl.parport.context_a);
	ctx_b = readq(&mace->perif.ctrl.parport.context_b);
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	diag = readq(&mace->perif.ctrl.parport.diagnostic);
	/* Each still-valid context contributes its DATALEN + 1 bytes
	 * (the hardware field encodes count - 1). */
	res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
		1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
		1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	/* If the engine was caught mid-context, the diagnostic counter
	 * overrides the count for whichever context was in use. */
	if (diag & MACEPAR_DIAG_DMACTIVE)
		res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
			1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
			     MACEPAR_DIAG_CTRSHIFT);
	parport_ip32_dma.left += res[0] + res[1];

	/* Reset DMA controller, and re-enable IRQs */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	pr_debug(PPIP32 "IRQ on (stop)\n");
	enable_irq(MACEISA_PAR_CTXA_IRQ);
	enable_irq(MACEISA_PAR_CTXB_IRQ);
	parport_ip32_dma.irq_on = 1;

	dma_unmap_single(NULL, parport_ip32_dma.buf, parport_ip32_dma.len,
			 parport_ip32_dma.dir);
}
693
694/**
695 * parport_ip32_dma_get_residue - get residue from last DMA transfer
696 *
697 * Returns the number of bytes remaining from last DMA transfer.
698 */
699static inline size_t parport_ip32_dma_get_residue(void)
700{
701 return parport_ip32_dma.left;
702}
703
/**
 * parport_ip32_dma_register - initialize DMA engine
 *
 * Returns zero for success, or the negative error code from a failed
 * request_irq() call (with any already-acquired IRQs released).
 */
static int parport_ip32_dma_register(void)
{
	int err;

	spin_lock_init(&parport_ip32_dma.lock);
	parport_ip32_dma.irq_on = 1;

	/* Reset DMA controller */
	writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);

	/* Request IRQs.  Cleanup on failure uses goto chaining: each label
	 * releases the IRQs acquired before the failing call. */
	err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_a;
	err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_b;
#if DEBUG_PARPORT_IP32
	/* FIXME - what is this IRQ for? */
	err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_merr;
#endif
	return 0;

#if DEBUG_PARPORT_IP32
fail_merr:
	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
#endif
fail_b:
	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
fail_a:
	return err;
}
746
/**
 * parport_ip32_dma_unregister - release and free resources for DMA engine
 *
 * Releases the IRQs in the reverse order of their registration in
 * parport_ip32_dma_register().
 */
static void parport_ip32_dma_unregister(void)
{
#if DEBUG_PARPORT_IP32
	free_irq(MACEISA_PAR_MERR_IRQ, NULL);
#endif
	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
}
758
759/*--- Interrupt handlers and associates --------------------------------*/
760
761/**
762 * parport_ip32_wakeup - wakes up code waiting for an interrupt
763 * @p: pointer to &struct parport
764 */
765static inline void parport_ip32_wakeup(struct parport *p)
766{
767 struct parport_ip32_private * const priv = p->physport->private_data;
768 complete(&priv->irq_complete);
769}
770
771/**
772 * parport_ip32_interrupt - interrupt handler
773 * @irq: interrupt number
774 * @dev_id: pointer to &struct parport
775 * @regs: pointer to &struct pt_regs
776 *
777 * Caught interrupts are forwarded to the upper parport layer if IRQ_mode is
778 * %PARPORT_IP32_IRQ_FWD.
779 */
780static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id,
781 struct pt_regs *regs)
782{
783 struct parport * const p = dev_id;
784 struct parport_ip32_private * const priv = p->physport->private_data;
785 enum parport_ip32_irq_mode irq_mode = priv->irq_mode;
786 switch (irq_mode) {
787 case PARPORT_IP32_IRQ_FWD:
788 parport_generic_irq(irq, p, regs);
789 break;
790 case PARPORT_IP32_IRQ_HERE:
791 parport_ip32_wakeup(p);
792 break;
793 }
794 return IRQ_HANDLED;
795}
796
797/*--- Some utility function to manipulate ECR register -----------------*/
798
799/**
800 * parport_ip32_read_econtrol - read contents of the ECR register
801 * @p: pointer to &struct parport
802 */
803static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
804{
805 struct parport_ip32_private * const priv = p->physport->private_data;
806 return readb(priv->regs.ecr);
807}
808
809/**
810 * parport_ip32_write_econtrol - write new contents to the ECR register
811 * @p: pointer to &struct parport
812 * @c: new value to write
813 */
814static inline void parport_ip32_write_econtrol(struct parport *p,
815 unsigned int c)
816{
817 struct parport_ip32_private * const priv = p->physport->private_data;
818 writeb(c, priv->regs.ecr);
819}
820
/**
 * parport_ip32_frob_econtrol - change bits from the ECR register
 * @p:		pointer to &struct parport
 * @mask:	bit mask of bits to change
 * @val:	new value for changed bits
 *
 * Read-modify-write on the ECR: the bits in @mask are cleared, then
 * exclusive-or'ed with @val, and the result is written back.
 */
static inline void parport_ip32_frob_econtrol(struct parport *p,
					      unsigned int mask,
					      unsigned int val)
{
	parport_ip32_write_econtrol(p,
		(parport_ip32_read_econtrol(p) & ~mask) ^ val);
}
838
839/**
840 * parport_ip32_set_mode - change mode of ECP port
841 * @p: pointer to &struct parport
842 * @mode: new mode to write in ECR
843 *
844 * ECR is reset in a sane state (interrupts and DMA disabled), and placed in
845 * mode @mode. Go through PS2 mode if needed.
846 */
847static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
848{
849 unsigned int omode;
850
851 mode &= ECR_MODE_MASK;
852 omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
853
854 if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
855 || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
856 /* We have to go through PS2 mode */
857 unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
858 parport_ip32_write_econtrol(p, ecr);
859 }
860 parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
861}
862
863/*--- Basic functions needed for parport -------------------------------*/
864
865/**
866 * parport_ip32_read_data - return current contents of the DATA register
867 * @p: pointer to &struct parport
868 */
869static inline unsigned char parport_ip32_read_data(struct parport *p)
870{
871 struct parport_ip32_private * const priv = p->physport->private_data;
872 return readb(priv->regs.data);
873}
874
875/**
876 * parport_ip32_write_data - set new contents for the DATA register
877 * @p: pointer to &struct parport
878 * @d: new value to write
879 */
880static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
881{
882 struct parport_ip32_private * const priv = p->physport->private_data;
883 writeb(d, priv->regs.data);
884}
885
886/**
887 * parport_ip32_read_status - return current contents of the DSR register
888 * @p: pointer to &struct parport
889 */
890static inline unsigned char parport_ip32_read_status(struct parport *p)
891{
892 struct parport_ip32_private * const priv = p->physport->private_data;
893 return readb(priv->regs.dsr);
894}
895
896/**
897 * __parport_ip32_read_control - return cached contents of the DCR register
898 * @p: pointer to &struct parport
899 */
900static inline unsigned int __parport_ip32_read_control(struct parport *p)
901{
902 struct parport_ip32_private * const priv = p->physport->private_data;
903 return priv->dcr_cache; /* use soft copy */
904}
905
906/**
907 * __parport_ip32_write_control - set new contents for the DCR register
908 * @p: pointer to &struct parport
909 * @c: new value to write
910 */
911static inline void __parport_ip32_write_control(struct parport *p,
912 unsigned int c)
913{
914 struct parport_ip32_private * const priv = p->physport->private_data;
915 CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
916 c &= priv->dcr_writable; /* only writable bits */
917 writeb(c, priv->regs.dcr);
918 priv->dcr_cache = c; /* update soft copy */
919}
920
/**
 * __parport_ip32_frob_control - change bits from the DCR register
 * @p:		pointer to &struct parport
 * @mask:	bit mask of bits to change
 * @val:	new value for changed bits
 *
 * Equivalent to a read-modify-write of the DCR: mask out the bits in @mask,
 * exclusive-or with the bits in @val, and write the result back.  The
 * cached contents of the DCR is used instead of a hardware read.
 */
static inline void __parport_ip32_frob_control(struct parport *p,
					       unsigned int mask,
					       unsigned int val)
{
	unsigned int dcr;

	dcr = (__parport_ip32_read_control(p) & ~mask) ^ val;
	__parport_ip32_write_control(p, dcr);
}
939
940/**
941 * parport_ip32_read_control - return cached contents of the DCR register
942 * @p: pointer to &struct parport
943 *
944 * The return value is masked so as to only return the value of %DCR_STROBE,
945 * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
946 */
947static inline unsigned char parport_ip32_read_control(struct parport *p)
948{
949 const unsigned int rm =
950 DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
951 return __parport_ip32_read_control(p) & rm;
952}
953
954/**
955 * parport_ip32_write_control - set new contents for the DCR register
956 * @p: pointer to &struct parport
957 * @c: new value to write
958 *
959 * The value is masked so as to only change the value of %DCR_STROBE,
960 * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
961 */
962static inline void parport_ip32_write_control(struct parport *p,
963 unsigned char c)
964{
965 const unsigned int wm =
966 DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
967 CHECK_EXTRA_BITS(p, c, wm);
968 __parport_ip32_frob_control(p, wm, c & wm);
969}
970
971/**
972 * parport_ip32_frob_control - change bits from the DCR register
973 * @p: pointer to &struct parport
974 * @mask: bit mask of bits to change
975 * @val: new value for changed bits
976 *
977 * This differs from __parport_ip32_frob_control() in that it only allows to
978 * change the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
979 */
980static inline unsigned char parport_ip32_frob_control(struct parport *p,
981 unsigned char mask,
982 unsigned char val)
983{
984 const unsigned int wm =
985 DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
986 CHECK_EXTRA_BITS(p, mask, wm);
987 CHECK_EXTRA_BITS(p, val, wm);
988 __parport_ip32_frob_control(p, mask & wm, val & wm);
989 return parport_ip32_read_control(p);
990}
991
992/**
993 * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK
994 * @p: pointer to &struct parport
995 */
996static inline void parport_ip32_disable_irq(struct parport *p)
997{
998 __parport_ip32_frob_control(p, DCR_IRQ, 0);
999}
1000
1001/**
1002 * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK
1003 * @p: pointer to &struct parport
1004 */
1005static inline void parport_ip32_enable_irq(struct parport *p)
1006{
1007 __parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
1008}
1009
1010/**
1011 * parport_ip32_data_forward - enable host-to-peripheral communications
1012 * @p: pointer to &struct parport
1013 *
1014 * Enable the data line drivers, for 8-bit host-to-peripheral communications.
1015 */
1016static inline void parport_ip32_data_forward(struct parport *p)
1017{
1018 __parport_ip32_frob_control(p, DCR_DIR, 0);
1019}
1020
1021/**
1022 * parport_ip32_data_reverse - enable peripheral-to-host communications
1023 * @p: pointer to &struct parport
1024 *
1025 * Place the data bus in a high impedance state, if @p->modes has the
1026 * PARPORT_MODE_TRISTATE bit set.
1027 */
1028static inline void parport_ip32_data_reverse(struct parport *p)
1029{
1030 __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
1031}
1032
1033/**
1034 * parport_ip32_init_state - for core parport code
1035 * @dev: pointer to &struct pardevice
1036 * @s: pointer to &struct parport_state to initialize
1037 */
1038static void parport_ip32_init_state(struct pardevice *dev,
1039 struct parport_state *s)
1040{
1041 s->u.ip32.dcr = DCR_SELECT | DCR_nINIT;
1042 s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
1043}
1044
1045/**
1046 * parport_ip32_save_state - for core parport code
1047 * @p: pointer to &struct parport
1048 * @s: pointer to &struct parport_state to save state to
1049 */
1050static void parport_ip32_save_state(struct parport *p,
1051 struct parport_state *s)
1052{
1053 s->u.ip32.dcr = __parport_ip32_read_control(p);
1054 s->u.ip32.ecr = parport_ip32_read_econtrol(p);
1055}
1056
/**
 * parport_ip32_restore_state - for core parport code
 * @p: pointer to &struct parport
 * @s: pointer to &struct parport_state to restore state from
 *
 * Restores the ECR and DCR values previously captured by
 * parport_ip32_save_state().
 */
static void parport_ip32_restore_state(struct parport *p,
				       struct parport_state *s)
{
	/* Restore the operation mode first, then write back the complete
	 * saved ECR.  NOTE(review): the separate set_mode() call before the
	 * full ECR write is presumably required by the ECR access rules --
	 * confirm against parport_ip32_write_econtrol(). */
	parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
	parport_ip32_write_econtrol(p, s->u.ip32.ecr);
	__parport_ip32_write_control(p, s->u.ip32.dcr);
}
1069
1070/*--- EPP mode functions -----------------------------------------------*/
1071
/**
 * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode
 * @p: pointer to &struct parport
 *
 * Returns 1 if the Timeout bit is clear, and 0 otherwise.
 *
 * Different chip revisions clear the DSR Timeout bit in different ways, so
 * several clearing methods are tried in sequence.  The statement order
 * below is intentional; do not reorder.
 */
static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int cleared;

	if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
		cleared = 1;	/* already clear, nothing to do */
	else {
		unsigned int r;
		/* To clear timeout some chips require double read */
		parport_ip32_read_status(p);
		r = parport_ip32_read_status(p);
		/* Some reset by writing 1 */
		writeb(r | DSR_TIMEOUT, priv->regs.dsr);
		/* Others by writing 0 */
		writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);

		/* Re-read to see whether any of the methods worked */
		r = parport_ip32_read_status(p);
		cleared = !(r & DSR_TIMEOUT);
	}

	pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
	return cleared;
}
1102
1103/**
1104 * parport_ip32_epp_read - generic EPP read function
1105 * @eppreg: I/O register to read from
1106 * @p: pointer to &struct parport
1107 * @buf: buffer to store read data
1108 * @len: length of buffer @buf
1109 * @flags: may be PARPORT_EPP_FAST
1110 */
1111static size_t parport_ip32_epp_read(void __iomem *eppreg,
1112 struct parport *p, void *buf,
1113 size_t len, int flags)
1114{
1115 struct parport_ip32_private * const priv = p->physport->private_data;
1116 size_t got;
1117 parport_ip32_set_mode(p, ECR_MODE_EPP);
1118 parport_ip32_data_reverse(p);
1119 parport_ip32_write_control(p, DCR_nINIT);
1120 if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
1121 readsb(eppreg, buf, len);
1122 if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
1123 parport_ip32_clear_epp_timeout(p);
1124 return -EIO;
1125 }
1126 got = len;
1127 } else {
1128 u8 *bufp = buf;
1129 for (got = 0; got < len; got++) {
1130 *bufp++ = readb(eppreg);
1131 if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
1132 parport_ip32_clear_epp_timeout(p);
1133 break;
1134 }
1135 }
1136 }
1137 parport_ip32_data_forward(p);
1138 parport_ip32_set_mode(p, ECR_MODE_PS2);
1139 return got;
1140}
1141
1142/**
1143 * parport_ip32_epp_write - generic EPP write function
1144 * @eppreg: I/O register to write to
1145 * @p: pointer to &struct parport
1146 * @buf: buffer of data to write
1147 * @len: length of buffer @buf
1148 * @flags: may be PARPORT_EPP_FAST
1149 */
1150static size_t parport_ip32_epp_write(void __iomem *eppreg,
1151 struct parport *p, const void *buf,
1152 size_t len, int flags)
1153{
1154 struct parport_ip32_private * const priv = p->physport->private_data;
1155 size_t written;
1156 parport_ip32_set_mode(p, ECR_MODE_EPP);
1157 parport_ip32_data_forward(p);
1158 parport_ip32_write_control(p, DCR_nINIT);
1159 if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
1160 writesb(eppreg, buf, len);
1161 if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
1162 parport_ip32_clear_epp_timeout(p);
1163 return -EIO;
1164 }
1165 written = len;
1166 } else {
1167 const u8 *bufp = buf;
1168 for (written = 0; written < len; written++) {
1169 writeb(*bufp++, eppreg);
1170 if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
1171 parport_ip32_clear_epp_timeout(p);
1172 break;
1173 }
1174 }
1175 }
1176 parport_ip32_set_mode(p, ECR_MODE_PS2);
1177 return written;
1178}
1179
1180/**
1181 * parport_ip32_epp_read_data - read a block of data in EPP mode
1182 * @p: pointer to &struct parport
1183 * @buf: buffer to store read data
1184 * @len: length of buffer @buf
1185 * @flags: may be PARPORT_EPP_FAST
1186 */
1187static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
1188 size_t len, int flags)
1189{
1190 struct parport_ip32_private * const priv = p->physport->private_data;
1191 return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
1192}
1193
1194/**
1195 * parport_ip32_epp_write_data - write a block of data in EPP mode
1196 * @p: pointer to &struct parport
1197 * @buf: buffer of data to write
1198 * @len: length of buffer @buf
1199 * @flags: may be PARPORT_EPP_FAST
1200 */
1201static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
1202 size_t len, int flags)
1203{
1204 struct parport_ip32_private * const priv = p->physport->private_data;
1205 return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
1206}
1207
1208/**
1209 * parport_ip32_epp_read_addr - read a block of addresses in EPP mode
1210 * @p: pointer to &struct parport
1211 * @buf: buffer to store read data
1212 * @len: length of buffer @buf
1213 * @flags: may be PARPORT_EPP_FAST
1214 */
1215static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
1216 size_t len, int flags)
1217{
1218 struct parport_ip32_private * const priv = p->physport->private_data;
1219 return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
1220}
1221
1222/**
1223 * parport_ip32_epp_write_addr - write a block of addresses in EPP mode
1224 * @p: pointer to &struct parport
1225 * @buf: buffer of data to write
1226 * @len: length of buffer @buf
1227 * @flags: may be PARPORT_EPP_FAST
1228 */
1229static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
1230 size_t len, int flags)
1231{
1232 struct parport_ip32_private * const priv = p->physport->private_data;
1233 return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
1234}
1235
1236/*--- ECP mode functions (FIFO) ----------------------------------------*/
1237
1238/**
1239 * parport_ip32_fifo_wait_break - check if the waiting function should return
1240 * @p: pointer to &struct parport
1241 * @expire: timeout expiring date, in jiffies
1242 *
1243 * parport_ip32_fifo_wait_break() checks if the waiting function should return
1244 * immediately or not. The break conditions are:
1245 * - expired timeout;
1246 * - a pending signal;
1247 * - nFault asserted low.
1248 * This function also calls cond_resched().
1249 */
1250static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
1251 unsigned long expire)
1252{
1253 cond_resched();
1254 if (time_after(jiffies, expire)) {
1255 pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
1256 return 1;
1257 }
1258 if (signal_pending(current)) {
1259 pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
1260 return 1;
1261 }
1262 if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
1263 pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
1264 return 1;
1265 }
1266 return 0;
1267}
1268
1269/**
1270 * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
1271 * @p: pointer to &struct parport
1272 *
1273 * Returns the number of bytes that can safely be written in the FIFO. A
1274 * return value of zero means that the calling function should terminate as
1275 * fast as possible.
1276 */
1277static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
1278{
1279 struct parport_ip32_private * const priv = p->physport->private_data;
1280 struct parport * const physport = p->physport;
1281 unsigned long expire;
1282 unsigned int count;
1283 unsigned int ecr;
1284
1285 expire = jiffies + physport->cad->timeout;
1286 count = 0;
1287 while (1) {
1288 if (parport_ip32_fifo_wait_break(p, expire))
1289 break;
1290
1291 /* Check FIFO state. We do nothing when the FIFO is nor full,
1292 * nor empty. It appears that the FIFO full bit is not always
1293 * reliable, the FIFO state is sometimes wrongly reported, and
1294 * the chip gets confused if we give it another byte. */
1295 ecr = parport_ip32_read_econtrol(p);
1296 if (ecr & ECR_F_EMPTY) {
1297 /* FIFO is empty, fill it up */
1298 count = priv->fifo_depth;
1299 break;
1300 }
1301
1302 /* Wait a moment... */
1303 udelay(FIFO_POLLING_INTERVAL);
1304 } /* while (1) */
1305
1306 return count;
1307}
1308
/**
 * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
 * @p: pointer to &struct parport
 *
 * Returns the number of bytes that can safely be written in the FIFO.  A
 * return value of zero means that the calling function should terminate as
 * fast as possible.
 */
static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
{
	/* Warn about a lost interrupt only once; shared across all ports
	 * using this driver (module lifetime) */
	static unsigned int lost_interrupt = 0;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long nfault_timeout;
	unsigned long expire;
	unsigned int count;
	unsigned int ecr;

	/* Bound each wait so nFault can be checked regularly, without
	 * exceeding the device's overall timeout */
	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	expire = jiffies + physport->cad->timeout;
	count = 0;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;

		/* Initialize mutex used to take interrupts into account */
		INIT_COMPLETION(priv->irq_complete);

		/* Enable serviceIntr */
		parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

		/* Enabling serviceIntr while the FIFO is empty does not
		 * always generate an interrupt, so check for emptiness
		 * now. */
		ecr = parport_ip32_read_econtrol(p);
		if (!(ecr & ECR_F_EMPTY)) {
			/* FIFO is not empty: wait for an interrupt or a
			 * timeout to occur */
			wait_for_completion_interruptible_timeout(
				&priv->irq_complete, nfault_timeout);
			ecr = parport_ip32_read_econtrol(p);
			/* FIFO drained but no serviceIntr seen: the
			 * interrupt was lost; warn once */
			if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
			    && !lost_interrupt) {
				printk(KERN_WARNING PPIP32
				       "%s: lost interrupt in %s\n",
				       p->name, __func__);
				lost_interrupt = 1;
			}
		}

		/* Disable serviceIntr */
		parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);

		/* Check FIFO state */
		if (ecr & ECR_F_EMPTY) {
			/* FIFO is empty, fill it up */
			count = priv->fifo_depth;
			break;
		} else if (ecr & ECR_SERVINTR) {
			/* FIFO is not empty, but we know that can safely push
			 * writeIntrThreshold bytes into it */
			count = priv->writeIntrThreshold;
			break;
		}
		/* FIFO is not empty, and we did not get any interrupt.
		 * Either it's time to check for nFault, or a signal is
		 * pending.  This is verified in
		 * parport_ip32_fifo_wait_break(), so we continue the loop. */
	} /* while (1) */

	return count;
}
1382
1383/**
1384 * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
1385 * @p: pointer to &struct parport
1386 * @buf: buffer of data to write
1387 * @len: length of buffer @buf
1388 *
1389 * Uses PIO to write the contents of the buffer @buf into the parallel port
1390 * FIFO. Returns the number of bytes that were actually written. It can work
1391 * with or without the help of interrupts. The parallel port must be
1392 * correctly initialized before calling parport_ip32_fifo_write_block_pio().
1393 */
1394static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
1395 const void *buf, size_t len)
1396{
1397 struct parport_ip32_private * const priv = p->physport->private_data;
1398 const u8 *bufp = buf;
1399 size_t left = len;
1400
1401 priv->irq_mode = PARPORT_IP32_IRQ_HERE;
1402
1403 while (left > 0) {
1404 unsigned int count;
1405
1406 count = (p->irq == PARPORT_IRQ_NONE) ?
1407 parport_ip32_fwp_wait_polling(p) :
1408 parport_ip32_fwp_wait_interrupt(p);
1409 if (count == 0)
1410 break; /* Transmission should be stopped */
1411 if (count > left)
1412 count = left;
1413 if (count == 1) {
1414 writeb(*bufp, priv->regs.fifo);
1415 bufp++, left--;
1416 } else {
1417 writesb(priv->regs.fifo, bufp, count);
1418 bufp += count, left -= count;
1419 }
1420 }
1421
1422 priv->irq_mode = PARPORT_IP32_IRQ_FWD;
1423
1424 return len - left;
1425}
1426
/**
 * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses DMA to write the contents of the buffer @buf into the parallel port
 * FIFO.  Returns the number of bytes that were actually written.  The
 * parallel port must be correctly initialized before calling
 * parport_ip32_fifo_write_block_dma().
 */
static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long nfault_timeout;
	unsigned long expire;
	size_t written;
	unsigned int ecr;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	/* Start the DMA transfer before enabling DMA in the ECR; the
	 * completion is re-armed first so a fast finish is not missed.
	 * NOTE(review): @buf is cast away from const for
	 * parport_ip32_dma_start() -- its signature presumably lacks
	 * const; confirm it never writes through the pointer. */
	parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
	INIT_COMPLETION(priv->irq_complete);
	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);

	/* Bound each wait so nFault can be checked regularly, without
	 * exceeding the device's overall timeout */
	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	expire = jiffies + physport->cad->timeout;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;
		wait_for_completion_interruptible_timeout(&priv->irq_complete,
							  nfault_timeout);
		ecr = parport_ip32_read_econtrol(p);
		if (ecr & ECR_SERVINTR)
			break;	/* DMA transfer just finished */
	}
	/* Stop DMA (also on error/timeout paths) and account for what the
	 * engine did not transfer */
	parport_ip32_dma_stop();
	written = len - parport_ip32_dma_get_residue();

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return written;
}
1473
1474/**
1475 * parport_ip32_fifo_write_block - write a block of data
1476 * @p: pointer to &struct parport
1477 * @buf: buffer of data to write
1478 * @len: length of buffer @buf
1479 *
1480 * Uses PIO or DMA to write the contents of the buffer @buf into the parallel
1481 * p FIFO. Returns the number of bytes that were actually written.
1482 */
1483static size_t parport_ip32_fifo_write_block(struct parport *p,
1484 const void *buf, size_t len)
1485{
1486 size_t written = 0;
1487 if (len)
1488 /* FIXME - Maybe some threshold value should be set for @len
1489 * under which we revert to PIO mode? */
1490 written = (p->modes & PARPORT_MODE_DMA) ?
1491 parport_ip32_fifo_write_block_dma(p, buf, len) :
1492 parport_ip32_fifo_write_block_pio(p, buf, len);
1493 return written;
1494}
1495
1496/**
1497 * parport_ip32_drain_fifo - wait for FIFO to empty
1498 * @p: pointer to &struct parport
1499 * @timeout: timeout, in jiffies
1500 *
1501 * This function waits for FIFO to empty. It returns 1 when FIFO is empty, or
1502 * 0 if the timeout @timeout is reached before, or if a signal is pending.
1503 */
1504static unsigned int parport_ip32_drain_fifo(struct parport *p,
1505 unsigned long timeout)
1506{
1507 unsigned long expire = jiffies + timeout;
1508 unsigned int polling_interval;
1509 unsigned int counter;
1510
1511 /* Busy wait for approx. 200us */
1512 for (counter = 0; counter < 40; counter++) {
1513 if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
1514 break;
1515 if (time_after(jiffies, expire))
1516 break;
1517 if (signal_pending(current))
1518 break;
1519 udelay(5);
1520 }
1521 /* Poll slowly. Polling interval starts with 1 millisecond, and is
1522 * increased exponentially until 128. */
1523 polling_interval = 1; /* msecs */
1524 while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
1525 if (time_after_eq(jiffies, expire))
1526 break;
1527 msleep_interruptible(polling_interval);
1528 if (signal_pending(current))
1529 break;
1530 if (polling_interval < 128)
1531 polling_interval *= 2;
1532 }
1533
1534 return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
1535}
1536
/**
 * parport_ip32_get_fifo_residue - reset FIFO
 * @p: pointer to &struct parport
 * @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
 *
 * This function resets FIFO, and returns the number of bytes remaining in it.
 * On return the port is in forward PS2 mode.
 */
static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
						  unsigned int mode)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int residue;
	unsigned int cnfga;

	/* FIXME - We are missing one byte if the printer is off-line.  I
	 * don't know how to detect this.  It looks that the full bit is not
	 * always reliable.  For the moment, the problem is avoided in most
	 * cases by testing for BUSY in parport_ip32_compat_write_data().
	 */
	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
		residue = 0;
	else {
		pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);

		/* Stop all transfers.
		 *
		 * Microsoft's document instructs to drive DCR_STROBE to 0,
		 * but it doesn't work (at least in Compatibility mode, not
		 * tested in ECP mode).  Switching directly to Test mode (as
		 * in parport_pc) is not an option: it does confuse the port,
		 * ECP service interrupts are no more working after that.  A
		 * hard reset is then needed to revert to a sane state.
		 *
		 * Let's hope that the FIFO is really stuck and that the
		 * peripheral doesn't wake up now.
		 */
		parport_ip32_frob_control(p, DCR_STROBE, 0);

		/* Fill up FIFO: the number of dummy bytes needed to reach
		 * the full bit is the free space, i.e. the residue */
		for (residue = priv->fifo_depth; residue > 0; residue--) {
			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
				break;
			writeb(0x00, priv->regs.fifo);
		}
	}
	if (residue)
		pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
			  p->name, residue,
			  (residue == 1) ? " was" : "s were");

	/* Now reset the FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);

	/* Host recovery for ECP mode */
	if (mode == ECR_MODE_ECP) {
		parport_ip32_data_reverse(p);
		parport_ip32_frob_control(p, DCR_nINIT, 0);
		if (parport_wait_peripheral(p, DSR_PERROR, 0))
			pr_debug1(PPIP32 "%s: PEerror timeout 1 in %s\n",
				  p->name, __func__);
		parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
		parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
			pr_debug1(PPIP32 "%s: PEerror timeout 2 in %s\n",
				  p->name, __func__);
	}

	/* Adjust residue if needed: cnfgA reports whether a byte is still
	 * in transit */
	parport_ip32_set_mode(p, ECR_MODE_CFG);
	cnfga = readb(priv->regs.cnfgA);
	if (!(cnfga & CNFGA_nBYTEINTRANS)) {
		pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
			  p->name, cnfga);
		pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
			  p->name);
		residue++;
	}

	/* Don't care about partial PWords since we do not support
	 * PWord != 1 byte. */

	/* Back to forward PS2 mode. */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_data_forward(p);

	return residue;
}
1624
/**
 * parport_ip32_compat_write_data - write a block of data in SPP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 *
 * Returns the number of bytes actually accepted by the peripheral.
 */
static size_t parport_ip32_compat_write_data(struct parport *p,
					     const void *buf, size_t len,
					     int flags)
{
	/* Rate-limits the "not ready" message; shared across all ports
	 * (module lifetime) */
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_write_compat(p, buf, len, flags);

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_PPF);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				    DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid to flood the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait FIFO to empty.  Timeout is proportional to FIFO_depth.  */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue: bytes still stuck in the FIFO
	 * were not transmitted */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}
1686
1687/*
1688 * FIXME - Insert here parport_ip32_ecp_read_data().
1689 */
1690
1691/**
1692 * parport_ip32_ecp_write_data - write a block of data in ECP mode
1693 * @p: pointer to &struct parport
1694 * @buf: buffer of data to write
1695 * @len: length of buffer @buf
1696 * @flags: ignored
1697 */
1698static size_t parport_ip32_ecp_write_data(struct parport *p,
1699 const void *buf, size_t len,
1700 int flags)
1701{
1702 static unsigned int ready_before = 1;
1703 struct parport_ip32_private * const priv = p->physport->private_data;
1704 struct parport * const physport = p->physport;
1705 size_t written = 0;
1706
1707 /* Special case: a timeout of zero means we cannot call schedule().
1708 * Also if O_NONBLOCK is set then use the default implementation. */
1709 if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
1710 return parport_ieee1284_ecp_write_data(p, buf, len, flags);
1711
1712 /* Negotiate to forward mode if necessary. */
1713 if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
1714 /* Event 47: Set nInit high. */
1715 parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
1716 DCR_nINIT | DCR_AUTOFD);
1717
1718 /* Event 49: PError goes high. */
1719 if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
1720 printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s",
1721 p->name, __func__);
1722 physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
1723 return 0;
1724 }
1725 }
1726
1727 /* Reset FIFO, go in forward mode, and disable ackIntEn */
1728 parport_ip32_set_mode(p, ECR_MODE_PS2);
1729 parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
1730 parport_ip32_data_forward(p);
1731 parport_ip32_disable_irq(p);
1732 parport_ip32_set_mode(p, ECR_MODE_ECP);
1733 physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
1734
1735 /* Wait for peripheral to become ready */
1736 if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
1737 DSR_nBUSY | DSR_nFAULT)) {
1738 /* Avoid to flood the logs */
1739 if (ready_before)
1740 printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
1741 p->name, __func__);
1742 ready_before = 0;
1743 goto stop;
1744 }
1745 ready_before = 1;
1746
1747 written = parport_ip32_fifo_write_block(p, buf, len);
1748
1749 /* Wait FIFO to empty. Timeout is proportional to FIFO_depth. */
1750 parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
1751
1752 /* Check for a potential residue */
1753 written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);
1754
1755 /* Then, wait for BUSY to get low. */
1756 if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
1757 printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
1758 p->name, __func__);
1759
1760stop:
1761 /* Reset FIFO */
1762 parport_ip32_set_mode(p, ECR_MODE_PS2);
1763 physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
1764
1765 return written;
1766}
1767
1768/*
1769 * FIXME - Insert here parport_ip32_ecp_write_addr().
1770 */
1771
1772/*--- Default parport operations ---------------------------------------*/
1773
/* Default operations table.  Marked __initdata, so it is only used as a
 * template during initialization.  The EPP/ECP/compat entries point to the
 * generic IEEE 1284 software routines here; NOTE(review): presumably the
 * probe code substitutes the parport_ip32_* hardware implementations when
 * the corresponding modes are detected -- confirm against the init code
 * (not visible in this chunk). */
static __initdata struct parport_operations parport_ip32_ops = {
	.write_data		= parport_ip32_write_data,
	.read_data		= parport_ip32_read_data,

	.write_control		= parport_ip32_write_control,
	.read_control		= parport_ip32_read_control,
	.frob_control		= parport_ip32_frob_control,

	.read_status		= parport_ip32_read_status,

	.enable_irq		= parport_ip32_enable_irq,
	.disable_irq		= parport_ip32_disable_irq,

	.data_forward		= parport_ip32_data_forward,
	.data_reverse		= parport_ip32_data_reverse,

	.init_state		= parport_ip32_init_state,
	.save_state		= parport_ip32_save_state,
	.restore_state		= parport_ip32_restore_state,

	.epp_write_data		= parport_ieee1284_epp_write_data,
	.epp_read_data		= parport_ieee1284_epp_read_data,
	.epp_write_addr		= parport_ieee1284_epp_write_addr,
	.epp_read_addr		= parport_ieee1284_epp_read_addr,

	.ecp_write_data		= parport_ieee1284_ecp_write_data,
	.ecp_read_data		= parport_ieee1284_ecp_read_data,
	.ecp_write_addr		= parport_ieee1284_ecp_write_addr,

	.compat_write_data	= parport_ieee1284_write_compat,
	.nibble_read_data	= parport_ieee1284_read_nibble,
	.byte_read_data		= parport_ieee1284_read_byte,

	.owner			= THIS_MODULE,
};
1809
1810/*--- Device detection -------------------------------------------------*/
1811
1812/**
1813 * parport_ip32_ecp_supported - check for an ECP port
1814 * @p: pointer to the &parport structure
1815 *
1816 * Returns 1 if an ECP port is found, and 0 otherwise. This function actually
1817 * checks if an Extended Control Register seems to be present. On successful
1818 * return, the port is placed in SPP mode.
1819 */
1820static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
1821{
1822 struct parport_ip32_private * const priv = p->physport->private_data;
1823 unsigned int ecr;
1824
1825 ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
1826 writeb(ecr, priv->regs.ecr);
1827 if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
1828 goto fail;
1829
1830 pr_probe(p, "Found working ECR register\n");
1831 parport_ip32_set_mode(p, ECR_MODE_SPP);
1832 parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
1833 return 1;
1834
1835fail:
1836 pr_probe(p, "ECR register not found\n");
1837 return 0;
1838}
1839
1840/**
1841 * parport_ip32_fifo_supported - check for FIFO parameters
1842 * @p: pointer to the &parport structure
1843 *
1844 * Check for FIFO parameters of an Extended Capabilities Port. Returns 1 on
1845 * success, and 0 otherwise. Adjust FIFO parameters in the parport structure.
1846 * On return, the port is placed in SPP mode.
1847 */
1848static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
1849{
1850 struct parport_ip32_private * const priv = p->physport->private_data;
1851 unsigned int configa, configb;
1852 unsigned int pword;
1853 unsigned int i;
1854
1855 /* Configuration mode */
1856 parport_ip32_set_mode(p, ECR_MODE_CFG);
1857 configa = readb(priv->regs.cnfgA);
1858 configb = readb(priv->regs.cnfgB);
1859
1860 /* Find out PWord size */
1861 switch (configa & CNFGA_ID_MASK) {
1862 case CNFGA_ID_8:
1863 pword = 1;
1864 break;
1865 case CNFGA_ID_16:
1866 pword = 2;
1867 break;
1868 case CNFGA_ID_32:
1869 pword = 4;
1870 break;
1871 default:
1872 pr_probe(p, "Unknown implementation ID: 0x%0x\n",
1873 (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT);
1874 goto fail;
1875 break;
1876 }
1877 if (pword != 1) {
1878 pr_probe(p, "Unsupported PWord size: %u\n", pword);
1879 goto fail;
1880 }
1881 priv->pword = pword;
1882 pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);
1883
1884 /* Check for compression support */
1885 writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB);
1886 if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS)
1887 pr_probe(p, "Hardware compression detected (unsupported)\n");
1888 writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB);
1889
1890 /* Reset FIFO and go in test mode (no interrupt, no DMA) */
1891 parport_ip32_set_mode(p, ECR_MODE_TST);
1892
1893 /* FIFO must be empty now */
1894 if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
1895 pr_probe(p, "FIFO not reset\n");
1896 goto fail;
1897 }
1898
1899 /* Find out FIFO depth. */
1900 priv->fifo_depth = 0;
1901 for (i = 0; i < 1024; i++) {
1902 if (readb(priv->regs.ecr) & ECR_F_FULL) {
1903 /* FIFO full */
1904 priv->fifo_depth = i;
1905 break;
1906 }
1907 writeb((u8)i, priv->regs.fifo);
1908 }
1909 if (i >= 1024) {
1910 pr_probe(p, "Can't fill FIFO\n");
1911 goto fail;
1912 }
1913 if (!priv->fifo_depth) {
1914 pr_probe(p, "Can't get FIFO depth\n");
1915 goto fail;
1916 }
1917 pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);
1918
1919 /* Enable interrupts */
1920 parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
1921
1922 /* Find out writeIntrThreshold: number of PWords we know we can write
1923 * if we get an interrupt. */
1924 priv->writeIntrThreshold = 0;
1925 for (i = 0; i < priv->fifo_depth; i++) {
1926 if (readb(priv->regs.fifo) != (u8)i) {
1927 pr_probe(p, "Invalid data in FIFO\n");
1928 goto fail;
1929 }
1930 if (!priv->writeIntrThreshold
1931 && readb(priv->regs.ecr) & ECR_SERVINTR)
1932 /* writeIntrThreshold reached */
1933 priv->writeIntrThreshold = i + 1;
1934 if (i + 1 < priv->fifo_depth
1935 && readb(priv->regs.ecr) & ECR_F_EMPTY) {
1936 /* FIFO empty before the last byte? */
1937 pr_probe(p, "Data lost in FIFO\n");
1938 goto fail;
1939 }
1940 }
1941 if (!priv->writeIntrThreshold) {
1942 pr_probe(p, "Can't get writeIntrThreshold\n");
1943 goto fail;
1944 }
1945 pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);
1946
1947 /* FIFO must be empty now */
1948 if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
1949 pr_probe(p, "Can't empty FIFO\n");
1950 goto fail;
1951 }
1952
1953 /* Reset FIFO */
1954 parport_ip32_set_mode(p, ECR_MODE_PS2);
1955 /* Set reverse direction (must be in PS2 mode) */
1956 parport_ip32_data_reverse(p);
1957 /* Test FIFO, no interrupt, no DMA */
1958 parport_ip32_set_mode(p, ECR_MODE_TST);
1959 /* Enable interrupts */
1960 parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
1961
1962 /* Find out readIntrThreshold: number of PWords we can read if we get
1963 * an interrupt. */
1964 priv->readIntrThreshold = 0;
1965 for (i = 0; i < priv->fifo_depth; i++) {
1966 writeb(0xaa, priv->regs.fifo);
1967 if (readb(priv->regs.ecr) & ECR_SERVINTR) {
1968 /* readIntrThreshold reached */
1969 priv->readIntrThreshold = i + 1;
1970 break;
1971 }
1972 }
1973 if (!priv->readIntrThreshold) {
1974 pr_probe(p, "Can't get readIntrThreshold\n");
1975 goto fail;
1976 }
1977 pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);
1978
1979 /* Reset ECR */
1980 parport_ip32_set_mode(p, ECR_MODE_PS2);
1981 parport_ip32_data_forward(p);
1982 parport_ip32_set_mode(p, ECR_MODE_SPP);
1983 return 1;
1984
1985fail:
1986 priv->fifo_depth = 0;
1987 parport_ip32_set_mode(p, ECR_MODE_SPP);
1988 return 0;
1989}
1990
1991/*--- Initialization code ----------------------------------------------*/
1992
1993/**
1994 * parport_ip32_make_isa_registers - compute (ISA) register addresses
1995 * @regs: pointer to &struct parport_ip32_regs to fill
1996 * @base: base address of standard and EPP registers
1997 * @base_hi: base address of ECP registers
1998 * @regshift: how much to shift register offset by
1999 *
2000 * Compute register addresses, according to the ISA standard. The addresses
2001 * of the standard and EPP registers are computed from address @base. The
2002 * addresses of the ECP registers are computed from address @base_hi.
2003 */
2004static void __init
2005parport_ip32_make_isa_registers(struct parport_ip32_regs *regs,
2006 void __iomem *base, void __iomem *base_hi,
2007 unsigned int regshift)
2008{
2009#define r_base(offset) ((u8 __iomem *)base + ((offset) << regshift))
2010#define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
2011 *regs = (struct parport_ip32_regs){
2012 .data = r_base(0),
2013 .dsr = r_base(1),
2014 .dcr = r_base(2),
2015 .eppAddr = r_base(3),
2016 .eppData0 = r_base(4),
2017 .eppData1 = r_base(5),
2018 .eppData2 = r_base(6),
2019 .eppData3 = r_base(7),
2020 .ecpAFifo = r_base(0),
2021 .fifo = r_base_hi(0),
2022 .cnfgA = r_base_hi(0),
2023 .cnfgB = r_base_hi(1),
2024 .ecr = r_base_hi(2)
2025 };
2026#undef r_base_hi
2027#undef r_base
2028}
2029
2030/**
2031 * parport_ip32_probe_port - probe and register IP32 built-in parallel port
2032 *
2033 * Returns the new allocated &parport structure. On error, an error code is
2034 * encoded in return value with the ERR_PTR function.
2035 */
2036static __init struct parport *parport_ip32_probe_port(void)
2037{
2038 struct parport_ip32_regs regs;
2039 struct parport_ip32_private *priv = NULL;
2040 struct parport_operations *ops = NULL;
2041 struct parport *p = NULL;
2042 int err;
2043
2044 parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
2045 &mace->isa.ecp1284, 8 /* regshift */);
2046
2047 ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
2048 priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL);
2049 p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
2050 if (ops == NULL || priv == NULL || p == NULL) {
2051 err = -ENOMEM;
2052 goto fail;
2053 }
2054 p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
2055 p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
2056 p->private_data = priv;
2057
2058 *ops = parport_ip32_ops;
2059 *priv = (struct parport_ip32_private){
2060 .regs = regs,
2061 .dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT |
2062 DCR_AUTOFD | DCR_STROBE,
2063 .irq_mode = PARPORT_IP32_IRQ_FWD,
2064 };
2065 init_completion(&priv->irq_complete);
2066
2067 /* Probe port. */
2068 if (!parport_ip32_ecp_supported(p)) {
2069 err = -ENODEV;
2070 goto fail;
2071 }
2072 parport_ip32_dump_state(p, "begin init", 0);
2073
2074 /* We found what looks like a working ECR register. Simply assume
2075 * that all modes are correctly supported. Enable basic modes. */
2076 p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
2077 p->modes |= PARPORT_MODE_TRISTATE;
2078
2079 if (!parport_ip32_fifo_supported(p)) {
2080 printk(KERN_WARNING PPIP32
2081 "%s: error: FIFO disabled\n", p->name);
2082 /* Disable hardware modes depending on a working FIFO. */
2083 features &= ~PARPORT_IP32_ENABLE_SPP;
2084 features &= ~PARPORT_IP32_ENABLE_ECP;
2085 /* DMA is not needed if FIFO is not supported. */
2086 features &= ~PARPORT_IP32_ENABLE_DMA;
2087 }
2088
2089 /* Request IRQ */
2090 if (features & PARPORT_IP32_ENABLE_IRQ) {
2091 int irq = MACEISA_PARALLEL_IRQ;
2092 if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
2093 printk(KERN_WARNING PPIP32
2094 "%s: error: IRQ disabled\n", p->name);
2095 /* DMA cannot work without interrupts. */
2096 features &= ~PARPORT_IP32_ENABLE_DMA;
2097 } else {
2098 pr_probe(p, "Interrupt support enabled\n");
2099 p->irq = irq;
2100 priv->dcr_writable |= DCR_IRQ;
2101 }
2102 }
2103
2104 /* Allocate DMA resources */
2105 if (features & PARPORT_IP32_ENABLE_DMA) {
2106 if (parport_ip32_dma_register())
2107 printk(KERN_WARNING PPIP32
2108 "%s: error: DMA disabled\n", p->name);
2109 else {
2110 pr_probe(p, "DMA support enabled\n");
2111 p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
2112 p->modes |= PARPORT_MODE_DMA;
2113 }
2114 }
2115
2116 if (features & PARPORT_IP32_ENABLE_SPP) {
2117 /* Enable compatibility FIFO mode */
2118 p->ops->compat_write_data = parport_ip32_compat_write_data;
2119 p->modes |= PARPORT_MODE_COMPAT;
2120 pr_probe(p, "Hardware support for SPP mode enabled\n");
2121 }
2122 if (features & PARPORT_IP32_ENABLE_EPP) {
2123 /* Set up access functions to use EPP hardware. */
2124 p->ops->epp_read_data = parport_ip32_epp_read_data;
2125 p->ops->epp_write_data = parport_ip32_epp_write_data;
2126 p->ops->epp_read_addr = parport_ip32_epp_read_addr;
2127 p->ops->epp_write_addr = parport_ip32_epp_write_addr;
2128 p->modes |= PARPORT_MODE_EPP;
2129 pr_probe(p, "Hardware support for EPP mode enabled\n");
2130 }
2131 if (features & PARPORT_IP32_ENABLE_ECP) {
2132 /* Enable ECP FIFO mode */
2133 p->ops->ecp_write_data = parport_ip32_ecp_write_data;
2134 /* FIXME - not implemented */
2135/* p->ops->ecp_read_data = parport_ip32_ecp_read_data; */
2136/* p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */
2137 p->modes |= PARPORT_MODE_ECP;
2138 pr_probe(p, "Hardware support for ECP mode enabled\n");
2139 }
2140
2141 /* Initialize the port with sensible values */
2142 parport_ip32_set_mode(p, ECR_MODE_PS2);
2143 parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
2144 parport_ip32_data_forward(p);
2145 parport_ip32_disable_irq(p);
2146 parport_ip32_write_data(p, 0x00);
2147 parport_ip32_dump_state(p, "end init", 0);
2148
2149 /* Print out what we found */
2150 printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
2151 p->name, p->base, p->base_hi);
2152 if (p->irq != PARPORT_IRQ_NONE)
2153 printk(", irq %d", p->irq);
2154 printk(" [");
2155#define printmode(x) if (p->modes & PARPORT_MODE_##x) \
2156 printk("%s%s", f++ ? "," : "", #x)
2157 {
2158 unsigned int f = 0;
2159 printmode(PCSPP);
2160 printmode(TRISTATE);
2161 printmode(COMPAT);
2162 printmode(EPP);
2163 printmode(ECP);
2164 printmode(DMA);
2165 }
2166#undef printmode
2167 printk("]\n");
2168
2169 parport_announce_port(p);
2170 return p;
2171
2172fail:
2173 if (p)
2174 parport_put_port(p);
2175 kfree(priv);
2176 kfree(ops);
2177 return ERR_PTR(err);
2178}
2179
2180/**
2181 * parport_ip32_unregister_port - unregister a parallel port
2182 * @p: pointer to the &struct parport
2183 *
2184 * Unregisters a parallel port and free previously allocated resources
2185 * (memory, IRQ, ...).
2186 */
2187static __exit void parport_ip32_unregister_port(struct parport *p)
2188{
2189 struct parport_ip32_private * const priv = p->physport->private_data;
2190 struct parport_operations *ops = p->ops;
2191
2192 parport_remove_port(p);
2193 if (p->modes & PARPORT_MODE_DMA)
2194 parport_ip32_dma_unregister();
2195 if (p->irq != PARPORT_IRQ_NONE)
2196 free_irq(p->irq, p);
2197 parport_put_port(p);
2198 kfree(priv);
2199 kfree(ops);
2200}
2201
2202/**
2203 * parport_ip32_init - module initialization function
2204 */
2205static int __init parport_ip32_init(void)
2206{
2207 pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
2208 pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__);
2209 this_port = parport_ip32_probe_port();
2210 return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
2211}
2212
2213/**
2214 * parport_ip32_exit - module termination function
2215 */
2216static void __exit parport_ip32_exit(void)
2217{
2218 parport_ip32_unregister_port(this_port);
2219}
2220
2221/*--- Module stuff -----------------------------------------------------*/
2222
2223MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>");
2224MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver");
2225MODULE_LICENSE("GPL");
2226MODULE_VERSION("0.6"); /* update in parport_ip32_init() too */
2227
2228module_init(parport_ip32_init);
2229module_exit(parport_ip32_exit);
2230
2231module_param(verbose_probing, bool, S_IRUGO);
2232MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");
2233
2234module_param(features, uint, S_IRUGO);
2235MODULE_PARM_DESC(features,
2236 "Bit mask of features to enable"
2237 ", bit 0: IRQ support"
2238 ", bit 1: DMA support"
2239 ", bit 2: hardware SPP mode"
2240 ", bit 3: hardware EPP mode"
2241 ", bit 4: hardware ECP mode");
2242
2243/*--- Inform (X)Emacs about preferred coding style ---------------------*/
2244/*
2245 * Local Variables:
2246 * mode: c
2247 * c-file-style: "linux"
2248 * indent-tabs-mode: t
2249 * tab-width: 8
2250 * fill-column: 78
2251 * ispell-local-dictionary: "american"
2252 * End:
2253 */
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 76dd077e3184..166de3507780 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -329,9 +329,9 @@ static int __devinit parport_register (struct pci_dev *dev,
329 329
330 if (priv->num_par == ARRAY_SIZE (priv->port)) { 330 if (priv->num_par == ARRAY_SIZE (priv->port)) {
331 printk (KERN_WARNING 331 printk (KERN_WARNING
332 "parport_serial: %s: only %u parallel ports " 332 "parport_serial: %s: only %zu parallel ports "
333 "supported (%d reported)\n", pci_name (dev), 333 "supported (%d reported)\n", pci_name (dev),
334 ARRAY_SIZE (priv->port), card->numports); 334 ARRAY_SIZE(priv->port), card->numports);
335 break; 335 break;
336 } 336 }
337 337
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index b62aee8de3cb..ea83b70e0de2 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -199,7 +199,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
199 199
200 if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) { 200 if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
201 if (belen != len) { 201 if (belen != len) {
202 printk (KERN_DEBUG "%s: Device ID was %d bytes" 202 printk (KERN_DEBUG "%s: Device ID was %zd bytes"
203 " while device told it would be %d" 203 " while device told it would be %d"
204 " bytes\n", 204 " bytes\n",
205 port->name, len, belen); 205 port->name, len, belen);
@@ -214,7 +214,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
214 if (buffer[len-1] == ';') { 214 if (buffer[len-1] == ';') {
215 printk (KERN_DEBUG "%s: Device ID reading stopped" 215 printk (KERN_DEBUG "%s: Device ID reading stopped"
216 " before device told data not available. " 216 " before device told data not available. "
217 "Current idlen %d of %d, len bytes %02X %02X\n", 217 "Current idlen %u of %u, len bytes %02X %02X\n",
218 port->name, current_idlen, numidlens, 218 port->name, current_idlen, numidlens,
219 length[0], length[1]); 219 length[0], length[1]);
220 goto done; 220 goto done;
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6f50cc9323d9..6912399d0937 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -55,13 +55,21 @@ config DASD_DIAG
55 Disks under VM. If you are not running under VM or unsure what it is, 55 Disks under VM. If you are not running under VM or unsure what it is,
56 say "N". 56 say "N".
57 57
58config DASD_EER
59 tristate "Extended error reporting (EER)"
60 depends on DASD
61 help
62 This driver provides a character device interface to the
63 DASD extended error reporting. This is only needed if you want to
64 use applications written for the EER facility.
65
58config DASD_CMB 66config DASD_CMB
59 tristate "Compatibility interface for DASD channel measurement blocks" 67 tristate "Compatibility interface for DASD channel measurement blocks"
60 depends on DASD 68 depends on DASD
61 help 69 help
62 This driver provides an additional interface to the channel measurement 70 This driver provides an additional interface to the channel
63 facility, which is normally accessed though sysfs, with a set of 71 measurement facility, which is normally accessed though sysfs, with
64 ioctl functions specific to the dasd driver. 72 a set of ioctl functions specific to the dasd driver.
65 This is only needed if you want to use applications written for 73 This is only needed if you want to use applications written for
66 linux-2.4 dasd channel measurement facility interface. 74 linux-2.4 dasd channel measurement facility interface.
67 75
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 58c6780134f7..0c0d871e8f51 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -5,6 +5,7 @@
5dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o 5dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o
6dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o 6dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o
7dasd_diag_mod-objs := dasd_diag.o 7dasd_diag_mod-objs := dasd_diag.o
8dasd_eer_mod-objs := dasd_eer.o
8dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ 9dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
9 dasd_genhd.o dasd_erp.o 10 dasd_genhd.o dasd_erp.o
10 11
@@ -13,5 +14,6 @@ obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
13obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o 14obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
14obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o 15obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
15obj-$(CONFIG_DASD_CMB) += dasd_cmb.o 16obj-$(CONFIG_DASD_CMB) += dasd_cmb.o
17obj-$(CONFIG_DASD_EER) += dasd_eer.o
16obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o 18obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
17obj-$(CONFIG_DCSSBLK) += dcssblk.o 19obj-$(CONFIG_DCSSBLK) += dcssblk.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index abdf1ee633e7..08c88fcd8963 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/buffer_head.h> 19#include <linux/buffer_head.h>
20#include <linux/hdreg.h> 20#include <linux/hdreg.h>
21#include <linux/notifier.h>
21 22
22#include <asm/ccwdev.h> 23#include <asm/ccwdev.h>
23#include <asm/ebcdic.h> 24#include <asm/ebcdic.h>
@@ -57,6 +58,7 @@ static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
57static void dasd_flush_ccw_queue(struct dasd_device *, int); 58static void dasd_flush_ccw_queue(struct dasd_device *, int);
58static void dasd_tasklet(struct dasd_device *); 59static void dasd_tasklet(struct dasd_device *);
59static void do_kick_device(void *data); 60static void do_kick_device(void *data);
61static void dasd_disable_eer(struct dasd_device *device);
60 62
61/* 63/*
62 * SECTION: Operations on the device structure. 64 * SECTION: Operations on the device structure.
@@ -151,6 +153,8 @@ dasd_state_new_to_known(struct dasd_device *device)
151static inline void 153static inline void
152dasd_state_known_to_new(struct dasd_device * device) 154dasd_state_known_to_new(struct dasd_device * device)
153{ 155{
156 /* disable extended error reporting for this device */
157 dasd_disable_eer(device);
154 /* Forget the discipline information. */ 158 /* Forget the discipline information. */
155 device->discipline = NULL; 159 device->discipline = NULL;
156 device->state = DASD_STATE_NEW; 160 device->state = DASD_STATE_NEW;
@@ -867,6 +871,9 @@ dasd_handle_state_change_pending(struct dasd_device *device)
867 struct dasd_ccw_req *cqr; 871 struct dasd_ccw_req *cqr;
868 struct list_head *l, *n; 872 struct list_head *l, *n;
869 873
874 /* first of all call extended error reporting */
875 dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL);
876
870 device->stopped &= ~DASD_STOPPED_PENDING; 877 device->stopped &= ~DASD_STOPPED_PENDING;
871 878
872 /* restart all 'running' IO on queue */ 879 /* restart all 'running' IO on queue */
@@ -1086,6 +1093,19 @@ restart:
1086 } 1093 }
1087 goto restart; 1094 goto restart;
1088 } 1095 }
1096
1097 /* first of all call extended error reporting */
1098 if (device->eer && cqr->status == DASD_CQR_FAILED) {
1099 dasd_write_eer_trigger(DASD_EER_FATALERROR,
1100 device, cqr);
1101
1102 /* restart request */
1103 cqr->status = DASD_CQR_QUEUED;
1104 cqr->retries = 255;
1105 device->stopped |= DASD_STOPPED_QUIESCE;
1106 goto restart;
1107 }
1108
1089 /* Process finished ERP request. */ 1109 /* Process finished ERP request. */
1090 if (cqr->refers) { 1110 if (cqr->refers) {
1091 __dasd_process_erp(device, cqr); 1111 __dasd_process_erp(device, cqr);
@@ -1223,7 +1243,8 @@ __dasd_start_head(struct dasd_device * device)
1223 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1243 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1224 /* check FAILFAST */ 1244 /* check FAILFAST */
1225 if (device->stopped & ~DASD_STOPPED_PENDING && 1245 if (device->stopped & ~DASD_STOPPED_PENDING &&
1226 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) { 1246 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1247 (!device->eer)) {
1227 cqr->status = DASD_CQR_FAILED; 1248 cqr->status = DASD_CQR_FAILED;
1228 dasd_schedule_bh(device); 1249 dasd_schedule_bh(device);
1229 } 1250 }
@@ -1965,6 +1986,9 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
1965 switch (event) { 1986 switch (event) {
1966 case CIO_GONE: 1987 case CIO_GONE:
1967 case CIO_NO_PATH: 1988 case CIO_NO_PATH:
1989 /* first of all call extended error reporting */
1990 dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL);
1991
1968 if (device->state < DASD_STATE_BASIC) 1992 if (device->state < DASD_STATE_BASIC)
1969 break; 1993 break;
1970 /* Device is active. We want to keep it. */ 1994 /* Device is active. We want to keep it. */
@@ -2022,6 +2046,51 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
2022 put_driver(drv); 2046 put_driver(drv);
2023} 2047}
2024 2048
2049/*
2050 * notifications for extended error reports
2051 */
2052static struct notifier_block *dasd_eer_chain;
2053
2054int
2055dasd_register_eer_notifier(struct notifier_block *nb)
2056{
2057 return notifier_chain_register(&dasd_eer_chain, nb);
2058}
2059
2060int
2061dasd_unregister_eer_notifier(struct notifier_block *nb)
2062{
2063 return notifier_chain_unregister(&dasd_eer_chain, nb);
2064}
2065
2066/*
2067 * Notify the registered error reporting module of a problem
2068 */
2069void
2070dasd_write_eer_trigger(unsigned int id, struct dasd_device *device,
2071 struct dasd_ccw_req *cqr)
2072{
2073 if (device->eer) {
2074 struct dasd_eer_trigger temp;
2075 temp.id = id;
2076 temp.device = device;
2077 temp.cqr = cqr;
2078 notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER,
2079 (void *)&temp);
2080 }
2081}
2082
2083/*
2084 * Tell the registered error reporting module to disable error reporting for
2085 * a given device and to cleanup any private data structures on that device.
2086 */
2087static void
2088dasd_disable_eer(struct dasd_device *device)
2089{
2090 notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device);
2091}
2092
2093
2025static int __init 2094static int __init
2026dasd_init(void) 2095dasd_init(void)
2027{ 2096{
@@ -2103,6 +2172,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2103EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2172EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2104EXPORT_SYMBOL_GPL(dasd_generic_auto_online); 2173EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
2105 2174
2175EXPORT_SYMBOL(dasd_register_eer_notifier);
2176EXPORT_SYMBOL(dasd_unregister_eer_notifier);
2177EXPORT_SYMBOL(dasd_write_eer_trigger);
2178
2179
2106/* 2180/*
2107 * Overrides for Emacs so that we follow Linus's tabbing style. 2181 * Overrides for Emacs so that we follow Linus's tabbing style.
2108 * Emacs will notice this stuff at the end of the file and automatically 2182 * Emacs will notice this stuff at the end of the file and automatically
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4ee0f934e325..c811380b9079 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1108,6 +1108,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
1108 case 0x0B: 1108 case 0x0B:
1109 DEV_MESSAGE(KERN_WARNING, device, "%s", 1109 DEV_MESSAGE(KERN_WARNING, device, "%s",
1110 "FORMAT F - Volume is suspended duplex"); 1110 "FORMAT F - Volume is suspended duplex");
1111 /* call extended error reporting (EER) */
1112 dasd_write_eer_trigger(DASD_EER_PPRCSUSPEND, device,
1113 erp->refers);
1111 break; 1114 break;
1112 case 0x0C: 1115 case 0x0C:
1113 DEV_MESSAGE(KERN_WARNING, device, "%s", 1116 DEV_MESSAGE(KERN_WARNING, device, "%s",
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index bc3823d35223..e15dd7978050 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -29,6 +29,7 @@
29#define DASD_ECKD_CCW_PSF 0x27 29#define DASD_ECKD_CCW_PSF 0x27
30#define DASD_ECKD_CCW_RSSD 0x3e 30#define DASD_ECKD_CCW_RSSD 0x3e
31#define DASD_ECKD_CCW_LOCATE_RECORD 0x47 31#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
32#define DASD_ECKD_CCW_SNSS 0x54
32#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 33#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
33#define DASD_ECKD_CCW_WRITE_MT 0x85 34#define DASD_ECKD_CCW_WRITE_MT 0x85
34#define DASD_ECKD_CCW_READ_MT 0x86 35#define DASD_ECKD_CCW_READ_MT 0x86
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
new file mode 100644
index 000000000000..f70cd7716b24
--- /dev/null
+++ b/drivers/s390/block/dasd_eer.c
@@ -0,0 +1,1090 @@
1/*
2 * character device driver for extended error reporting
3 *
4 *
5 * Copyright (C) 2005 IBM Corporation
6 * extended error reporting for DASD ECKD devices
7 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
8 *
9 */
10
11#include <linux/init.h>
12#include <linux/fs.h>
13#include <linux/kernel.h>
14#include <linux/miscdevice.h>
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/device.h>
18#include <linux/workqueue.h>
19#include <linux/poll.h>
20#include <linux/notifier.h>
21
22#include <asm/uaccess.h>
23#include <asm/semaphore.h>
24#include <asm/atomic.h>
25#include <asm/ebcdic.h>
26
27#include "dasd_int.h"
28#include "dasd_eckd.h"
29
30
31MODULE_LICENSE("GPL");
32
33MODULE_AUTHOR("Stefan Weinhuber <wein@de.ibm.com>");
34MODULE_DESCRIPTION("DASD extended error reporting module");
35
36
37#ifdef PRINTK_HEADER
38#undef PRINTK_HEADER
39#endif /* PRINTK_HEADER */
40#define PRINTK_HEADER "dasd(eer):"
41
42
43
44
45
46/*****************************************************************************/
47/* the internal buffer */
48/*****************************************************************************/
49
50/*
51 * The internal buffer is meant to store opaque blobs of data, so it doesn't
52 * know of higher level concepts like triggers.
53 * It consists of a number of pages that are used as a ringbuffer. Each data
54 * blob is stored in a simple record that consists of an integer, which
55 * contains the size of the following data, and the data bytes themselves.
56 *
57 * To allow for multiple independent readers we create one internal buffer
58 * each time the device is opened and destroy the buffer when the file is
59 * closed again.
60 *
61 * One record can be written to a buffer by using the functions
62 * - dasd_eer_start_record (one time per record to write the size to the buffer
63 * and reserve the space for the data)
64 * - dasd_eer_write_buffer (one or more times per record to write the data)
65 * The data can be written in several steps but you will have to compute
66 * the total size up front for the invocation of dasd_eer_start_record.
67 * If the ringbuffer is full, dasd_eer_start_record will remove the required
68 * number of old records.
69 *
70 * A record is typically read in two steps, first read the integer that
71 * specifies the size of the following data, then read the data.
72 * Both can be done by
73 * - dasd_eer_read_buffer
74 *
75 * For all mentioned functions you need to get the bufferlock first and keep it
76 * until a complete record is written or read.
77 */
78
79
80/*
81 * All information necessary to keep track of an internal buffer is kept in
82 * a struct eerbuffer. The buffer specific to a file pointer is stored in
83 * the private_data field of that file. To be able to write data to all
84 * existing buffers, each buffer is also added to the bufferlist.
85 * If the user doesn't want to read a complete record in one go, we have to
86 * keep track of the rest of the record. residual stores the number of bytes
87 * that are still to deliver. If the rest of the record is invalidated between
88 * two reads then residual will be set to -1 so that the next read will fail.
89 * All entries in the eerbuffer structure are protected with the bufferlock.
90 * To avoid races between writing to a buffer on the one side and creating
91 * and destroying buffers on the other side, the bufferlock must also be used
92 * to protect the bufferlist.
93 */
94
95struct eerbuffer {
96 struct list_head list;
97 char **buffer;
98 int buffersize;
99 int buffer_page_count;
100 int head;
101 int tail;
102 int residual;
103};
104
105LIST_HEAD(bufferlist);
106
107static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED;
108
109DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
110
111/*
112 * How many free bytes are available on the buffer.
113 * needs to be called with bufferlock held
114 */
115static int
116dasd_eer_get_free_bytes(struct eerbuffer *eerb)
117{
118 if (eerb->head < eerb->tail) {
119 return eerb->tail - eerb->head - 1;
120 } else
121 return eerb->buffersize - eerb->head + eerb->tail -1;
122}
123
124/*
125 * How many bytes of buffer space are used.
126 * needs to be called with bufferlock held
127 */
128static int
129dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
130{
131
132 if (eerb->head >= eerb->tail) {
133 return eerb->head - eerb->tail;
134 } else
135 return eerb->buffersize - eerb->tail + eerb->head;
136}
137
138/*
139 * The dasd_eer_write_buffer function just copies count bytes of data
140 * to the buffer. Make sure to call dasd_eer_start_record first, to
141 * make sure that enough free space is available.
142 * needs to be called with bufferlock held
143 */
144static void
145dasd_eer_write_buffer(struct eerbuffer *eerb, int count, char *data)
146{
147
148 unsigned long headindex,localhead;
149 unsigned long rest, len;
150 char *nextdata;
151
152 nextdata = data;
153 rest = count;
154 while (rest > 0) {
155 headindex = eerb->head / PAGE_SIZE;
156 localhead = eerb->head % PAGE_SIZE;
157 len = min(rest, (PAGE_SIZE - localhead));
158 memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
159 nextdata += len;
160 rest -= len;
161 eerb->head += len;
162 if ( eerb->head == eerb->buffersize )
163 eerb->head = 0; /* wrap around */
164 if (eerb->head > eerb->buffersize) {
165 MESSAGE(KERN_ERR, "%s", "runaway buffer head.");
166 BUG();
167 }
168 }
169}
170
171/*
172 * needs to be called with bufferlock held
173 */
174static int
175dasd_eer_read_buffer(struct eerbuffer *eerb, int count, char *data)
176{
177
178 unsigned long tailindex,localtail;
179 unsigned long rest, len, finalcount;
180 char *nextdata;
181
182 finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
183 nextdata = data;
184 rest = finalcount;
185 while (rest > 0) {
186 tailindex = eerb->tail / PAGE_SIZE;
187 localtail = eerb->tail % PAGE_SIZE;
188 len = min(rest, (PAGE_SIZE - localtail));
189 memcpy(nextdata, eerb->buffer[tailindex]+localtail, len);
190 nextdata += len;
191 rest -= len;
192 eerb->tail += len;
193 if ( eerb->tail == eerb->buffersize )
194 eerb->tail = 0; /* wrap around */
195 if (eerb->tail > eerb->buffersize) {
196 MESSAGE(KERN_ERR, "%s", "runaway buffer tail.");
197 BUG();
198 }
199 }
200 return finalcount;
201}
202
203/*
204 * Whenever you want to write a blob of data to the internal buffer you
205 * have to start by using this function first. It will write the number
206 * of bytes that will be written to the buffer. If necessary it will remove
207 * old records to make room for the new one.
208 * needs to be called with bufferlock held
209 */
210static int
211dasd_eer_start_record(struct eerbuffer *eerb, int count)
212{
213 int tailcount;
214 if (count + sizeof(count) > eerb->buffersize)
215 return -ENOMEM;
216 while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
217 if (eerb->residual > 0) {
218 eerb->tail += eerb->residual;
219 if (eerb->tail >= eerb->buffersize)
220 eerb->tail -= eerb->buffersize;
221 eerb->residual = -1;
222 }
223 dasd_eer_read_buffer(eerb, sizeof(tailcount),
224 (char*)(&tailcount));
225 eerb->tail += tailcount;
226 if (eerb->tail >= eerb->buffersize)
227 eerb->tail -= eerb->buffersize;
228 }
229 dasd_eer_write_buffer(eerb, sizeof(count), (char*)(&count));
230
231 return 0;
232};
233
/*
 * Release the first @no_pages pages of the given page array.
 */
static void
dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int page;

	for (page = 0; page < no_pages; ++page)
		free_page((unsigned long) buf[page]);
}
246
247/*
248 * allocate a new set of memory pages
249 */
250static int
251dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
252{
253 int i;
254
255 for (i = 0; i < no_pages; ++i) {
256 buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
257 if (!buf[i]) {
258 dasd_eer_free_buffer_pages(buf, i);
259 return -ENOMEM;
260 }
261 }
262 return 0;
263}
264
265/*
266 * empty the buffer by resetting head and tail
267 * In case there is a half read data blob in the buffer, we set residual
268 * to -1 to indicate that the remainder of the blob is lost.
269 */
270static void
271dasd_eer_purge_buffer(struct eerbuffer *eerb)
272{
273 unsigned long flags;
274
275 spin_lock_irqsave(&bufferlock, flags);
276 if (eerb->residual > 0)
277 eerb->residual = -1;
278 eerb->tail=0;
279 eerb->head=0;
280 spin_unlock_irqrestore(&bufferlock, flags);
281}
282
283/*
284 * set the size of the buffer, newsize is the new number of pages to be used
285 * we don't try to copy any data back an forth, so any resize will also purge
286 * the buffer
287 */
288static int
289dasd_eer_resize_buffer(struct eerbuffer *eerb, int newsize)
290{
291 int i, oldcount, reuse;
292 char **new;
293 char **old;
294 unsigned long flags;
295
296 if (newsize < 1)
297 return -EINVAL;
298 if (eerb->buffer_page_count == newsize) {
299 /* documented behaviour is that any successfull invocation
300 * will purge all records */
301 dasd_eer_purge_buffer(eerb);
302 return 0;
303 }
304 new = kmalloc(newsize*sizeof(char*), GFP_KERNEL);
305 if (!new)
306 return -ENOMEM;
307
308 reuse=min(eerb->buffer_page_count, newsize);
309 for (i = 0; i < reuse; ++i) {
310 new[i] = eerb->buffer[i];
311 }
312 if (eerb->buffer_page_count < newsize) {
313 if (dasd_eer_allocate_buffer_pages(
314 &new[eerb->buffer_page_count],
315 newsize - eerb->buffer_page_count)) {
316 kfree(new);
317 return -ENOMEM;
318 }
319 }
320
321 spin_lock_irqsave(&bufferlock, flags);
322 old = eerb->buffer;
323 eerb->buffer = new;
324 if (eerb->residual > 0)
325 eerb->residual = -1;
326 eerb->tail = 0;
327 eerb->head = 0;
328 oldcount = eerb->buffer_page_count;
329 eerb->buffer_page_count = newsize;
330 spin_unlock_irqrestore(&bufferlock, flags);
331
332 if (oldcount > newsize) {
333 for (i = newsize; i < oldcount; ++i) {
334 free_page((unsigned long)old[i]);
335 }
336 }
337 kfree(old);
338
339 return 0;
340}
341
342
343/*****************************************************************************/
344/* The extended error reporting functionality */
345/*****************************************************************************/
346
347/*
348 * When a DASD device driver wants to report an error, it calls the
349 * function dasd_eer_write_trigger (via a notifier mechanism) and gives the
350 * respective trigger ID as parameter.
351 * Currently there are four kinds of triggers:
352 *
353 * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
354 * DASD_EER_PPRCSUSPEND: PPRC was suspended
355 * DASD_EER_NOPATH: There is no path to the device left.
356 * DASD_EER_STATECHANGE: The state of the device has changed.
357 *
358 * For the first three triggers all required information can be supplied by
359 * the caller. For these triggers a record is written by the function
360 * dasd_eer_write_standard_trigger.
361 *
362 * When dasd_eer_write_trigger is called to write a DASD_EER_STATECHANGE
363 * trigger, we have to gather the necessary sense data first. We cannot queue
 * the necessary SNSS (sense subsystem status) request immediately, since we
365 * are likely to run in a deadlock situation. Instead, we schedule a
366 * work_struct that calls the function dasd_eer_sense_subsystem_status to
367 * create and start an SNSS request asynchronously.
368 *
369 * To avoid memory allocations at runtime, the necessary memory is allocated
370 * when the extended error reporting is enabled for a device (by
371 * dasd_eer_probe). There is one private eer data structure for each eer
372 * enabled DASD device. It contains memory for the work_struct, one SNSS cqr
373 * and a flags field that is used to coordinate the use of the cqr. The call
374 * to write a state change trigger can come in at any time, so we have one flag
375 * CQR_IN_USE that protects the cqr itself. When this flag indicates that the
376 * cqr is currently in use, dasd_eer_sense_subsystem_status cannot start a
377 * second request but sets the SNSS_REQUESTED flag instead.
378 *
379 * When the request is finished, the callback function dasd_eer_SNSS_cb
380 * is called. This function will invoke the function
381 * dasd_eer_write_SNSS_trigger to finally write the trigger. It will also
382 * check the SNSS_REQUESTED flag and if it is set it will call
383 * dasd_eer_sense_subsystem_status again.
384 *
385 * To avoid race conditions during the handling of the lock, the flags must
386 * be protected by the snsslock.
387 */
388
/*
 * Per-device private data for extended error reporting: one preallocated
 * SNSS request, flag bits that coordinate its use (CQR_IN_USE,
 * SNSS_REQUESTED) and the work_struct that runs
 * dasd_eer_sense_subsystem_status.
 */
struct dasd_eer_private {
	struct dasd_ccw_req *cqr;	/* preallocated SNSS request */
	unsigned long flags;		/* CQR_IN_USE / SNSS_REQUESTED bits */
	struct work_struct worker;	/* runs dasd_eer_sense_subsystem_status */
};
394
395static void dasd_eer_destroy(struct dasd_device *device,
396 struct dasd_eer_private *eer);
397static int
398dasd_eer_write_trigger(struct dasd_eer_trigger *trigger);
399static void dasd_eer_sense_subsystem_status(void *data);
400static int dasd_eer_notify(struct notifier_block *self,
401 unsigned long action, void *data);
402
403struct workqueue_struct *dasd_eer_workqueue;
404
405#define SNSS_DATA_SIZE 44
406static spinlock_t snsslock = SPIN_LOCK_UNLOCKED;
407
#define DASD_EER_BUSID_SIZE 10
/*
 * Fixed-size header that starts every record written to the ring buffer;
 * it is followed by optional sense data and a terminating "EOR" marker.
 */
struct dasd_eer_header {
	__u32 total_size;	/* record size incl. header, data and "EOR" */
	__u32 trigger;		/* DASD_EER_* trigger id */
	__u64 tv_sec;		/* timestamp taken via do_gettimeofday */
	__u64 tv_usec;
	char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));

/* hooked into the dasd driver's eer notifier chain */
static struct notifier_block dasd_eer_nb = {
	.notifier_call = dasd_eer_notify,
};
420
421/*
422 * flags for use with dasd_eer_private
423 */
424#define CQR_IN_USE 0
425#define SNSS_REQUESTED 1
426
427/*
428 * This function checks if extended error reporting is available for a given
429 * dasd_device. If yes, then it creates and returns a struct dasd_eer,
430 * otherwise it returns an -EPERM error pointer.
431 */
432struct dasd_eer_private *
433dasd_eer_probe(struct dasd_device *device)
434{
435 struct dasd_eer_private *private;
436
437 if (!(device && device->discipline
438 && !strcmp(device->discipline->name, "ECKD"))) {
439 return ERR_PTR(-EPERM);
440 }
441 /* allocate the private data structure */
442 private = (struct dasd_eer_private *)kmalloc(
443 sizeof(struct dasd_eer_private), GFP_KERNEL);
444 if (!private) {
445 return ERR_PTR(-ENOMEM);
446 }
447 INIT_WORK(&private->worker, dasd_eer_sense_subsystem_status,
448 (void *)device);
449 private->cqr = dasd_kmalloc_request("ECKD",
450 1 /* SNSS */ ,
451 SNSS_DATA_SIZE ,
452 device);
453 if (!private->cqr) {
454 kfree(private);
455 return ERR_PTR(-ENOMEM);
456 }
457 private->flags = 0;
458 return private;
459};
460
461/*
462 * If our private SNSS request is queued, remove it from the
463 * dasd ccw queue so we can free the requests memory.
464 */
465static void
466dasd_eer_dequeue_SNSS_request(struct dasd_device *device,
467 struct dasd_eer_private *eer)
468{
469 struct list_head *lst, *nxt;
470 struct dasd_ccw_req *cqr, *erpcqr;
471 dasd_erp_fn_t erp_fn;
472
473 spin_lock_irq(get_ccwdev_lock(device->cdev));
474 list_for_each_safe(lst, nxt, &device->ccw_queue) {
475 cqr = list_entry(lst, struct dasd_ccw_req, list);
476 /* we are looking for two kinds or requests */
477 /* first kind: our SNSS request: */
478 if (cqr == eer->cqr) {
479 if (cqr->status == DASD_CQR_IN_IO)
480 device->discipline->term_IO(cqr);
481 list_del(&cqr->list);
482 break;
483 }
484 /* second kind: ERP requests for our SNSS request */
485 if (cqr->refers) {
486 /* If this erp request chain ends in our cqr, then */
487 /* cal the erp_postaction to clean it up */
488 erpcqr = cqr;
489 while (erpcqr->refers) {
490 erpcqr = erpcqr->refers;
491 }
492 if (erpcqr == eer->cqr) {
493 erp_fn = device->discipline->erp_postaction(
494 cqr);
495 erp_fn(cqr);
496 }
497 continue;
498 }
499 }
500 spin_unlock_irq(get_ccwdev_lock(device->cdev));
501}
502
503/*
504 * This function dismantles a struct dasd_eer that was created by
505 * dasd_eer_probe. Since we want to free our private data structure,
506 * we must make sure that the memory is not in use anymore.
507 * We have to flush the work queue and remove a possible SNSS request
508 * from the dasd queue.
509 */
510static void
511dasd_eer_destroy(struct dasd_device *device, struct dasd_eer_private *eer)
512{
513 flush_workqueue(dasd_eer_workqueue);
514 dasd_eer_dequeue_SNSS_request(device, eer);
515 dasd_kfree_request(eer->cqr, device);
516 kfree(eer);
517};
518
519/*
520 * enable the extended error reporting for a particular device
521 */
522static int
523dasd_eer_enable_on_device(struct dasd_device *device)
524{
525 void *eer;
526 if (!device)
527 return -ENODEV;
528 if (device->eer)
529 return 0;
530 if (!try_module_get(THIS_MODULE)) {
531 return -EINVAL;
532 }
533 eer = (void *)dasd_eer_probe(device);
534 if (IS_ERR(eer)) {
535 module_put(THIS_MODULE);
536 return PTR_ERR(eer);
537 }
538 device->eer = eer;
539 return 0;
540}
541
542/*
543 * enable the extended error reporting for a particular device
544 */
545static int
546dasd_eer_disable_on_device(struct dasd_device *device)
547{
548 struct dasd_eer_private *eer = device->eer;
549
550 if (!device)
551 return -ENODEV;
552 if (!device->eer)
553 return 0;
554 device->eer = NULL;
555 dasd_eer_destroy(device,eer);
556 module_put(THIS_MODULE);
557
558 return 0;
559}
560
561/*
562 * Set extended error reporting (eer)
563 * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
564 */
565static int
566dasd_ioctl_set_eer(struct block_device *bdev, int no, long args)
567{
568 struct dasd_device *device;
569 int intval;
570
571 if (!capable(CAP_SYS_ADMIN))
572 return -EACCES;
573 if (bdev != bdev->bd_contains)
574 /* Error-reporting is not allowed for partitions */
575 return -EINVAL;
576 if (get_user(intval, (int __user *) args))
577 return -EFAULT;
578 device = bdev->bd_disk->private_data;
579 if (device == NULL)
580 return -ENODEV;
581
582 intval = (intval != 0);
583 DEV_MESSAGE (KERN_DEBUG, device,
584 "set eer on device to %d", intval);
585 if (intval)
586 return dasd_eer_enable_on_device(device);
587 else
588 return dasd_eer_disable_on_device(device);
589}
590
591/*
592 * Get value of extended error reporting.
593 * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
594 */
595static int
596dasd_ioctl_get_eer(struct block_device *bdev, int no, long args)
597{
598 struct dasd_device *device;
599
600 device = bdev->bd_disk->private_data;
601 if (device == NULL)
602 return -ENODEV;
603 return put_user((device->eer != NULL), (int __user *) args);
604}
605
606/*
607 * The following function can be used for those triggers that have
608 * all necessary data available when the function is called.
609 * If the parameter cqr is not NULL, the chain of requests will be searched
610 * for valid sense data, and all valid sense data sets will be added to
611 * the triggers data.
612 */
613static int
614dasd_eer_write_standard_trigger(int trigger, struct dasd_device *device,
615 struct dasd_ccw_req *cqr)
616{
617 struct dasd_ccw_req *temp_cqr;
618 int data_size;
619 struct timeval tv;
620 struct dasd_eer_header header;
621 unsigned long flags;
622 struct eerbuffer *eerb;
623
624 /* go through cqr chain and count the valid sense data sets */
625 temp_cqr = cqr;
626 data_size = 0;
627 while (temp_cqr) {
628 if (temp_cqr->irb.esw.esw0.erw.cons)
629 data_size += 32;
630 temp_cqr = temp_cqr->refers;
631 }
632
633 header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
634 header.trigger = trigger;
635 do_gettimeofday(&tv);
636 header.tv_sec = tv.tv_sec;
637 header.tv_usec = tv.tv_usec;
638 strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
639
640 spin_lock_irqsave(&bufferlock, flags);
641 list_for_each_entry(eerb, &bufferlist, list) {
642 dasd_eer_start_record(eerb, header.total_size);
643 dasd_eer_write_buffer(eerb, sizeof(header), (char*)(&header));
644 temp_cqr = cqr;
645 while (temp_cqr) {
646 if (temp_cqr->irb.esw.esw0.erw.cons)
647 dasd_eer_write_buffer(eerb, 32, cqr->irb.ecw);
648 temp_cqr = temp_cqr->refers;
649 }
650 dasd_eer_write_buffer(eerb, 4,"EOR");
651 }
652 spin_unlock_irqrestore(&bufferlock, flags);
653
654 wake_up_interruptible(&dasd_eer_read_wait_queue);
655
656 return 0;
657}
658
659/*
660 * This function writes a DASD_EER_STATECHANGE trigger.
661 */
662static void
663dasd_eer_write_SNSS_trigger(struct dasd_device *device,
664 struct dasd_ccw_req *cqr)
665{
666 int data_size;
667 int snss_rc;
668 struct timeval tv;
669 struct dasd_eer_header header;
670 unsigned long flags;
671 struct eerbuffer *eerb;
672
673 snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
674 if (snss_rc)
675 data_size = 0;
676 else
677 data_size = SNSS_DATA_SIZE;
678
679 header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
680 header.trigger = DASD_EER_STATECHANGE;
681 do_gettimeofday(&tv);
682 header.tv_sec = tv.tv_sec;
683 header.tv_usec = tv.tv_usec;
684 strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
685
686 spin_lock_irqsave(&bufferlock, flags);
687 list_for_each_entry(eerb, &bufferlist, list) {
688 dasd_eer_start_record(eerb, header.total_size);
689 dasd_eer_write_buffer(eerb, sizeof(header),(char*)(&header));
690 if (!snss_rc)
691 dasd_eer_write_buffer(eerb, SNSS_DATA_SIZE, cqr->data);
692 dasd_eer_write_buffer(eerb, 4,"EOR");
693 }
694 spin_unlock_irqrestore(&bufferlock, flags);
695
696 wake_up_interruptible(&dasd_eer_read_wait_queue);
697}
698
699/*
700 * callback function for use with SNSS request
701 */
702static void
703dasd_eer_SNSS_cb(struct dasd_ccw_req *cqr, void *data)
704{
705 struct dasd_device *device;
706 struct dasd_eer_private *private;
707 unsigned long irqflags;
708
709 device = (struct dasd_device *)data;
710 private = (struct dasd_eer_private *)device->eer;
711 dasd_eer_write_SNSS_trigger(device, cqr);
712 spin_lock_irqsave(&snsslock, irqflags);
713 if(!test_and_clear_bit(SNSS_REQUESTED, &private->flags)) {
714 clear_bit(CQR_IN_USE, &private->flags);
715 spin_unlock_irqrestore(&snsslock, irqflags);
716 return;
717 };
718 clear_bit(CQR_IN_USE, &private->flags);
719 spin_unlock_irqrestore(&snsslock, irqflags);
720 dasd_eer_sense_subsystem_status(device);
721 return;
722}
723
724/*
725 * clean a used cqr before using it again
726 */
727static void
728dasd_eer_clean_SNSS_request(struct dasd_ccw_req *cqr)
729{
730 struct ccw1 *cpaddr = cqr->cpaddr;
731 void *data = cqr->data;
732
733 memset(cqr, 0, sizeof(struct dasd_ccw_req));
734 memset(cpaddr, 0, sizeof(struct ccw1));
735 memset(data, 0, SNSS_DATA_SIZE);
736 cqr->cpaddr = cpaddr;
737 cqr->data = data;
738 strncpy((char *) &cqr->magic, "ECKD", 4);
739 ASCEBC((char *) &cqr->magic, 4);
740 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
741}
742
743/*
744 * build and start an SNSS request
745 * This function is called from a work queue so we have to
746 * pass the dasd_device pointer as a void pointer.
747 */
748static void
749dasd_eer_sense_subsystem_status(void *data)
750{
751 struct dasd_device *device;
752 struct dasd_eer_private *private;
753 struct dasd_ccw_req *cqr;
754 struct ccw1 *ccw;
755 unsigned long irqflags;
756
757 device = (struct dasd_device *)data;
758 private = (struct dasd_eer_private *)device->eer;
759 if (!private) /* device not eer enabled any more */
760 return;
761 cqr = private->cqr;
762 spin_lock_irqsave(&snsslock, irqflags);
763 if(test_and_set_bit(CQR_IN_USE, &private->flags)) {
764 set_bit(SNSS_REQUESTED, &private->flags);
765 spin_unlock_irqrestore(&snsslock, irqflags);
766 return;
767 };
768 spin_unlock_irqrestore(&snsslock, irqflags);
769 dasd_eer_clean_SNSS_request(cqr);
770 cqr->device = device;
771 cqr->retries = 255;
772 cqr->expires = 10 * HZ;
773
774 ccw = cqr->cpaddr;
775 ccw->cmd_code = DASD_ECKD_CCW_SNSS;
776 ccw->count = SNSS_DATA_SIZE;
777 ccw->flags = 0;
778 ccw->cda = (__u32)(addr_t)cqr->data;
779
780 cqr->buildclk = get_clock();
781 cqr->status = DASD_CQR_FILLED;
782 cqr->callback = dasd_eer_SNSS_cb;
783 cqr->callback_data = (void *)device;
784 dasd_add_request_head(cqr);
785
786 return;
787}
788
789/*
790 * This function is called for all triggers. It calls the appropriate
791 * function that writes the actual trigger records.
792 */
793static int
794dasd_eer_write_trigger(struct dasd_eer_trigger *trigger)
795{
796 int rc;
797 struct dasd_eer_private *private = trigger->device->eer;
798
799 switch (trigger->id) {
800 case DASD_EER_FATALERROR:
801 case DASD_EER_PPRCSUSPEND:
802 rc = dasd_eer_write_standard_trigger(
803 trigger->id, trigger->device, trigger->cqr);
804 break;
805 case DASD_EER_NOPATH:
806 rc = dasd_eer_write_standard_trigger(
807 trigger->id, trigger->device, NULL);
808 break;
809 case DASD_EER_STATECHANGE:
810 if (queue_work(dasd_eer_workqueue, &private->worker)) {
811 rc=0;
812 } else {
813 /* If the work_struct was already queued, it can't
814 * be queued again. But this is OK since we don't
815 * need to have it queued twice.
816 */
817 rc = -EBUSY;
818 }
819 break;
820 default: /* unknown trigger, so we write it without any sense data */
821 rc = dasd_eer_write_standard_trigger(
822 trigger->id, trigger->device, NULL);
823 break;
824 }
825 return rc;
826}
827
828/*
829 * This function is registered with the dasd device driver and gets called
830 * for all dasd eer notifications.
831 */
832static int dasd_eer_notify(struct notifier_block *self,
833 unsigned long action, void *data)
834{
835 switch (action) {
836 case DASD_EER_DISABLE:
837 dasd_eer_disable_on_device((struct dasd_device *)data);
838 break;
839 case DASD_EER_TRIGGER:
840 dasd_eer_write_trigger((struct dasd_eer_trigger *)data);
841 break;
842 }
843 return NOTIFY_OK;
844}
845
846
847/*****************************************************************************/
848/* the device operations */
849/*****************************************************************************/
850
851/*
852 * On the one side we need a lock to access our internal buffer, on the
853 * other side a copy_to_user can sleep. So we need to copy the data we have
854 * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
855 */
856static char readbuffer[PAGE_SIZE];
857DECLARE_MUTEX(readbuffer_mutex);
858
859
860static int
861dasd_eer_open(struct inode *inp, struct file *filp)
862{
863 struct eerbuffer *eerb;
864 unsigned long flags;
865
866 eerb = kmalloc(sizeof(struct eerbuffer), GFP_KERNEL);
867 eerb->head = 0;
868 eerb->tail = 0;
869 eerb->residual = 0;
870 eerb->buffer_page_count = 1;
871 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
872 eerb->buffer = kmalloc(eerb->buffer_page_count*sizeof(char*),
873 GFP_KERNEL);
874 if (!eerb->buffer)
875 return -ENOMEM;
876 if (dasd_eer_allocate_buffer_pages(eerb->buffer,
877 eerb->buffer_page_count)) {
878 kfree(eerb->buffer);
879 return -ENOMEM;
880 }
881 filp->private_data = eerb;
882 spin_lock_irqsave(&bufferlock, flags);
883 list_add(&eerb->list, &bufferlist);
884 spin_unlock_irqrestore(&bufferlock, flags);
885
886 return nonseekable_open(inp,filp);
887}
888
889static int
890dasd_eer_close(struct inode *inp, struct file *filp)
891{
892 struct eerbuffer *eerb;
893 unsigned long flags;
894
895 eerb = (struct eerbuffer *)filp->private_data;
896 spin_lock_irqsave(&bufferlock, flags);
897 list_del(&eerb->list);
898 spin_unlock_irqrestore(&bufferlock, flags);
899 dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
900 kfree(eerb->buffer);
901 kfree(eerb);
902
903 return 0;
904}
905
906static long
907dasd_eer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
908{
909 int intval;
910 struct eerbuffer *eerb;
911
912 eerb = (struct eerbuffer *)filp->private_data;
913 switch (cmd) {
914 case DASD_EER_PURGE:
915 dasd_eer_purge_buffer(eerb);
916 return 0;
917 case DASD_EER_SETBUFSIZE:
918 if (get_user(intval, (int __user *)arg))
919 return -EFAULT;
920 return dasd_eer_resize_buffer(eerb, intval);
921 default:
922 return -ENOIOCTLCMD;
923 }
924}
925
/*
 * Read one record (or the remainder of a half read record) from the ring
 * buffer. Blocks until data is available unless O_NONBLOCK is set.
 * The data is staged in the static readbuffer (guarded by
 * readbuffer_mutex) because copy_to_user may sleep and therefore must
 * not be called with bufferlock held.
 */
static ssize_t
dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	int tc,rc;
	int tailcount,effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *)filp->private_data;
	/* serialize readers and protect the shared readbuffer */
	if(down_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) { /* the remainder of this record */
	                          /* has been deleted             */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		up(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int)count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			/* read the length prefix of the next record */
			tc = dasd_eer_read_buffer(eerb,
				sizeof(tailcount), (char*)(&tailcount));
			if (!tc) {
				/* no data available; drop both locks
				 * before blocking and retake them after */
				spin_unlock_irqrestore(&bufferlock, flags);
				up(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc) {
					return rc;
				}
				if(down_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		/* deliver at most one record; remember how much of it
		 * did not fit into the caller's buffer */
		effective_count = min(tailcount,(int)count);
		eerb->residual = tailcount - effective_count;
	}

	/* stage the data before copy_to_user, see comment above */
	tc = dasd_eer_read_buffer(eerb, effective_count, readbuffer);
	WARN_ON(tc != effective_count);

	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		up(&readbuffer_mutex);
		return -EFAULT;
	}

	up(&readbuffer_mutex);
	return effective_count;
}
990
991static unsigned int
992dasd_eer_poll (struct file *filp, poll_table *ptable)
993{
994 unsigned int mask;
995 unsigned long flags;
996 struct eerbuffer *eerb;
997
998 eerb = (struct eerbuffer *)filp->private_data;
999 poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
1000 spin_lock_irqsave(&bufferlock, flags);
1001 if (eerb->head != eerb->tail)
1002 mask = POLLIN | POLLRDNORM ;
1003 else
1004 mask = 0;
1005 spin_unlock_irqrestore(&bufferlock, flags);
1006 return mask;
1007}
1008
1009static struct file_operations dasd_eer_fops = {
1010 .open = &dasd_eer_open,
1011 .release = &dasd_eer_close,
1012 .unlocked_ioctl = &dasd_eer_ioctl,
1013 .compat_ioctl = &dasd_eer_ioctl,
1014 .read = &dasd_eer_read,
1015 .poll = &dasd_eer_poll,
1016 .owner = THIS_MODULE,
1017};
1018
/* character device "dasd_eer", registered as a misc device with a
 * dynamically assigned minor number */
static struct miscdevice dasd_eer_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "dasd_eer",
	.fops = &dasd_eer_fops,
};
1024
1025
1026/*****************************************************************************/
1027/* Init and exit */
1028/*****************************************************************************/
1029
/*
 * Module setup: create the worker thread for SNSS requests, hook into
 * the dasd driver's notifier chain and ioctl numbers, and register the
 * misc device. On any failure, everything set up so far is unwound via
 * the labels at the bottom.
 */
static int
__init dasd_eer_init(void)
{
	int rc;

	dasd_eer_workqueue = create_singlethread_workqueue("dasd_eer");
	if (!dasd_eer_workqueue) {
		MESSAGE(KERN_ERR , "%s", "dasd_eer_init could not "
		       "create workqueue \n");
		rc = -ENOMEM;
		goto out;
	}

	rc = dasd_register_eer_notifier(&dasd_eer_nb);
	if (rc) {
		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
		       "register error reporting");
		goto queue;
	}

	dasd_ioctl_no_register(THIS_MODULE, BIODASDEERSET, dasd_ioctl_set_eer);
	dasd_ioctl_no_register(THIS_MODULE, BIODASDEERGET, dasd_ioctl_get_eer);

	/* we don't need our own character device,
	 * so we just register as misc device */
	rc = misc_register(&dasd_eer_dev);
	if (rc) {
		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
		       "register misc device");
		goto unregister;
	}

	return 0;

unregister:
	/* misc_register failed: undo notifier and ioctl registration */
	dasd_unregister_eer_notifier(&dasd_eer_nb);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
				 dasd_ioctl_set_eer);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
				 dasd_ioctl_get_eer);
queue:
	destroy_workqueue(dasd_eer_workqueue);
out:
	return rc;

}
1076module_init(dasd_eer_init);
1077
/*
 * Module teardown: unregister everything that dasd_eer_init set up.
 * The notifier is removed first so no new triggers come in, then the
 * workqueue can be destroyed safely.
 */
static void
__exit dasd_eer_exit(void)
{
	dasd_unregister_eer_notifier(&dasd_eer_nb);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
				 dasd_ioctl_set_eer);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
				 dasd_ioctl_get_eer);
	destroy_workqueue(dasd_eer_workqueue);

	WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
}
1090module_exit(dasd_eer_exit);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index c20af9874500..d1b08fa13fd2 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -275,6 +275,34 @@ struct dasd_discipline {
275 275
276extern struct dasd_discipline *dasd_diag_discipline_pointer; 276extern struct dasd_discipline *dasd_diag_discipline_pointer;
277 277
278
279/*
280 * Notification numbers for extended error reporting notifications:
281 * The DASD_EER_DISABLE notification is sent before a dasd_device (and it's
282 * eer pointer) is freed. The error reporting module needs to do all necessary
283 * cleanup steps.
284 * The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
285 */
286#define DASD_EER_DISABLE 0
287#define DASD_EER_TRIGGER 1
288
289/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
290#define DASD_EER_FATALERROR 1
291#define DASD_EER_NOPATH 2
292#define DASD_EER_STATECHANGE 3
293#define DASD_EER_PPRCSUSPEND 4
294
295/*
296 * The dasd_eer_trigger structure contains all data that we need to send
297 * along with an DASD_EER_TRIGGER notification.
298 */
299struct dasd_eer_trigger {
300 unsigned int id;
301 struct dasd_device *device;
302 struct dasd_ccw_req *cqr;
303};
304
305
278struct dasd_device { 306struct dasd_device {
279 /* Block device stuff. */ 307 /* Block device stuff. */
280 struct gendisk *gdp; 308 struct gendisk *gdp;
@@ -288,6 +316,9 @@ struct dasd_device {
288 unsigned long flags; /* per device flags */ 316 unsigned long flags; /* per device flags */
289 unsigned short features; /* copy of devmap-features (read-only!) */ 317 unsigned short features; /* copy of devmap-features (read-only!) */
290 318
319 /* extended error reporting stuff (eer) */
320 void *eer;
321
291 /* Device discipline stuff. */ 322 /* Device discipline stuff. */
292 struct dasd_discipline *discipline; 323 struct dasd_discipline *discipline;
293 char *private; 324 char *private;
@@ -488,6 +519,12 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
488int dasd_generic_set_offline (struct ccw_device *cdev); 519int dasd_generic_set_offline (struct ccw_device *cdev);
489int dasd_generic_notify(struct ccw_device *, int); 520int dasd_generic_notify(struct ccw_device *, int);
490void dasd_generic_auto_online (struct ccw_driver *); 521void dasd_generic_auto_online (struct ccw_driver *);
522int dasd_register_eer_notifier(struct notifier_block *);
523int dasd_unregister_eer_notifier(struct notifier_block *);
524void dasd_write_eer_trigger(unsigned int , struct dasd_device *,
525 struct dasd_ccw_req *);
526
527
491 528
492/* externals in dasd_devmap.c */ 529/* externals in dasd_devmap.c */
493extern int dasd_max_devindex; 530extern int dasd_max_devindex;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 44e4b4bb1c5a..3e75095f35d0 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -68,6 +68,6 @@ extern void *chsc_get_chp_desc(struct subchannel*, int);
68 68
69extern int chsc_enable_facility(int); 69extern int chsc_enable_facility(int);
70 70
71#define to_channelpath(dev) container_of(dev, struct channel_path, dev) 71#define to_channelpath(device) container_of(device, struct channel_path, dev)
72 72
73#endif 73#endif
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 245ca99a641e..c551bb84dbfb 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
1245 if (error) 1245 if (error)
1246 goto cleanup_sysctl; 1246 goto cleanup_sysctl;
1247 1247
1248 for (i = 0; i < NR_CPUS; i++) 1248 for_each_cpu(i)
1249 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); 1249 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
1250 1250
1251 devfs_mk_dir("scsi"); 1251 devfs_mk_dir("scsi");
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 2a912153321e..bb9ec28ccc2b 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -439,6 +439,20 @@ static int pci_siig_init(struct pci_dev *dev)
439 return -ENODEV; 439 return -ENODEV;
440} 440}
441 441
442static int pci_siig_setup(struct serial_private *priv,
443 struct pciserial_board *board,
444 struct uart_port *port, int idx)
445{
446 unsigned int bar = FL_GET_BASE(board->flags) + idx, offset = 0;
447
448 if (idx > 3) {
449 bar = 4;
450 offset = (idx - 4) * 8;
451 }
452
453 return setup_port(priv, port, bar, offset, 0);
454}
455
442/* 456/*
443 * Timedia has an explosion of boards, and to avoid the PCI table from 457 * Timedia has an explosion of boards, and to avoid the PCI table from
444 * growing *huge*, we use this function to collapse some 70 entries 458 * growing *huge*, we use this function to collapse some 70 entries
@@ -748,7 +762,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
748 .subvendor = PCI_ANY_ID, 762 .subvendor = PCI_ANY_ID,
749 .subdevice = PCI_ANY_ID, 763 .subdevice = PCI_ANY_ID,
750 .init = pci_siig_init, 764 .init = pci_siig_init,
751 .setup = pci_default_setup, 765 .setup = pci_siig_setup,
752 }, 766 },
753 /* 767 /*
754 * Titan cards 768 * Titan cards
@@ -2141,6 +2155,15 @@ static struct pci_device_id serial_pci_tbl[] = {
2141 { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_850, 2155 { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_850,
2142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2156 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2143 pbn_b0_bt_4_921600 }, 2157 pbn_b0_bt_4_921600 },
2158 { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_550,
2159 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2160 pbn_b0_bt_8_921600 },
2161 { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_650,
2162 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2163 pbn_b0_bt_8_921600 },
2164 { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_850,
2165 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2166 pbn_b0_bt_8_921600 },
2144 2167
2145 /* 2168 /*
2146 * Computone devices submitted by Doug McNash dmcnash@computone.com 2169 * Computone devices submitted by Doug McNash dmcnash@computone.com
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 0d38f0f2ae29..0f4361c8466b 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -98,6 +98,7 @@ config SERIAL_8250_NR_UARTS
98config SERIAL_8250_RUNTIME_UARTS 98config SERIAL_8250_RUNTIME_UARTS
99 int "Number of 8250/16550 serial ports to register at runtime" 99 int "Number of 8250/16550 serial ports to register at runtime"
100 depends on SERIAL_8250 100 depends on SERIAL_8250
101 range 0 SERIAL_8250_NR_UARTS
101 default "4" 102 default "4"
102 help 103 help
103 Set this to the maximum number of serial ports you want 104 Set this to the maximum number of serial ports you want
@@ -892,20 +893,20 @@ config SERIAL_VR41XX_CONSOLE
892 a console on a serial port, say Y. Otherwise, say N. 893 a console on a serial port, say Y. Otherwise, say N.
893 894
894config SERIAL_JSM 895config SERIAL_JSM
895 tristate "Digi International NEO PCI Support" 896 tristate "Digi International NEO PCI Support"
896 depends on PCI && BROKEN 897 depends on PCI
897 select SERIAL_CORE 898 select SERIAL_CORE
898 help 899 help
899 This is a driver for Digi International's Neo series 900 This is a driver for Digi International's Neo series
900 of cards which provide multiple serial ports. You would need 901 of cards which provide multiple serial ports. You would need
901 something like this to connect more than two modems to your Linux 902 something like this to connect more than two modems to your Linux
902 box, for instance in order to become a dial-in server. This driver 903 box, for instance in order to become a dial-in server. This driver
903 supports PCI boards only. 904 supports PCI boards only.
904 If you have a card like this, say Y here and read the file 905 If you have a card like this, say Y here and read the file
905 <file:Documentation/jsm.txt>. 906 <file:Documentation/jsm.txt>.
906 907
907 To compile this driver as a module, choose M here: the 908 To compile this driver as a module, choose M here: the
908 module will be called jsm. 909 module will be called jsm.
909 910
910config SERIAL_SGI_IOC4 911config SERIAL_SGI_IOC4
911 tristate "SGI IOC4 controller serial support" 912 tristate "SGI IOC4 controller serial support"
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index 18753193f59b..dfc1e86d3aa1 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -380,7 +380,6 @@ struct neo_uart_struct {
380extern struct uart_driver jsm_uart_driver; 380extern struct uart_driver jsm_uart_driver;
381extern struct board_ops jsm_neo_ops; 381extern struct board_ops jsm_neo_ops;
382extern int jsm_debug; 382extern int jsm_debug;
383extern int jsm_rawreadok;
384 383
385/************************************************************************* 384/*************************************************************************
386 * 385 *
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 7e56c7824194..b1b66e71d281 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -49,11 +49,8 @@ struct uart_driver jsm_uart_driver = {
49}; 49};
50 50
51int jsm_debug; 51int jsm_debug;
52int jsm_rawreadok;
53module_param(jsm_debug, int, 0); 52module_param(jsm_debug, int, 0);
54module_param(jsm_rawreadok, int, 0);
55MODULE_PARM_DESC(jsm_debug, "Driver debugging level"); 53MODULE_PARM_DESC(jsm_debug, "Driver debugging level");
56MODULE_PARM_DESC(jsm_rawreadok, "Bypass flip buffers on input");
57 54
58static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) 55static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
59{ 56{
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 6fa0d62d6f68..4d48b625cd3d 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -20,8 +20,10 @@
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Scott H Kilau <Scott_Kilau@digi.com> 22 * Scott H Kilau <Scott_Kilau@digi.com>
23 * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> 23 * Ananda Venkatarman <mansarov@us.ibm.com>
24 * 24 * Modifications:
25 * 01/19/06: changed jsm_input routine to use the dynamically allocated
26 * tty_buffer changes. Contributors: Scott Kilau and Ananda V.
25 ***********************************************************************/ 27 ***********************************************************************/
26#include <linux/tty.h> 28#include <linux/tty.h>
27#include <linux/tty_flip.h> 29#include <linux/tty_flip.h>
@@ -497,16 +499,15 @@ void jsm_input(struct jsm_channel *ch)
497{ 499{
498 struct jsm_board *bd; 500 struct jsm_board *bd;
499 struct tty_struct *tp; 501 struct tty_struct *tp;
502 struct tty_ldisc *ld;
500 u32 rmask; 503 u32 rmask;
501 u16 head; 504 u16 head;
502 u16 tail; 505 u16 tail;
503 int data_len; 506 int data_len;
504 unsigned long lock_flags; 507 unsigned long lock_flags;
505 int flip_len; 508 int flip_len = 0;
506 int len = 0; 509 int len = 0;
507 int n = 0; 510 int n = 0;
508 char *buf = NULL;
509 char *buf2 = NULL;
510 int s = 0; 511 int s = 0;
511 int i = 0; 512 int i = 0;
512 513
@@ -574,56 +575,50 @@ void jsm_input(struct jsm_channel *ch)
574 575
575 /* 576 /*
576 * If the rxbuf is empty and we are not throttled, put as much 577 * If the rxbuf is empty and we are not throttled, put as much
577 * as we can directly into the linux TTY flip buffer. 578 * as we can directly into the linux TTY buffer.
578 * The jsm_rawreadok case takes advantage of carnal knowledge that
579 * the char_buf and the flag_buf are next to each other and
580 * are each of (2 * TTY_FLIPBUF_SIZE) size.
581 * 579 *
582 * NOTE: if(!tty->real_raw), the call to ldisc.receive_buf
583 *actually still uses the flag buffer, so you can't
584 *use it for input data
585 */ 580 */
586 if (jsm_rawreadok) { 581 flip_len = TTY_FLIPBUF_SIZE;
587 if (tp->real_raw)
588 flip_len = MYFLIPLEN;
589 else
590 flip_len = 2 * TTY_FLIPBUF_SIZE;
591 } else
592 flip_len = TTY_FLIPBUF_SIZE - tp->flip.count;
593 582
594 len = min(data_len, flip_len); 583 len = min(data_len, flip_len);
595 len = min(len, (N_TTY_BUF_SIZE - 1) - tp->read_cnt); 584 len = min(len, (N_TTY_BUF_SIZE - 1) - tp->read_cnt);
585 ld = tty_ldisc_ref(tp);
596 586
597 if (len <= 0) { 587 /*
598 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); 588 * If the DONT_FLIP flag is on, don't flush our buffer, and act
599 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n"); 589 * like the ld doesn't have any space to put the data right now.
600 return; 590 */
601 } 591 if (test_bit(TTY_DONT_FLIP, &tp->flags))
592 len = 0;
602 593
603 /* 594 /*
604 * If we're bypassing flip buffers on rx, we can blast it 595 * If we were unable to get a reference to the ld,
605 * right into the beginning of the buffer. 596 * don't flush our buffer, and act like the ld doesn't
597 * have any space to put the data right now.
606 */ 598 */
607 if (jsm_rawreadok) { 599 if (!ld) {
608 if (tp->real_raw) { 600 len = 0;
609 if (ch->ch_flags & CH_FLIPBUF_IN_USE) {
610 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
611 "JSM - FLIPBUF in use. delaying input\n");
612 spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
613 return;
614 }
615 ch->ch_flags |= CH_FLIPBUF_IN_USE;
616 buf = ch->ch_bd->flipbuf;
617 buf2 = NULL;
618 } else {
619 buf = tp->flip.char_buf;
620 buf2 = tp->flip.flag_buf;
621 }
622 } else { 601 } else {
623 buf = tp->flip.char_buf_ptr; 602 /*
624 buf2 = tp->flip.flag_buf_ptr; 603 * If ld doesn't have a pointer to a receive_buf function,
604 * flush the data, then act like the ld doesn't have any
605 * space to put the data right now.
606 */
607 if (!ld->receive_buf) {
608 ch->ch_r_head = ch->ch_r_tail;
609 len = 0;
610 }
625 } 611 }
626 612
613 if (len <= 0) {
614 spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
615 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n");
616 if (ld)
617 tty_ldisc_deref(ld);
618 return;
619 }
620
621 len = tty_buffer_request_room(tp, len);
627 n = len; 622 n = len;
628 623
629 /* 624 /*
@@ -638,121 +633,47 @@ void jsm_input(struct jsm_channel *ch)
638 if (s <= 0) 633 if (s <= 0)
639 break; 634 break;
640 635
641 memcpy(buf, ch->ch_rqueue + tail, s); 636 /*
642 637 * If conditions are such that ld needs to see all
643 /* buf2 is only set when port isn't raw */ 638 * UART errors, we will have to walk each character
644 if (buf2) 639 * and error byte and send them to the buffer one at
645 memcpy(buf2, ch->ch_equeue + tail, s); 640 * a time.
646 641 */
647 tail += s;
648 buf += s;
649 if (buf2)
650 buf2 += s;
651 n -= s;
652 /* Flip queue if needed */
653 tail &= rmask;
654 }
655 642
656 /*
657 * In high performance mode, we don't have to update
658 * flag_buf or any of the counts or pointers into flip buf.
659 */
660 if (!jsm_rawreadok) {
661 if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) { 643 if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
662 for (i = 0; i < len; i++) { 644 for (i = 0; i < s; i++) {
663 /* 645 /*
664 * Give the Linux ld the flags in the 646 * Give the Linux ld the flags in the
665 * format it likes. 647 * format it likes.
666 */ 648 */
667 if (tp->flip.flag_buf_ptr[i] & UART_LSR_BI) 649 if (*(ch->ch_equeue +tail +i) & UART_LSR_BI)
668 tp->flip.flag_buf_ptr[i] = TTY_BREAK; 650 tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_BREAK);
669 else if (tp->flip.flag_buf_ptr[i] & UART_LSR_PE) 651 else if (*(ch->ch_equeue +tail +i) & UART_LSR_PE)
670 tp->flip.flag_buf_ptr[i] = TTY_PARITY; 652 tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_PARITY);
671 else if (tp->flip.flag_buf_ptr[i] & UART_LSR_FE) 653 else if (*(ch->ch_equeue +tail +i) & UART_LSR_FE)
672 tp->flip.flag_buf_ptr[i] = TTY_FRAME; 654 tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_FRAME);
673 else 655 else
674 tp->flip.flag_buf_ptr[i] = TTY_NORMAL; 656 tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_NORMAL);
675 } 657 }
676 } else { 658 } else {
677 memset(tp->flip.flag_buf_ptr, 0, len); 659 tty_insert_flip_string(tp, ch->ch_rqueue + tail, s) ;
678 } 660 }
679 661 tail += s;
680 tp->flip.char_buf_ptr += len; 662 n -= s;
681 tp->flip.flag_buf_ptr += len; 663 /* Flip queue if needed */
682 tp->flip.count += len; 664 tail &= rmask;
683 }
684 else if (!tp->real_raw) {
685 if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
686 for (i = 0; i < len; i++) {
687 /*
688 * Give the Linux ld the flags in the
689 * format it likes.
690 */
691 if (tp->flip.flag_buf_ptr[i] & UART_LSR_BI)
692 tp->flip.flag_buf_ptr[i] = TTY_BREAK;
693 else if (tp->flip.flag_buf_ptr[i] & UART_LSR_PE)
694 tp->flip.flag_buf_ptr[i] = TTY_PARITY;
695 else if (tp->flip.flag_buf_ptr[i] & UART_LSR_FE)
696 tp->flip.flag_buf_ptr[i] = TTY_FRAME;
697 else
698 tp->flip.flag_buf_ptr[i] = TTY_NORMAL;
699 }
700 } else
701 memset(tp->flip.flag_buf, 0, len);
702 } 665 }
703 666
704 /* 667 ch->ch_r_tail = tail & rmask;
705 * If we're doing raw reads, jam it right into the 668 ch->ch_e_tail = tail & rmask;
706 * line disc bypassing the flip buffers. 669 jsm_check_queue_flow_control(ch);
707 */ 670 spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
708 if (jsm_rawreadok) {
709 if (tp->real_raw) {
710 ch->ch_r_tail = tail & rmask;
711 ch->ch_e_tail = tail & rmask;
712
713 jsm_check_queue_flow_control(ch);
714
715 /* !!! WE *MUST* LET GO OF ALL LOCKS BEFORE CALLING RECEIVE BUF !!! */
716 671
717 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); 672 /* Tell the tty layer its okay to "eat" the data now */
673 tty_flip_buffer_push(tp);
718 674
719 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, 675 if (ld)
720 "jsm_input. %d real_raw len:%d calling receive_buf for board %d\n", 676 tty_ldisc_deref(ld);
721 __LINE__, len, ch->ch_bd->boardnum);
722 tp->ldisc.receive_buf(tp, ch->ch_bd->flipbuf, NULL, len);
723
724 /* Allow use of channel flip buffer again */
725 spin_lock_irqsave(&ch->ch_lock, lock_flags);
726 ch->ch_flags &= ~CH_FLIPBUF_IN_USE;
727 spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
728
729 } else {
730 ch->ch_r_tail = tail & rmask;
731 ch->ch_e_tail = tail & rmask;
732
733 jsm_check_queue_flow_control(ch);
734
735 /* !!! WE *MUST* LET GO OF ALL LOCKS BEFORE CALLING RECEIVE BUF !!! */
736 spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
737
738 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
739 "jsm_input. %d not real_raw len:%d calling receive_buf for board %d\n",
740 __LINE__, len, ch->ch_bd->boardnum);
741
742 tp->ldisc.receive_buf(tp, tp->flip.char_buf, tp->flip.flag_buf, len);
743 }
744 } else {
745 ch->ch_r_tail = tail & rmask;
746 ch->ch_e_tail = tail & rmask;
747
748 jsm_check_queue_flow_control(ch);
749
750 spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
751
752 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
753 "jsm_input. %d not jsm_read raw okay scheduling flip\n", __LINE__);
754 tty_schedule_flip(tp);
755 }
756 677
757 jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n"); 678 jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
758} 679}
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index d957a3a9edf1..0ef648fa4b2d 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -350,8 +350,7 @@ static inline void receive_chars(struct mcf_serial *info)
350 } 350 }
351 tty_insert_flip_char(tty, ch, flag); 351 tty_insert_flip_char(tty, ch, flag);
352 } 352 }
353 353 tty_flip_buffer_push(tty);
354 schedule_work(&tty->flip.work);
355 return; 354 return;
356} 355}
357 356
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 0717abfdae06..95fb4939c675 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -2237,7 +2237,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
2237 * If this port is a console, then the spinlock is already 2237 * If this port is a console, then the spinlock is already
2238 * initialised. 2238 * initialised.
2239 */ 2239 */
2240 if (!uart_console(port)) 2240 if (!(uart_console(port) && (port->cons->flags & CON_ENABLED)))
2241 spin_lock_init(&port->lock); 2241 spin_lock_init(&port->lock);
2242 2242
2243 uart_configure_port(drv, state, port); 2243 uart_configure_port(drv, state, port);
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index f6704688ee8c..5578a9dd04e8 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -3558,10 +3558,16 @@ static void ixj_write_frame(IXJ *j)
3558 } 3558 }
3559 /* Add word 0 to G.729 frames for the 8021. Right now we don't do VAD/CNG */ 3559 /* Add word 0 to G.729 frames for the 8021. Right now we don't do VAD/CNG */
3560 if (j->play_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) { 3560 if (j->play_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) {
3561 if(j->write_buffer_rp + cnt == 0 && j->write_buffer_rp + cnt + 1 == 0 && j->write_buffer_rp + cnt + 2 == 0 && 3561 if (j->write_buffer_rp[cnt] == 0 &&
3562 j->write_buffer_rp + cnt + 3 == 0 && j->write_buffer_rp + cnt + 4 == 0 && j->write_buffer_rp + cnt + 5 == 0 && 3562 j->write_buffer_rp[cnt + 1] == 0 &&
3563 j->write_buffer_rp + cnt + 6 == 0 && j->write_buffer_rp + cnt + 7 == 0 && j->write_buffer_rp + cnt + 8 == 0 && 3563 j->write_buffer_rp[cnt + 2] == 0 &&
3564 j->write_buffer_rp + cnt + 9 == 0) { 3564 j->write_buffer_rp[cnt + 3] == 0 &&
3565 j->write_buffer_rp[cnt + 4] == 0 &&
3566 j->write_buffer_rp[cnt + 5] == 0 &&
3567 j->write_buffer_rp[cnt + 6] == 0 &&
3568 j->write_buffer_rp[cnt + 7] == 0 &&
3569 j->write_buffer_rp[cnt + 8] == 0 &&
3570 j->write_buffer_rp[cnt + 9] == 0) {
3565 /* someone is trying to write silence lets make this a type 0 frame. */ 3571 /* someone is trying to write silence lets make this a type 0 frame. */
3566 outb_p(0x00, j->DSPbase + 0x0C); 3572 outb_p(0x00, j->DSPbase + 0x0C);
3567 outb_p(0x00, j->DSPbase + 0x0D); 3573 outb_p(0x00, j->DSPbase + 0x0D);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 076462c8ba2a..dce9d987f0fc 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -378,7 +378,7 @@ const struct usb_device_id *usb_match_id(struct usb_interface *interface,
378 378
379 return NULL; 379 return NULL;
380} 380}
381EXPORT_SYMBOL_GPL(usb_match_id); 381EXPORT_SYMBOL(usb_match_id);
382 382
383int usb_device_match(struct device *dev, struct device_driver *drv) 383int usb_device_match(struct device *dev, struct device_driver *drv)
384{ 384{
@@ -446,7 +446,7 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner)
446 446
447 return retval; 447 return retval;
448} 448}
449EXPORT_SYMBOL_GPL(usb_register_driver); 449EXPORT_SYMBOL(usb_register_driver);
450 450
451/** 451/**
452 * usb_deregister - unregister a USB driver 452 * usb_deregister - unregister a USB driver
@@ -469,4 +469,4 @@ void usb_deregister(struct usb_driver *driver)
469 469
470 usbfs_update_special(); 470 usbfs_update_special();
471} 471}
472EXPORT_SYMBOL_GPL(usb_deregister); 472EXPORT_SYMBOL(usb_deregister);
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index a7bcd17112c0..0339f5640a78 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -30,10 +30,11 @@
30 30
31#define STI_DRIVERVERSION "Version 0.9a" 31#define STI_DRIVERVERSION "Version 0.9a"
32 32
33struct sti_struct *default_sti; 33struct sti_struct *default_sti __read_mostly;
34 34
35static int num_sti_roms; /* # of STI ROMS found */ 35/* number of STI ROMS found and their ptrs to each struct */
36static struct sti_struct *sti_roms[MAX_STI_ROMS]; /* ptr to each sti_struct */ 36static int num_sti_roms __read_mostly;
37static struct sti_struct *sti_roms[MAX_STI_ROMS] __read_mostly;
37 38
38 39
39/* The colour indices used by STI are 40/* The colour indices used by STI are
@@ -266,7 +267,7 @@ sti_rom_copy(unsigned long base, unsigned long count, void *dest)
266 267
267 268
268 269
269static char default_sti_path[21]; 270static char default_sti_path[21] __read_mostly;
270 271
271#ifndef MODULE 272#ifndef MODULE
272static int __init sti_setup(char *str) 273static int __init sti_setup(char *str)
@@ -414,10 +415,10 @@ sti_init_glob_cfg(struct sti_struct *sti,
414 if (!sti->sti_mem_request) 415 if (!sti->sti_mem_request)
415 sti->sti_mem_request = 256; /* STI default */ 416 sti->sti_mem_request = 256; /* STI default */
416 417
417 glob_cfg = kmalloc(sizeof(*sti->glob_cfg), GFP_KERNEL); 418 glob_cfg = kzalloc(sizeof(*sti->glob_cfg), GFP_KERNEL);
418 glob_cfg_ext = kmalloc(sizeof(*glob_cfg_ext), GFP_KERNEL); 419 glob_cfg_ext = kzalloc(sizeof(*glob_cfg_ext), GFP_KERNEL);
419 save_addr = kmalloc(save_addr_size, GFP_KERNEL); 420 save_addr = kzalloc(save_addr_size, GFP_KERNEL);
420 sti_mem_addr = kmalloc(sti->sti_mem_request, GFP_KERNEL); 421 sti_mem_addr = kzalloc(sti->sti_mem_request, GFP_KERNEL);
421 422
422 if (!(glob_cfg && glob_cfg_ext && save_addr && sti_mem_addr)) { 423 if (!(glob_cfg && glob_cfg_ext && save_addr && sti_mem_addr)) {
423 kfree(glob_cfg); 424 kfree(glob_cfg);
@@ -427,11 +428,6 @@ sti_init_glob_cfg(struct sti_struct *sti,
427 return -ENOMEM; 428 return -ENOMEM;
428 } 429 }
429 430
430 memset(glob_cfg, 0, sizeof(*glob_cfg));
431 memset(glob_cfg_ext, 0, sizeof(*glob_cfg_ext));
432 memset(save_addr, 0, save_addr_size);
433 memset(sti_mem_addr, 0, sti->sti_mem_request);
434
435 glob_cfg->ext_ptr = STI_PTR(glob_cfg_ext); 431 glob_cfg->ext_ptr = STI_PTR(glob_cfg_ext);
436 glob_cfg->save_addr = STI_PTR(save_addr); 432 glob_cfg->save_addr = STI_PTR(save_addr);
437 for (i=0; i<8; i++) { 433 for (i=0; i<8; i++) {
@@ -502,9 +498,9 @@ sti_init_glob_cfg(struct sti_struct *sti,
502 498
503#ifdef CONFIG_FB 499#ifdef CONFIG_FB
504struct sti_cooked_font * __init 500struct sti_cooked_font * __init
505sti_select_fbfont( struct sti_cooked_rom *cooked_rom, char *fbfont_name ) 501sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
506{ 502{
507 struct font_desc *fbfont; 503 const struct font_desc *fbfont;
508 unsigned int size, bpc; 504 unsigned int size, bpc;
509 void *dest; 505 void *dest;
510 struct sti_rom_font *nf; 506 struct sti_rom_font *nf;
@@ -525,10 +521,9 @@ sti_select_fbfont( struct sti_cooked_rom *cooked_rom, char *fbfont_name )
525 size = bpc * 256; 521 size = bpc * 256;
526 size += sizeof(struct sti_rom_font); 522 size += sizeof(struct sti_rom_font);
527 523
528 nf = kmalloc(size, GFP_KERNEL); 524 nf = kzalloc(size, GFP_KERNEL);
529 if (!nf) 525 if (!nf)
530 return NULL; 526 return NULL;
531 memset(nf, 0, size);
532 527
533 nf->first_char = 0; 528 nf->first_char = 0;
534 nf->last_char = 255; 529 nf->last_char = 255;
@@ -544,7 +539,7 @@ sti_select_fbfont( struct sti_cooked_rom *cooked_rom, char *fbfont_name )
544 dest += sizeof(struct sti_rom_font); 539 dest += sizeof(struct sti_rom_font);
545 memcpy(dest, fbfont->data, bpc*256); 540 memcpy(dest, fbfont->data, bpc*256);
546 541
547 cooked_font = kmalloc(sizeof(*cooked_font), GFP_KERNEL); 542 cooked_font = kzalloc(sizeof(*cooked_font), GFP_KERNEL);
548 if (!cooked_font) { 543 if (!cooked_font) {
549 kfree(nf); 544 kfree(nf);
550 return NULL; 545 return NULL;
@@ -559,7 +554,7 @@ sti_select_fbfont( struct sti_cooked_rom *cooked_rom, char *fbfont_name )
559} 554}
560#else 555#else
561struct sti_cooked_font * __init 556struct sti_cooked_font * __init
562sti_select_fbfont(struct sti_cooked_rom *cooked_rom, char *fbfont_name) 557sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
563{ 558{
564 return NULL; 559 return NULL;
565} 560}
@@ -617,7 +612,7 @@ sti_cook_fonts(struct sti_cooked_rom *cooked_rom,
617 struct sti_rom_font *raw_font, *font_start; 612 struct sti_rom_font *raw_font, *font_start;
618 struct sti_cooked_font *cooked_font; 613 struct sti_cooked_font *cooked_font;
619 614
620 cooked_font = kmalloc(sizeof(*cooked_font), GFP_KERNEL); 615 cooked_font = kzalloc(sizeof(*cooked_font), GFP_KERNEL);
621 if (!cooked_font) 616 if (!cooked_font)
622 return 0; 617 return 0;
623 618
@@ -631,7 +626,7 @@ sti_cook_fonts(struct sti_cooked_rom *cooked_rom,
631 while (raw_font->next_font) { 626 while (raw_font->next_font) {
632 raw_font = ((void *)font_start) + (raw_font->next_font); 627 raw_font = ((void *)font_start) + (raw_font->next_font);
633 628
634 cooked_font->next_font = kmalloc(sizeof(*cooked_font), GFP_KERNEL); 629 cooked_font->next_font = kzalloc(sizeof(*cooked_font), GFP_KERNEL);
635 if (!cooked_font->next_font) 630 if (!cooked_font->next_font)
636 return 1; 631 return 1;
637 632
@@ -668,10 +663,9 @@ sti_bmode_font_raw(struct sti_cooked_font *f)
668 unsigned char *n, *p, *q; 663 unsigned char *n, *p, *q;
669 int size = f->raw->bytes_per_char*256+sizeof(struct sti_rom_font); 664 int size = f->raw->bytes_per_char*256+sizeof(struct sti_rom_font);
670 665
671 n = kmalloc (4*size, GFP_KERNEL); 666 n = kzalloc (4*size, GFP_KERNEL);
672 if (!n) 667 if (!n)
673 return NULL; 668 return NULL;
674 memset (n, 0, 4*size);
675 p = n + 3; 669 p = n + 3;
676 q = (unsigned char *)f->raw; 670 q = (unsigned char *)f->raw;
677 while (size--) { 671 while (size--) {
@@ -816,13 +810,12 @@ sti_try_rom_generic(unsigned long address, unsigned long hpa, struct pci_dev *pd
816 return NULL; 810 return NULL;
817 } 811 }
818 812
819 sti = kmalloc(sizeof(*sti), GFP_KERNEL); 813 sti = kzalloc(sizeof(*sti), GFP_KERNEL);
820 if (!sti) { 814 if (!sti) {
821 printk(KERN_ERR "Not enough memory !\n"); 815 printk(KERN_ERR "Not enough memory !\n");
822 return NULL; 816 return NULL;
823 } 817 }
824 818
825 memset(sti, 0, sizeof(*sti));
826 spin_lock_init(&sti->lock); 819 spin_lock_init(&sti->lock);
827 820
828test_rom: 821test_rom:
@@ -1035,7 +1028,7 @@ static struct parisc_driver pa_sti_driver = {
1035 * sti_init_roms() - detects all STI ROMs and stores them in sti_roms[] 1028 * sti_init_roms() - detects all STI ROMs and stores them in sti_roms[]
1036 */ 1029 */
1037 1030
1038static int sticore_initialized; 1031static int sticore_initialized __read_mostly;
1039 1032
1040static void __init sti_init_roms(void) 1033static void __init sti_init_roms(void)
1041{ 1034{
diff --git a/fs/9p/conv.c b/fs/9p/conv.c
index 32a9f99154e2..bf1f10067960 100644
--- a/fs/9p/conv.c
+++ b/fs/9p/conv.c
@@ -116,13 +116,19 @@ static void buf_put_int64(struct cbuf *buf, u64 val)
116 } 116 }
117} 117}
118 118
119static void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) 119static char *buf_put_stringn(struct cbuf *buf, const char *s, u16 slen)
120{ 120{
121 char *ret;
122
123 ret = NULL;
121 if (buf_check_size(buf, slen + 2)) { 124 if (buf_check_size(buf, slen + 2)) {
122 buf_put_int16(buf, slen); 125 buf_put_int16(buf, slen);
126 ret = buf->p;
123 memcpy(buf->p, s, slen); 127 memcpy(buf->p, s, slen);
124 buf->p += slen; 128 buf->p += slen;
125 } 129 }
130
131 return ret;
126} 132}
127 133
128static inline void buf_put_string(struct cbuf *buf, const char *s) 134static inline void buf_put_string(struct cbuf *buf, const char *s)
@@ -430,15 +436,19 @@ static inline void v9fs_put_int64(struct cbuf *bufp, u64 val, u64 * p)
430static void 436static void
431v9fs_put_str(struct cbuf *bufp, char *data, struct v9fs_str *str) 437v9fs_put_str(struct cbuf *bufp, char *data, struct v9fs_str *str)
432{ 438{
433 if (data) { 439 int len;
434 str->len = strlen(data); 440 char *s;
435 str->str = bufp->p; 441
436 } else { 442 if (data)
437 str->len = 0; 443 len = strlen(data);
438 str->str = NULL; 444 else
439 } 445 len = 0;
440 446
441 buf_put_stringn(bufp, data, str->len); 447 s = buf_put_stringn(bufp, data, len);
448 if (str) {
449 str->len = len;
450 str->str = s;
451 }
442} 452}
443 453
444static int 454static int
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 945cb368d451..ea1134eb47c8 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -471,10 +471,13 @@ static void v9fs_write_work(void *a)
471 } 471 }
472 472
473 spin_lock(&m->lock); 473 spin_lock(&m->lock);
474 req = 474again:
475 list_entry(m->unsent_req_list.next, struct v9fs_req, 475 req = list_entry(m->unsent_req_list.next, struct v9fs_req,
476 req_list); 476 req_list);
477 list_move_tail(&req->req_list, &m->req_list); 477 list_move_tail(&req->req_list, &m->req_list);
478 if (req->err == ERREQFLUSH)
479 goto again;
480
478 m->wbuf = req->tcall->sdata; 481 m->wbuf = req->tcall->sdata;
479 m->wsize = req->tcall->size; 482 m->wsize = req->tcall->size;
480 m->wpos = 0; 483 m->wpos = 0;
@@ -525,7 +528,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
525 struct v9fs_str *ename; 528 struct v9fs_str *ename;
526 529
527 tag = req->tag; 530 tag = req->tag;
528 if (req->rcall->id == RERROR && !req->err) { 531 if (!req->err && req->rcall->id == RERROR) {
529 ecode = req->rcall->params.rerror.errno; 532 ecode = req->rcall->params.rerror.errno;
530 ename = &req->rcall->params.rerror.error; 533 ename = &req->rcall->params.rerror.error;
531 534
@@ -551,7 +554,10 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
551 req->err = -EIO; 554 req->err = -EIO;
552 } 555 }
553 556
554 if (req->cb && req->err != ERREQFLUSH) { 557 if (req->err == ERREQFLUSH)
558 return;
559
560 if (req->cb) {
555 dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n", 561 dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
556 req->tcall, req->rcall); 562 req->tcall, req->rcall);
557 563
@@ -812,6 +818,7 @@ v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
812 struct v9fs_mux_rpc *r; 818 struct v9fs_mux_rpc *r;
813 819
814 if (err == ERREQFLUSH) { 820 if (err == ERREQFLUSH) {
821 kfree(rc);
815 dprintk(DEBUG_MUX, "err req flush\n"); 822 dprintk(DEBUG_MUX, "err req flush\n");
816 return; 823 return;
817 } 824 }
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 91f552454c76..63e5b0398e8b 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -886,8 +886,8 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
886 } 886 }
887 887
888 /* copy extension buffer into buffer */ 888 /* copy extension buffer into buffer */
889 if (fcall->params.rstat.stat.extension.len < buflen) 889 if (fcall->params.rstat.stat.extension.len+1 < buflen)
890 buflen = fcall->params.rstat.stat.extension.len; 890 buflen = fcall->params.rstat.stat.extension.len + 1;
891 891
892 memcpy(buffer, fcall->params.rstat.stat.extension.str, buflen - 1); 892 memcpy(buffer, fcall->params.rstat.stat.extension.str, buflen - 1);
893 buffer[buflen-1] = 0; 893 buffer[buflen-1] = 0;
@@ -951,7 +951,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
951 if (!link) 951 if (!link)
952 link = ERR_PTR(-ENOMEM); 952 link = ERR_PTR(-ENOMEM);
953 else { 953 else {
954 len = v9fs_readlink(dentry, link, strlen(link)); 954 len = v9fs_readlink(dentry, link, PATH_MAX);
955 955
956 if (len < 0) { 956 if (len < 0) {
957 __putname(link); 957 __putname(link);
diff --git a/fs/Kconfig b/fs/Kconfig
index 93b5dc4082ff..e9749b0eecd8 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -883,8 +883,6 @@ config CONFIGFS_FS
883 Both sysfs and configfs can and should exist together on the 883 Both sysfs and configfs can and should exist together on the
884 same system. One is not a replacement for the other. 884 same system. One is not a replacement for the other.
885 885
886 If unsure, say N.
887
888endmenu 886endmenu
889 887
890menu "Miscellaneous filesystems" 888menu "Miscellaneous filesystems"
diff --git a/fs/buffer.c b/fs/buffer.c
index 5e4a90ee103f..62cfd17dc5fe 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2867,22 +2867,22 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2867 else if (test_set_buffer_locked(bh)) 2867 else if (test_set_buffer_locked(bh))
2868 continue; 2868 continue;
2869 2869
2870 get_bh(bh);
2871 if (rw == WRITE || rw == SWRITE) { 2870 if (rw == WRITE || rw == SWRITE) {
2872 if (test_clear_buffer_dirty(bh)) { 2871 if (test_clear_buffer_dirty(bh)) {
2873 bh->b_end_io = end_buffer_write_sync; 2872 bh->b_end_io = end_buffer_write_sync;
2873 get_bh(bh);
2874 submit_bh(WRITE, bh); 2874 submit_bh(WRITE, bh);
2875 continue; 2875 continue;
2876 } 2876 }
2877 } else { 2877 } else {
2878 if (!buffer_uptodate(bh)) { 2878 if (!buffer_uptodate(bh)) {
2879 bh->b_end_io = end_buffer_read_sync; 2879 bh->b_end_io = end_buffer_read_sync;
2880 get_bh(bh);
2880 submit_bh(rw, bh); 2881 submit_bh(rw, bh);
2881 continue; 2882 continue;
2882 } 2883 }
2883 } 2884 }
2884 unlock_buffer(bh); 2885 unlock_buffer(bh);
2885 put_bh(bh);
2886 } 2886 }
2887} 2887}
2888 2888
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 8899d9c5f6bf..f70e46951b37 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -36,6 +36,7 @@ struct configfs_dirent {
36 int s_type; 36 int s_type;
37 umode_t s_mode; 37 umode_t s_mode;
38 struct dentry * s_dentry; 38 struct dentry * s_dentry;
39 struct iattr * s_iattr;
39}; 40};
40 41
41#define CONFIGFS_ROOT 0x0001 42#define CONFIGFS_ROOT 0x0001
@@ -48,10 +49,11 @@ struct configfs_dirent {
48#define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR) 49#define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR)
49 50
50extern struct vfsmount * configfs_mount; 51extern struct vfsmount * configfs_mount;
52extern kmem_cache_t *configfs_dir_cachep;
51 53
52extern int configfs_is_root(struct config_item *item); 54extern int configfs_is_root(struct config_item *item);
53 55
54extern struct inode * configfs_new_inode(mode_t mode); 56extern struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent *);
55extern int configfs_create(struct dentry *, int mode, int (*init)(struct inode *)); 57extern int configfs_create(struct dentry *, int mode, int (*init)(struct inode *));
56 58
57extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); 59extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
@@ -63,6 +65,7 @@ extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
63 65
64extern const unsigned char * configfs_get_name(struct configfs_dirent *sd); 66extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
65extern void configfs_drop_dentry(struct configfs_dirent *sd, struct dentry *parent); 67extern void configfs_drop_dentry(struct configfs_dirent *sd, struct dentry *parent);
68extern int configfs_setattr(struct dentry *dentry, struct iattr *iattr);
66 69
67extern int configfs_pin_fs(void); 70extern int configfs_pin_fs(void);
68extern void configfs_release_fs(void); 71extern void configfs_release_fs(void);
@@ -120,8 +123,10 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry
120 123
121static inline void release_configfs_dirent(struct configfs_dirent * sd) 124static inline void release_configfs_dirent(struct configfs_dirent * sd)
122{ 125{
123 if (!(sd->s_type & CONFIGFS_ROOT)) 126 if (!(sd->s_type & CONFIGFS_ROOT)) {
124 kfree(sd); 127 kfree(sd->s_iattr);
128 kmem_cache_free(configfs_dir_cachep, sd);
129 }
125} 130}
126 131
127static inline struct configfs_dirent * configfs_get(struct configfs_dirent * sd) 132static inline struct configfs_dirent * configfs_get(struct configfs_dirent * sd)
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index b668ec61527e..ca60e3abef45 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -72,7 +72,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * pare
72{ 72{
73 struct configfs_dirent * sd; 73 struct configfs_dirent * sd;
74 74
75 sd = kmalloc(sizeof(*sd), GFP_KERNEL); 75 sd = kmem_cache_alloc(configfs_dir_cachep, GFP_KERNEL);
76 if (!sd) 76 if (!sd)
77 return NULL; 77 return NULL;
78 78
@@ -136,13 +136,19 @@ static int create_dir(struct config_item * k, struct dentry * p,
136 int error; 136 int error;
137 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; 137 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
138 138
139 error = configfs_create(d, mode, init_dir); 139 error = configfs_make_dirent(p->d_fsdata, d, k, mode,
140 CONFIGFS_DIR);
140 if (!error) { 141 if (!error) {
141 error = configfs_make_dirent(p->d_fsdata, d, k, mode, 142 error = configfs_create(d, mode, init_dir);
142 CONFIGFS_DIR);
143 if (!error) { 143 if (!error) {
144 p->d_inode->i_nlink++; 144 p->d_inode->i_nlink++;
145 (d)->d_op = &configfs_dentry_ops; 145 (d)->d_op = &configfs_dentry_ops;
146 } else {
147 struct configfs_dirent *sd = d->d_fsdata;
148 if (sd) {
149 list_del_init(&sd->s_sibling);
150 configfs_put(sd);
151 }
146 } 152 }
147 } 153 }
148 return error; 154 return error;
@@ -182,12 +188,19 @@ int configfs_create_link(struct configfs_symlink *sl,
182 int err = 0; 188 int err = 0;
183 umode_t mode = S_IFLNK | S_IRWXUGO; 189 umode_t mode = S_IFLNK | S_IRWXUGO;
184 190
185 err = configfs_create(dentry, mode, init_symlink); 191 err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
192 CONFIGFS_ITEM_LINK);
186 if (!err) { 193 if (!err) {
187 err = configfs_make_dirent(parent->d_fsdata, dentry, sl, 194 err = configfs_create(dentry, mode, init_symlink);
188 mode, CONFIGFS_ITEM_LINK);
189 if (!err) 195 if (!err)
190 dentry->d_op = &configfs_dentry_ops; 196 dentry->d_op = &configfs_dentry_ops;
197 else {
198 struct configfs_dirent *sd = dentry->d_fsdata;
199 if (sd) {
200 list_del_init(&sd->s_sibling);
201 configfs_put(sd);
202 }
203 }
191 } 204 }
192 return err; 205 return err;
193} 206}
@@ -241,13 +254,15 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
241 struct configfs_attribute * attr = sd->s_element; 254 struct configfs_attribute * attr = sd->s_element;
242 int error; 255 int error;
243 256
257 dentry->d_fsdata = configfs_get(sd);
258 sd->s_dentry = dentry;
244 error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG, init_file); 259 error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG, init_file);
245 if (error) 260 if (error) {
261 configfs_put(sd);
246 return error; 262 return error;
263 }
247 264
248 dentry->d_op = &configfs_dentry_ops; 265 dentry->d_op = &configfs_dentry_ops;
249 dentry->d_fsdata = configfs_get(sd);
250 sd->s_dentry = dentry;
251 d_rehash(dentry); 266 d_rehash(dentry);
252 267
253 return 0; 268 return 0;
@@ -839,6 +854,7 @@ struct inode_operations configfs_dir_inode_operations = {
839 .symlink = configfs_symlink, 854 .symlink = configfs_symlink,
840 .unlink = configfs_unlink, 855 .unlink = configfs_unlink,
841 .lookup = configfs_lookup, 856 .lookup = configfs_lookup,
857 .setattr = configfs_setattr,
842}; 858};
843 859
844#if 0 860#if 0
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index c26cd61f13af..3921920d8716 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/dnotify.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
31#include <asm/uaccess.h> 30#include <asm/uaccess.h>
32#include <asm/semaphore.h> 31#include <asm/semaphore.h>
@@ -150,7 +149,7 @@ out:
150/** 149/**
151 * fill_write_buffer - copy buffer from userspace. 150 * fill_write_buffer - copy buffer from userspace.
152 * @buffer: data buffer for file. 151 * @buffer: data buffer for file.
153 * @userbuf: data from user. 152 * @buf: data from user.
154 * @count: number of bytes in @userbuf. 153 * @count: number of bytes in @userbuf.
155 * 154 *
156 * Allocate @buffer->page if it hasn't been already, then 155 * Allocate @buffer->page if it hasn't been already, then
@@ -177,8 +176,9 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
177 176
178/** 177/**
179 * flush_write_buffer - push buffer to config_item. 178 * flush_write_buffer - push buffer to config_item.
180 * @file: file pointer. 179 * @dentry: dentry to the attribute
181 * @buffer: data buffer for file. 180 * @buffer: data buffer for file.
181 * @count: number of bytes
182 * 182 *
183 * Get the correct pointers for the config_item and the attribute we're 183 * Get the correct pointers for the config_item and the attribute we're
184 * dealing with, then call the store() method for the attribute, 184 * dealing with, then call the store() method for the attribute,
@@ -217,15 +217,16 @@ static ssize_t
217configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 217configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
218{ 218{
219 struct configfs_buffer * buffer = file->private_data; 219 struct configfs_buffer * buffer = file->private_data;
220 ssize_t len;
220 221
221 down(&buffer->sem); 222 down(&buffer->sem);
222 count = fill_write_buffer(buffer,buf,count); 223 len = fill_write_buffer(buffer, buf, count);
223 if (count > 0) 224 if (len > 0)
224 count = flush_write_buffer(file->f_dentry,buffer,count); 225 len = flush_write_buffer(file->f_dentry, buffer, count);
225 if (count > 0) 226 if (len > 0)
226 *ppos += count; 227 *ppos += len;
227 up(&buffer->sem); 228 up(&buffer->sem);
228 return count; 229 return len;
229} 230}
230 231
231static int check_perm(struct inode * inode, struct file * file) 232static int check_perm(struct inode * inode, struct file * file)
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 6577c588de9d..c153bd9534cb 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -31,6 +31,7 @@
31#include <linux/pagemap.h> 31#include <linux/pagemap.h>
32#include <linux/namei.h> 32#include <linux/namei.h>
33#include <linux/backing-dev.h> 33#include <linux/backing-dev.h>
34#include <linux/capability.h>
34 35
35#include <linux/configfs.h> 36#include <linux/configfs.h>
36#include "configfs_internal.h" 37#include "configfs_internal.h"
@@ -48,18 +49,107 @@ static struct backing_dev_info configfs_backing_dev_info = {
48 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, 49 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
49}; 50};
50 51
51struct inode * configfs_new_inode(mode_t mode) 52static struct inode_operations configfs_inode_operations ={
53 .setattr = configfs_setattr,
54};
55
56int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
57{
58 struct inode * inode = dentry->d_inode;
59 struct configfs_dirent * sd = dentry->d_fsdata;
60 struct iattr * sd_iattr;
61 unsigned int ia_valid = iattr->ia_valid;
62 int error;
63
64 if (!sd)
65 return -EINVAL;
66
67 sd_iattr = sd->s_iattr;
68
69 error = inode_change_ok(inode, iattr);
70 if (error)
71 return error;
72
73 error = inode_setattr(inode, iattr);
74 if (error)
75 return error;
76
77 if (!sd_iattr) {
78 /* setting attributes for the first time, allocate now */
79 sd_iattr = kmalloc(sizeof(struct iattr), GFP_KERNEL);
80 if (!sd_iattr)
81 return -ENOMEM;
82 /* assign default attributes */
83 memset(sd_iattr, 0, sizeof(struct iattr));
84 sd_iattr->ia_mode = sd->s_mode;
85 sd_iattr->ia_uid = 0;
86 sd_iattr->ia_gid = 0;
87 sd_iattr->ia_atime = sd_iattr->ia_mtime = sd_iattr->ia_ctime = CURRENT_TIME;
88 sd->s_iattr = sd_iattr;
89 }
90
91 /* attributes were changed atleast once in past */
92
93 if (ia_valid & ATTR_UID)
94 sd_iattr->ia_uid = iattr->ia_uid;
95 if (ia_valid & ATTR_GID)
96 sd_iattr->ia_gid = iattr->ia_gid;
97 if (ia_valid & ATTR_ATIME)
98 sd_iattr->ia_atime = timespec_trunc(iattr->ia_atime,
99 inode->i_sb->s_time_gran);
100 if (ia_valid & ATTR_MTIME)
101 sd_iattr->ia_mtime = timespec_trunc(iattr->ia_mtime,
102 inode->i_sb->s_time_gran);
103 if (ia_valid & ATTR_CTIME)
104 sd_iattr->ia_ctime = timespec_trunc(iattr->ia_ctime,
105 inode->i_sb->s_time_gran);
106 if (ia_valid & ATTR_MODE) {
107 umode_t mode = iattr->ia_mode;
108
109 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
110 mode &= ~S_ISGID;
111 sd_iattr->ia_mode = sd->s_mode = mode;
112 }
113
114 return error;
115}
116
117static inline void set_default_inode_attr(struct inode * inode, mode_t mode)
118{
119 inode->i_mode = mode;
120 inode->i_uid = 0;
121 inode->i_gid = 0;
122 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
123}
124
125static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
126{
127 inode->i_mode = iattr->ia_mode;
128 inode->i_uid = iattr->ia_uid;
129 inode->i_gid = iattr->ia_gid;
130 inode->i_atime = iattr->ia_atime;
131 inode->i_mtime = iattr->ia_mtime;
132 inode->i_ctime = iattr->ia_ctime;
133}
134
135struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd)
52{ 136{
53 struct inode * inode = new_inode(configfs_sb); 137 struct inode * inode = new_inode(configfs_sb);
54 if (inode) { 138 if (inode) {
55 inode->i_mode = mode;
56 inode->i_uid = 0;
57 inode->i_gid = 0;
58 inode->i_blksize = PAGE_CACHE_SIZE; 139 inode->i_blksize = PAGE_CACHE_SIZE;
59 inode->i_blocks = 0; 140 inode->i_blocks = 0;
60 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
61 inode->i_mapping->a_ops = &configfs_aops; 141 inode->i_mapping->a_ops = &configfs_aops;
62 inode->i_mapping->backing_dev_info = &configfs_backing_dev_info; 142 inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
143 inode->i_op = &configfs_inode_operations;
144
145 if (sd->s_iattr) {
146 /* sysfs_dirent has non-default attributes
147 * get them for the new inode from persistent copy
148 * in sysfs_dirent
149 */
150 set_inode_attr(inode, sd->s_iattr);
151 } else
152 set_default_inode_attr(inode, mode);
63 } 153 }
64 return inode; 154 return inode;
65} 155}
@@ -70,7 +160,8 @@ int configfs_create(struct dentry * dentry, int mode, int (*init)(struct inode *
70 struct inode * inode = NULL; 160 struct inode * inode = NULL;
71 if (dentry) { 161 if (dentry) {
72 if (!dentry->d_inode) { 162 if (!dentry->d_inode) {
73 if ((inode = configfs_new_inode(mode))) { 163 struct configfs_dirent *sd = dentry->d_fsdata;
164 if ((inode = configfs_new_inode(mode, sd))) {
74 if (dentry->d_parent && dentry->d_parent->d_inode) { 165 if (dentry->d_parent && dentry->d_parent->d_inode) {
75 struct inode *p_inode = dentry->d_parent->d_inode; 166 struct inode *p_inode = dentry->d_parent->d_inode;
76 p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; 167 p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
@@ -103,10 +194,9 @@ int configfs_create(struct dentry * dentry, int mode, int (*init)(struct inode *
103 */ 194 */
104const unsigned char * configfs_get_name(struct configfs_dirent *sd) 195const unsigned char * configfs_get_name(struct configfs_dirent *sd)
105{ 196{
106 struct attribute * attr; 197 struct configfs_attribute *attr;
107 198
108 if (!sd || !sd->s_element) 199 BUG_ON(!sd || !sd->s_element);
109 BUG();
110 200
111 /* These always have a dentry, so use that */ 201 /* These always have a dentry, so use that */
112 if (sd->s_type & (CONFIGFS_DIR | CONFIGFS_ITEM_LINK)) 202 if (sd->s_type & (CONFIGFS_DIR | CONFIGFS_ITEM_LINK))
@@ -114,7 +204,7 @@ const unsigned char * configfs_get_name(struct configfs_dirent *sd)
114 204
115 if (sd->s_type & CONFIGFS_ITEM_ATTR) { 205 if (sd->s_type & CONFIGFS_ITEM_ATTR) {
116 attr = sd->s_element; 206 attr = sd->s_element;
117 return attr->name; 207 return attr->ca_name;
118 } 208 }
119 return NULL; 209 return NULL;
120} 210}
@@ -130,13 +220,17 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
130 220
131 if (dentry) { 221 if (dentry) {
132 spin_lock(&dcache_lock); 222 spin_lock(&dcache_lock);
223 spin_lock(&dentry->d_lock);
133 if (!(d_unhashed(dentry) && dentry->d_inode)) { 224 if (!(d_unhashed(dentry) && dentry->d_inode)) {
134 dget_locked(dentry); 225 dget_locked(dentry);
135 __d_drop(dentry); 226 __d_drop(dentry);
227 spin_unlock(&dentry->d_lock);
136 spin_unlock(&dcache_lock); 228 spin_unlock(&dcache_lock);
137 simple_unlink(parent->d_inode, dentry); 229 simple_unlink(parent->d_inode, dentry);
138 } else 230 } else {
231 spin_unlock(&dentry->d_lock);
139 spin_unlock(&dcache_lock); 232 spin_unlock(&dcache_lock);
233 }
140 } 234 }
141} 235}
142 236
@@ -145,6 +239,10 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
145 struct configfs_dirent * sd; 239 struct configfs_dirent * sd;
146 struct configfs_dirent * parent_sd = dir->d_fsdata; 240 struct configfs_dirent * parent_sd = dir->d_fsdata;
147 241
242 if (dir->d_inode == NULL)
243 /* no inode means this hasn't been made visible yet */
244 return;
245
148 mutex_lock(&dir->d_inode->i_mutex); 246 mutex_lock(&dir->d_inode->i_mutex);
149 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { 247 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
150 if (!sd->s_element) 248 if (!sd->s_element)
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 1a2f6f6a4d91..f920d30478e5 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -38,6 +38,7 @@
38 38
39struct vfsmount * configfs_mount = NULL; 39struct vfsmount * configfs_mount = NULL;
40struct super_block * configfs_sb = NULL; 40struct super_block * configfs_sb = NULL;
41kmem_cache_t *configfs_dir_cachep;
41static int configfs_mnt_count = 0; 42static int configfs_mnt_count = 0;
42 43
43static struct super_operations configfs_ops = { 44static struct super_operations configfs_ops = {
@@ -62,6 +63,7 @@ static struct configfs_dirent configfs_root = {
62 .s_children = LIST_HEAD_INIT(configfs_root.s_children), 63 .s_children = LIST_HEAD_INIT(configfs_root.s_children),
63 .s_element = &configfs_root_group.cg_item, 64 .s_element = &configfs_root_group.cg_item,
64 .s_type = CONFIGFS_ROOT, 65 .s_type = CONFIGFS_ROOT,
66 .s_iattr = NULL,
65}; 67};
66 68
67static int configfs_fill_super(struct super_block *sb, void *data, int silent) 69static int configfs_fill_super(struct super_block *sb, void *data, int silent)
@@ -73,9 +75,11 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
73 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 75 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
74 sb->s_magic = CONFIGFS_MAGIC; 76 sb->s_magic = CONFIGFS_MAGIC;
75 sb->s_op = &configfs_ops; 77 sb->s_op = &configfs_ops;
78 sb->s_time_gran = 1;
76 configfs_sb = sb; 79 configfs_sb = sb;
77 80
78 inode = configfs_new_inode(S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO); 81 inode = configfs_new_inode(S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
82 &configfs_root);
79 if (inode) { 83 if (inode) {
80 inode->i_op = &configfs_dir_inode_operations; 84 inode->i_op = &configfs_dir_inode_operations;
81 inode->i_fop = &configfs_dir_operations; 85 inode->i_fop = &configfs_dir_operations;
@@ -128,19 +132,31 @@ static decl_subsys(config, NULL, NULL);
128 132
129static int __init configfs_init(void) 133static int __init configfs_init(void)
130{ 134{
131 int err; 135 int err = -ENOMEM;
136
137 configfs_dir_cachep = kmem_cache_create("configfs_dir_cache",
138 sizeof(struct configfs_dirent),
139 0, 0, NULL, NULL);
140 if (!configfs_dir_cachep)
141 goto out;
132 142
133 kset_set_kset_s(&config_subsys, kernel_subsys); 143 kset_set_kset_s(&config_subsys, kernel_subsys);
134 err = subsystem_register(&config_subsys); 144 err = subsystem_register(&config_subsys);
135 if (err) 145 if (err) {
136 return err; 146 kmem_cache_destroy(configfs_dir_cachep);
147 configfs_dir_cachep = NULL;
148 goto out;
149 }
137 150
138 err = register_filesystem(&configfs_fs_type); 151 err = register_filesystem(&configfs_fs_type);
139 if (err) { 152 if (err) {
140 printk(KERN_ERR "configfs: Unable to register filesystem!\n"); 153 printk(KERN_ERR "configfs: Unable to register filesystem!\n");
141 subsystem_unregister(&config_subsys); 154 subsystem_unregister(&config_subsys);
155 kmem_cache_destroy(configfs_dir_cachep);
156 configfs_dir_cachep = NULL;
142 } 157 }
143 158
159out:
144 return err; 160 return err;
145} 161}
146 162
@@ -148,11 +164,13 @@ static void __exit configfs_exit(void)
148{ 164{
149 unregister_filesystem(&configfs_fs_type); 165 unregister_filesystem(&configfs_fs_type);
150 subsystem_unregister(&config_subsys); 166 subsystem_unregister(&config_subsys);
167 kmem_cache_destroy(configfs_dir_cachep);
168 configfs_dir_cachep = NULL;
151} 169}
152 170
153MODULE_AUTHOR("Oracle"); 171MODULE_AUTHOR("Oracle");
154MODULE_LICENSE("GPL"); 172MODULE_LICENSE("GPL");
155MODULE_VERSION("0.0.1"); 173MODULE_VERSION("0.0.2");
156MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration."); 174MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration.");
157 175
158module_init(configfs_init); 176module_init(configfs_init);
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 50f5840521a9..e5512e295cf2 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -162,8 +162,7 @@ int configfs_unlink(struct inode *dir, struct dentry *dentry)
162 if (!(sd->s_type & CONFIGFS_ITEM_LINK)) 162 if (!(sd->s_type & CONFIGFS_ITEM_LINK))
163 goto out; 163 goto out;
164 164
165 if (dentry->d_parent == configfs_sb->s_root) 165 BUG_ON(dentry->d_parent == configfs_sb->s_root);
166 BUG();
167 166
168 sl = sd->s_element; 167 sl = sd->s_element;
169 168
@@ -277,5 +276,6 @@ struct inode_operations configfs_symlink_inode_operations = {
277 .follow_link = configfs_follow_link, 276 .follow_link = configfs_follow_link,
278 .readlink = generic_readlink, 277 .readlink = generic_readlink,
279 .put_link = configfs_put_link, 278 .put_link = configfs_put_link,
279 .setattr = configfs_setattr,
280}; 280};
281 281
diff --git a/fs/dcache.c b/fs/dcache.c
index 86bdb93789c6..a173bba32666 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -743,7 +743,9 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
743 dentry->d_op = NULL; 743 dentry->d_op = NULL;
744 dentry->d_fsdata = NULL; 744 dentry->d_fsdata = NULL;
745 dentry->d_mounted = 0; 745 dentry->d_mounted = 0;
746#ifdef CONFIG_PROFILING
746 dentry->d_cookie = NULL; 747 dentry->d_cookie = NULL;
748#endif
747 INIT_HLIST_NODE(&dentry->d_hash); 749 INIT_HLIST_NODE(&dentry->d_hash);
748 INIT_LIST_HEAD(&dentry->d_lru); 750 INIT_LIST_HEAD(&dentry->d_lru);
749 INIT_LIST_HEAD(&dentry->d_subdirs); 751 INIT_LIST_HEAD(&dentry->d_subdirs);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 30dbbd1df511..848044af7e16 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -857,6 +857,7 @@ do_holes:
857 /* Handle holes */ 857 /* Handle holes */
858 if (!buffer_mapped(map_bh)) { 858 if (!buffer_mapped(map_bh)) {
859 char *kaddr; 859 char *kaddr;
860 loff_t i_size_aligned;
860 861
861 /* AKPM: eargh, -ENOTBLK is a hack */ 862 /* AKPM: eargh, -ENOTBLK is a hack */
862 if (dio->rw == WRITE) { 863 if (dio->rw == WRITE) {
@@ -864,8 +865,14 @@ do_holes:
864 return -ENOTBLK; 865 return -ENOTBLK;
865 } 866 }
866 867
868 /*
869 * Be sure to account for a partial block as the
870 * last block in the file
871 */
872 i_size_aligned = ALIGN(i_size_read(dio->inode),
873 1 << blkbits);
867 if (dio->block_in_file >= 874 if (dio->block_in_file >=
868 i_size_read(dio->inode)>>blkbits) { 875 i_size_aligned >> blkbits) {
869 /* We hit eof */ 876 /* We hit eof */
870 page_cache_release(page); 877 page_cache_release(page);
871 goto out; 878 goto out;
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 35acc43b897f..da52b4a5db64 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -220,7 +220,7 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
220 struct ext2_inode_info *ei = EXT2_I(inode); 220 struct ext2_inode_info *ei = EXT2_I(inode);
221 int name_index; 221 int name_index;
222 void *value = NULL; 222 void *value = NULL;
223 size_t size; 223 size_t size = 0;
224 int error; 224 int error;
225 225
226 if (S_ISLNK(inode->i_mode)) 226 if (S_ISLNK(inode->i_mode))
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 74714af4ae69..e52765219e16 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -605,7 +605,7 @@ got:
605 insert_inode_hash(inode); 605 insert_inode_hash(inode);
606 606
607 if (DQUOT_ALLOC_INODE(inode)) { 607 if (DQUOT_ALLOC_INODE(inode)) {
608 err = -ENOSPC; 608 err = -EDQUOT;
609 goto fail_drop; 609 goto fail_drop;
610 } 610 }
611 611
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 8d6819846fc9..cb6f9bd658de 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -221,6 +221,11 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
221 seq_puts(seq, ",grpquota"); 221 seq_puts(seq, ",grpquota");
222#endif 222#endif
223 223
224#if defined(CONFIG_EXT2_FS_XIP)
225 if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
226 seq_puts(seq, ",xip");
227#endif
228
224 return 0; 229 return 0;
225} 230}
226 231
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 47a9da2dfb4f..0d21d558b87a 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -226,7 +226,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
226 struct ext3_inode_info *ei = EXT3_I(inode); 226 struct ext3_inode_info *ei = EXT3_I(inode);
227 int name_index; 227 int name_index;
228 void *value = NULL; 228 void *value = NULL;
229 size_t size; 229 size_t size = 0;
230 int error; 230 int error;
231 231
232 if (S_ISLNK(inode->i_mode)) 232 if (S_ISLNK(inode->i_mode))
diff --git a/fs/fat/file.c b/fs/fat/file.c
index e99c5a73b39e..88aa1ae13f9f 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -210,10 +210,30 @@ static int fat_free(struct inode *inode, int skip)
210 if (MSDOS_I(inode)->i_start == 0) 210 if (MSDOS_I(inode)->i_start == 0)
211 return 0; 211 return 0;
212 212
213 /* 213 fat_cache_inval_inode(inode);
214 * Write a new EOF, and get the remaining cluster chain for freeing. 214
215 */
216 wait = IS_DIRSYNC(inode); 215 wait = IS_DIRSYNC(inode);
216 i_start = free_start = MSDOS_I(inode)->i_start;
217 i_logstart = MSDOS_I(inode)->i_logstart;
218
219 /* First, we write the new file size. */
220 if (!skip) {
221 MSDOS_I(inode)->i_start = 0;
222 MSDOS_I(inode)->i_logstart = 0;
223 }
224 MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
225 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
226 if (wait) {
227 err = fat_sync_inode(inode);
228 if (err) {
229 MSDOS_I(inode)->i_start = i_start;
230 MSDOS_I(inode)->i_logstart = i_logstart;
231 return err;
232 }
233 } else
234 mark_inode_dirty(inode);
235
236 /* Write a new EOF, and get the remaining cluster chain for freeing. */
217 if (skip) { 237 if (skip) {
218 struct fat_entry fatent; 238 struct fat_entry fatent;
219 int ret, fclus, dclus; 239 int ret, fclus, dclus;
@@ -244,35 +264,11 @@ static int fat_free(struct inode *inode, int skip)
244 return ret; 264 return ret;
245 265
246 free_start = ret; 266 free_start = ret;
247 i_start = i_logstart = 0;
248 fat_cache_inval_inode(inode);
249 } else {
250 fat_cache_inval_inode(inode);
251
252 i_start = free_start = MSDOS_I(inode)->i_start;
253 i_logstart = MSDOS_I(inode)->i_logstart;
254 MSDOS_I(inode)->i_start = 0;
255 MSDOS_I(inode)->i_logstart = 0;
256 } 267 }
257 MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
258 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
259 if (wait) {
260 err = fat_sync_inode(inode);
261 if (err)
262 goto error;
263 } else
264 mark_inode_dirty(inode);
265 inode->i_blocks = skip << (MSDOS_SB(sb)->cluster_bits - 9); 268 inode->i_blocks = skip << (MSDOS_SB(sb)->cluster_bits - 9);
266 269
267 /* Freeing the remained cluster chain */ 270 /* Freeing the remained cluster chain */
268 return fat_free_clusters(inode, free_start); 271 return fat_free_clusters(inode, free_start);
269
270error:
271 if (i_start) {
272 MSDOS_I(inode)->i_start = i_start;
273 MSDOS_I(inode)->i_logstart = i_logstart;
274 }
275 return err;
276} 272}
277 273
278void fat_truncate(struct inode *inode) 274void fat_truncate(struct inode *inode)
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 32fb0a3f1da4..944652e9dde1 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -196,19 +196,9 @@ EXPORT_SYMBOL_GPL(fat_date_unix2dos);
196 196
197int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs) 197int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
198{ 198{
199 int i, e, err = 0; 199 int i, err = 0;
200 200
201 for (i = 0; i < nr_bhs; i++) { 201 ll_rw_block(SWRITE, nr_bhs, bhs);
202 lock_buffer(bhs[i]);
203 if (test_clear_buffer_dirty(bhs[i])) {
204 get_bh(bhs[i]);
205 bhs[i]->b_end_io = end_buffer_write_sync;
206 e = submit_bh(WRITE, bhs[i]);
207 if (!err && e)
208 err = e;
209 } else
210 unlock_buffer(bhs[i]);
211 }
212 for (i = 0; i < nr_bhs; i++) { 202 for (i = 0; i < nr_bhs; i++) {
213 wait_on_buffer(bhs[i]); 203 wait_on_buffer(bhs[i]);
214 if (buffer_eopnotsupp(bhs[i])) { 204 if (buffer_eopnotsupp(bhs[i])) {
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 5f96786d1c73..dc4a7007f4e7 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -208,8 +208,11 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
208 struct inode * inode = filp->f_dentry->d_inode; 208 struct inode * inode = filp->f_dentry->d_inode;
209 int error = 0; 209 int error = 0;
210 210
211 /* O_APPEND cannot be cleared if the file is marked as append-only */ 211 /*
212 if (!(arg & O_APPEND) && IS_APPEND(inode)) 212 * O_APPEND cannot be cleared if the file is marked as append-only
213 * and the file is open for write.
214 */
215 if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
213 return -EPERM; 216 return -EPERM;
214 217
215 /* O_NOATIME can only be set by the owner or superuser */ 218 /* O_NOATIME can only be set by the owner or superuser */
diff --git a/fs/file.c b/fs/file.c
index fd066b261c75..cea7cbea11d0 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
379void __init files_defer_init(void) 379void __init files_defer_init(void)
380{ 380{
381 int i; 381 int i;
382 /* Really early - can't use for_each_cpu */ 382 for_each_cpu(i)
383 for (i = 0; i < NR_CPUS; i++)
384 fdtable_defer_list_init(i); 383 fdtable_defer_list_init(i);
385} 384}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 4526da8907c6..f556a0d5c0d3 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -120,9 +120,9 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
120 return do_get_request(fc); 120 return do_get_request(fc);
121} 121}
122 122
123/* Must be called with fuse_lock held */
123static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req) 124static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
124{ 125{
125 spin_lock(&fuse_lock);
126 if (req->preallocated) { 126 if (req->preallocated) {
127 atomic_dec(&fc->num_waiting); 127 atomic_dec(&fc->num_waiting);
128 list_add(&req->list, &fc->unused_list); 128 list_add(&req->list, &fc->unused_list);
@@ -134,11 +134,19 @@ static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
134 fc->outstanding_debt--; 134 fc->outstanding_debt--;
135 else 135 else
136 up(&fc->outstanding_sem); 136 up(&fc->outstanding_sem);
137 spin_unlock(&fuse_lock);
138} 137}
139 138
140void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) 139void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
141{ 140{
141 if (atomic_dec_and_test(&req->count)) {
142 spin_lock(&fuse_lock);
143 fuse_putback_request(fc, req);
144 spin_unlock(&fuse_lock);
145 }
146}
147
148static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
149{
142 if (atomic_dec_and_test(&req->count)) 150 if (atomic_dec_and_test(&req->count))
143 fuse_putback_request(fc, req); 151 fuse_putback_request(fc, req);
144} 152}
@@ -163,26 +171,36 @@ void fuse_release_background(struct fuse_req *req)
163 * still waiting), the 'end' callback is called if given, else the 171 * still waiting), the 'end' callback is called if given, else the
164 * reference to the request is released 172 * reference to the request is released
165 * 173 *
174 * Releasing extra reference for foreground requests must be done
175 * within the same locked region as setting state to finished. This
176 * is because fuse_reset_request() may be called after request is
177 * finished and it must be the sole possessor. If request is
178 * interrupted and put in the background, it will return with an error
179 * and hence never be reset and reused.
180 *
166 * Called with fuse_lock, unlocks it 181 * Called with fuse_lock, unlocks it
167 */ 182 */
168static void request_end(struct fuse_conn *fc, struct fuse_req *req) 183static void request_end(struct fuse_conn *fc, struct fuse_req *req)
169{ 184{
170 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
171 req->end = NULL;
172 list_del(&req->list); 185 list_del(&req->list);
173 req->state = FUSE_REQ_FINISHED; 186 req->state = FUSE_REQ_FINISHED;
174 spin_unlock(&fuse_lock); 187 if (!req->background) {
175 if (req->background) { 188 wake_up(&req->waitq);
189 fuse_put_request_locked(fc, req);
190 spin_unlock(&fuse_lock);
191 } else {
192 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
193 req->end = NULL;
194 spin_unlock(&fuse_lock);
176 down_read(&fc->sbput_sem); 195 down_read(&fc->sbput_sem);
177 if (fc->mounted) 196 if (fc->mounted)
178 fuse_release_background(req); 197 fuse_release_background(req);
179 up_read(&fc->sbput_sem); 198 up_read(&fc->sbput_sem);
199 if (end)
200 end(fc, req);
201 else
202 fuse_put_request(fc, req);
180 } 203 }
181 wake_up(&req->waitq);
182 if (end)
183 end(fc, req);
184 else
185 fuse_put_request(fc, req);
186} 204}
187 205
188/* 206/*
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 429f4b263cf1..ca917973c2c0 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1308,6 +1308,7 @@ int journal_stop(handle_t *handle)
1308 transaction_t *transaction = handle->h_transaction; 1308 transaction_t *transaction = handle->h_transaction;
1309 journal_t *journal = transaction->t_journal; 1309 journal_t *journal = transaction->t_journal;
1310 int old_handle_count, err; 1310 int old_handle_count, err;
1311 pid_t pid;
1311 1312
1312 J_ASSERT(transaction->t_updates > 0); 1313 J_ASSERT(transaction->t_updates > 0);
1313 J_ASSERT(journal_current_handle() == handle); 1314 J_ASSERT(journal_current_handle() == handle);
@@ -1333,8 +1334,15 @@ int journal_stop(handle_t *handle)
1333 * It doesn't cost much - we're about to run a commit and sleep 1334 * It doesn't cost much - we're about to run a commit and sleep
1334 * on IO anyway. Speeds up many-threaded, many-dir operations 1335 * on IO anyway. Speeds up many-threaded, many-dir operations
1335 * by 30x or more... 1336 * by 30x or more...
1337 *
1338 * But don't do this if this process was the most recent one to
1339 * perform a synchronous write. We do this to detect the case where a
1340 * single process is doing a stream of sync writes. No point in waiting
1341 * for joiners in that case.
1336 */ 1342 */
1337 if (handle->h_sync) { 1343 pid = current->pid;
1344 if (handle->h_sync && journal->j_last_sync_writer != pid) {
1345 journal->j_last_sync_writer = pid;
1338 do { 1346 do {
1339 old_handle_count = transaction->t_handle_count; 1347 old_handle_count = transaction->t_handle_count;
1340 schedule_timeout_uninterruptible(1); 1348 schedule_timeout_uninterruptible(1);
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index b2e95421d932..ce7b54b0b2b7 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -1965,7 +1965,7 @@ retry:
1965 iovec_cnt++; 1965 iovec_cnt++;
1966 1966
1967 if (JFFS_GET_PAD_BYTES(raw_inode->nsize)) { 1967 if (JFFS_GET_PAD_BYTES(raw_inode->nsize)) {
1968 static char allff[3]={255,255,255}; 1968 static unsigned char allff[3]={255,255,255};
1969 /* Add some extra padding if necessary */ 1969 /* Add some extra padding if necessary */
1970 node_iovec[iovec_cnt].iov_base = allff; 1970 node_iovec[iovec_cnt].iov_base = allff;
1971 node_iovec[iovec_cnt].iov_len = 1971 node_iovec[iovec_cnt].iov_len =
diff --git a/fs/libfs.c b/fs/libfs.c
index 63c020e6589e..71fd08fa4103 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -388,6 +388,7 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files
388 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 388 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
389 inode->i_op = &simple_dir_inode_operations; 389 inode->i_op = &simple_dir_inode_operations;
390 inode->i_fop = &simple_dir_operations; 390 inode->i_fop = &simple_dir_operations;
391 inode->i_nlink = 2;
391 root = d_alloc_root(inode); 392 root = d_alloc_root(inode);
392 if (!root) { 393 if (!root) {
393 iput(inode); 394 iput(inode);
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 145524039577..220058d8616d 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -22,12 +22,14 @@
22#define NLMDBG_FACILITY NLMDBG_CLIENT 22#define NLMDBG_FACILITY NLMDBG_CLIENT
23#define NLMCLNT_GRACE_WAIT (5*HZ) 23#define NLMCLNT_GRACE_WAIT (5*HZ)
24#define NLMCLNT_POLL_TIMEOUT (30*HZ) 24#define NLMCLNT_POLL_TIMEOUT (30*HZ)
25#define NLMCLNT_MAX_RETRIES 3
25 26
26static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); 27static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
27static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); 28static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
28static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *); 29static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
29static int nlm_stat_to_errno(u32 stat); 30static int nlm_stat_to_errno(u32 stat);
30static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); 31static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
32static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);
31 33
32static const struct rpc_call_ops nlmclnt_unlock_ops; 34static const struct rpc_call_ops nlmclnt_unlock_ops;
33static const struct rpc_call_ops nlmclnt_cancel_ops; 35static const struct rpc_call_ops nlmclnt_cancel_ops;
@@ -598,7 +600,7 @@ out_unblock:
598 nlmclnt_finish_block(req); 600 nlmclnt_finish_block(req);
599 /* Cancel the blocked request if it is still pending */ 601 /* Cancel the blocked request if it is still pending */
600 if (resp->status == NLM_LCK_BLOCKED) 602 if (resp->status == NLM_LCK_BLOCKED)
601 nlmclnt_cancel(host, fl); 603 nlmclnt_cancel(host, req->a_args.block, fl);
602out: 604out:
603 nlmclnt_release_lockargs(req); 605 nlmclnt_release_lockargs(req);
604 return status; 606 return status;
@@ -728,8 +730,7 @@ static const struct rpc_call_ops nlmclnt_unlock_ops = {
728 * We always use an async RPC call for this in order not to hang a 730 * We always use an async RPC call for this in order not to hang a
729 * process that has been Ctrl-C'ed. 731 * process that has been Ctrl-C'ed.
730 */ 732 */
731int 733static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
732nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
733{ 734{
734 struct nlm_rqst *req; 735 struct nlm_rqst *req;
735 unsigned long flags; 736 unsigned long flags;
@@ -750,6 +751,7 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
750 req->a_flags = RPC_TASK_ASYNC; 751 req->a_flags = RPC_TASK_ASYNC;
751 752
752 nlmclnt_setlockargs(req, fl); 753 nlmclnt_setlockargs(req, fl);
754 req->a_args.block = block;
753 755
754 status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops); 756 status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
755 if (status < 0) { 757 if (status < 0) {
@@ -801,6 +803,9 @@ die:
801 return; 803 return;
802 804
803retry_cancel: 805retry_cancel:
806 /* Don't ever retry more than 3 times */
807 if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
808 goto die;
804 nlm_rebind_host(req->a_host); 809 nlm_rebind_host(req->a_host);
805 rpc_restart_call(task); 810 rpc_restart_call(task);
806 rpc_delay(task, 30 * HZ); 811 rpc_delay(task, 30 * HZ);
diff --git a/fs/namei.c b/fs/namei.c
index 7ac9fb4acb2c..faf61c35308c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -790,7 +790,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
790 790
791 inode = nd->dentry->d_inode; 791 inode = nd->dentry->d_inode;
792 if (nd->depth) 792 if (nd->depth)
793 lookup_flags = LOOKUP_FOLLOW; 793 lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
794 794
795 /* At this point we know we have a real path component. */ 795 /* At this point we know we have a real path component. */
796 for(;;) { 796 for(;;) {
@@ -885,7 +885,8 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
885last_with_slashes: 885last_with_slashes:
886 lookup_flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; 886 lookup_flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
887last_component: 887last_component:
888 nd->flags &= ~LOOKUP_CONTINUE; 888 /* Clear LOOKUP_CONTINUE iff it was previously unset */
889 nd->flags &= lookup_flags | ~LOOKUP_CONTINUE;
889 if (lookup_flags & LOOKUP_PARENT) 890 if (lookup_flags & LOOKUP_PARENT)
890 goto lookup_parent; 891 goto lookup_parent;
891 if (this.name[0] == '.') switch (this.len) { 892 if (this.name[0] == '.') switch (this.len) {
@@ -1069,6 +1070,8 @@ static int fastcall do_path_lookup(int dfd, const char *name,
1069 unsigned int flags, struct nameidata *nd) 1070 unsigned int flags, struct nameidata *nd)
1070{ 1071{
1071 int retval = 0; 1072 int retval = 0;
1073 int fput_needed;
1074 struct file *file;
1072 1075
1073 nd->last_type = LAST_ROOT; /* if there are only slashes... */ 1076 nd->last_type = LAST_ROOT; /* if there are only slashes... */
1074 nd->flags = flags; 1077 nd->flags = flags;
@@ -1090,29 +1093,22 @@ static int fastcall do_path_lookup(int dfd, const char *name,
1090 nd->mnt = mntget(current->fs->pwdmnt); 1093 nd->mnt = mntget(current->fs->pwdmnt);
1091 nd->dentry = dget(current->fs->pwd); 1094 nd->dentry = dget(current->fs->pwd);
1092 } else { 1095 } else {
1093 struct file *file;
1094 int fput_needed;
1095 struct dentry *dentry; 1096 struct dentry *dentry;
1096 1097
1097 file = fget_light(dfd, &fput_needed); 1098 file = fget_light(dfd, &fput_needed);
1098 if (!file) { 1099 retval = -EBADF;
1099 retval = -EBADF; 1100 if (!file)
1100 goto out_fail; 1101 goto unlock_fail;
1101 }
1102 1102
1103 dentry = file->f_dentry; 1103 dentry = file->f_dentry;
1104 1104
1105 if (!S_ISDIR(dentry->d_inode->i_mode)) { 1105 retval = -ENOTDIR;
1106 retval = -ENOTDIR; 1106 if (!S_ISDIR(dentry->d_inode->i_mode))
1107 fput_light(file, fput_needed); 1107 goto fput_unlock_fail;
1108 goto out_fail;
1109 }
1110 1108
1111 retval = file_permission(file, MAY_EXEC); 1109 retval = file_permission(file, MAY_EXEC);
1112 if (retval) { 1110 if (retval)
1113 fput_light(file, fput_needed); 1111 goto fput_unlock_fail;
1114 goto out_fail;
1115 }
1116 1112
1117 nd->mnt = mntget(file->f_vfsmnt); 1113 nd->mnt = mntget(file->f_vfsmnt);
1118 nd->dentry = dget(dentry); 1114 nd->dentry = dget(dentry);
@@ -1126,7 +1122,12 @@ out:
1126 if (unlikely(current->audit_context 1122 if (unlikely(current->audit_context
1127 && nd && nd->dentry && nd->dentry->d_inode)) 1123 && nd && nd->dentry && nd->dentry->d_inode))
1128 audit_inode(name, nd->dentry->d_inode, flags); 1124 audit_inode(name, nd->dentry->d_inode, flags);
1129out_fail: 1125 return retval;
1126
1127fput_unlock_fail:
1128 fput_light(file, fput_needed);
1129unlock_fail:
1130 read_unlock(&current->fs->lock);
1130 return retval; 1131 return retval;
1131} 1132}
1132 1133
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 10ae377e68ff..04ab2fc360e7 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -481,7 +481,7 @@ retry:
481 if (wdata->verf.committed != NFS_FILE_SYNC) { 481 if (wdata->verf.committed != NFS_FILE_SYNC) {
482 need_commit = 1; 482 need_commit = 1;
483 if (memcmp(&first_verf.verifier, &wdata->verf.verifier, 483 if (memcmp(&first_verf.verifier, &wdata->verf.verifier,
484 sizeof(first_verf.verifier))); 484 sizeof(first_verf.verifier)))
485 goto sync_retry; 485 goto sync_retry;
486 } 486 }
487 487
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d424041b38e9..bae3d7548bea 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -58,7 +58,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
58 goto out; 58 goto out;
59 } 59 }
60 60
61 down(&OCFS2_I(inode)->ip_io_sem); 61 mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
62 62
63 lock_buffer(bh); 63 lock_buffer(bh);
64 set_buffer_uptodate(bh); 64 set_buffer_uptodate(bh);
@@ -82,7 +82,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
82 brelse(bh); 82 brelse(bh);
83 } 83 }
84 84
85 up(&OCFS2_I(inode)->ip_io_sem); 85 mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
86out: 86out:
87 mlog_exit(ret); 87 mlog_exit(ret);
88 return ret; 88 return ret;
@@ -125,13 +125,13 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
125 flags &= ~OCFS2_BH_CACHED; 125 flags &= ~OCFS2_BH_CACHED;
126 126
127 if (inode) 127 if (inode)
128 down(&OCFS2_I(inode)->ip_io_sem); 128 mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
129 for (i = 0 ; i < nr ; i++) { 129 for (i = 0 ; i < nr ; i++) {
130 if (bhs[i] == NULL) { 130 if (bhs[i] == NULL) {
131 bhs[i] = sb_getblk(sb, block++); 131 bhs[i] = sb_getblk(sb, block++);
132 if (bhs[i] == NULL) { 132 if (bhs[i] == NULL) {
133 if (inode) 133 if (inode)
134 up(&OCFS2_I(inode)->ip_io_sem); 134 mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
135 status = -EIO; 135 status = -EIO;
136 mlog_errno(status); 136 mlog_errno(status);
137 goto bail; 137 goto bail;
@@ -220,7 +220,7 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
220 ocfs2_set_buffer_uptodate(inode, bh); 220 ocfs2_set_buffer_uptodate(inode, bh);
221 } 221 }
222 if (inode) 222 if (inode)
223 up(&OCFS2_I(inode)->ip_io_sem); 223 mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
224 224
225 mlog(ML_BH_IO, "block=(%"MLFu64"), nr=(%d), cached=%s\n", block, nr, 225 mlog(ML_BH_IO, "block=(%"MLFu64"), nr=(%d), cached=%s\n", block, nr,
226 (!(flags & OCFS2_BH_CACHED) || ignore_cache) ? "no" : "yes"); 226 (!(flags & OCFS2_BH_CACHED) || ignore_cache) ? "no" : "yes");
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 7307ba528913..d08971d29b63 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -917,8 +917,9 @@ static int o2hb_thread(void *data)
917 elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); 917 elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
918 918
919 mlog(0, "start = %lu.%lu, end = %lu.%lu, msec = %u\n", 919 mlog(0, "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
920 before_hb.tv_sec, before_hb.tv_usec, 920 before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
921 after_hb.tv_sec, after_hb.tv_usec, elapsed_msec); 921 after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
922 elapsed_msec);
922 923
923 if (elapsed_msec < reg->hr_timeout_ms) { 924 if (elapsed_msec < reg->hr_timeout_ms) {
924 /* the kthread api has blocked signals for us so no 925 /* the kthread api has blocked signals for us so no
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 35d92c01a972..d22d4cf08db1 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1285,14 +1285,16 @@ static void o2net_idle_timer(unsigned long data)
1285 mlog(ML_NOTICE, "here are some times that might help debug the " 1285 mlog(ML_NOTICE, "here are some times that might help debug the "
1286 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " 1286 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
1287 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", 1287 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
1288 sc->sc_tv_timer.tv_sec, sc->sc_tv_timer.tv_usec, 1288 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
1289 now.tv_sec, now.tv_usec, 1289 now.tv_sec, (long) now.tv_usec,
1290 sc->sc_tv_data_ready.tv_sec, sc->sc_tv_data_ready.tv_usec, 1290 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
1291 sc->sc_tv_advance_start.tv_sec, sc->sc_tv_advance_start.tv_usec, 1291 sc->sc_tv_advance_start.tv_sec,
1292 sc->sc_tv_advance_stop.tv_sec, sc->sc_tv_advance_stop.tv_usec, 1292 (long) sc->sc_tv_advance_start.tv_usec,
1293 sc->sc_tv_advance_stop.tv_sec,
1294 (long) sc->sc_tv_advance_stop.tv_usec,
1293 sc->sc_msg_key, sc->sc_msg_type, 1295 sc->sc_msg_key, sc->sc_msg_type,
1294 sc->sc_tv_func_start.tv_sec, sc->sc_tv_func_start.tv_usec, 1296 sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec,
1295 sc->sc_tv_func_stop.tv_sec, sc->sc_tv_func_stop.tv_usec); 1297 sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec);
1296 1298
1297 o2net_sc_queue_work(sc, &sc->sc_shutdown_work); 1299 o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
1298} 1300}
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 3fecba0a6023..42eb53b5293b 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -657,6 +657,7 @@ void dlm_complete_thread(struct dlm_ctxt *dlm);
657int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); 657int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
658void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); 658void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
659void dlm_wait_for_recovery(struct dlm_ctxt *dlm); 659void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
660int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
660 661
661void dlm_put(struct dlm_ctxt *dlm); 662void dlm_put(struct dlm_ctxt *dlm);
662struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); 663struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index da3c22045f89..6ee30837389c 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -573,8 +573,11 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data)
573 spin_lock(&dlm_domain_lock); 573 spin_lock(&dlm_domain_lock);
574 dlm = __dlm_lookup_domain_full(query->domain, query->name_len); 574 dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
575 /* Once the dlm ctxt is marked as leaving then we don't want 575 /* Once the dlm ctxt is marked as leaving then we don't want
576 * to be put in someone's domain map. */ 576 * to be put in someone's domain map.
577 * Also, explicitly disallow joining at certain troublesome
578 * times (ie. during recovery). */
577 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { 579 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
580 int bit = query->node_idx;
578 spin_lock(&dlm->spinlock); 581 spin_lock(&dlm->spinlock);
579 582
580 if (dlm->dlm_state == DLM_CTXT_NEW && 583 if (dlm->dlm_state == DLM_CTXT_NEW &&
@@ -586,6 +589,19 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data)
586 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { 589 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
587 /* Disallow parallel joins. */ 590 /* Disallow parallel joins. */
588 response = JOIN_DISALLOW; 591 response = JOIN_DISALLOW;
592 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
593 mlog(ML_NOTICE, "node %u trying to join, but recovery "
594 "is ongoing.\n", bit);
595 response = JOIN_DISALLOW;
596 } else if (test_bit(bit, dlm->recovery_map)) {
597 mlog(ML_NOTICE, "node %u trying to join, but it "
598 "still needs recovery.\n", bit);
599 response = JOIN_DISALLOW;
600 } else if (test_bit(bit, dlm->domain_map)) {
601 mlog(ML_NOTICE, "node %u trying to join, but it "
602 "is still in the domain! needs recovery?\n",
603 bit);
604 response = JOIN_DISALLOW;
589 } else { 605 } else {
590 /* Alright we're fully a part of this domain 606 /* Alright we're fully a part of this domain
591 * so we keep some state as to who's joining 607 * so we keep some state as to who's joining
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 27e984f7e4cd..a3194fe173d9 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1050,17 +1050,10 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1050 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1050 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1051 while (node >= 0) { 1051 while (node >= 0) {
1052 if (sc == NODE_UP) { 1052 if (sc == NODE_UP) {
1053 /* a node came up. easy. might not even need 1053 /* a node came up. clear any old vote from
1054 * to talk to it if its node number is higher 1054 * the response map and set it in the vote map
1055 * or if we are already blocked. */ 1055 * then restart the mastery. */
1056 mlog(0, "node up! %d\n", node); 1056 mlog(ML_NOTICE, "node %d up while restarting\n", node);
1057 if (blocked)
1058 goto next;
1059
1060 if (node > dlm->node_num) {
1061 mlog(0, "node > this node. skipping.\n");
1062 goto next;
1063 }
1064 1057
1065 /* redo the master request, but only for the new node */ 1058 /* redo the master request, but only for the new node */
1066 mlog(0, "sending request to new node\n"); 1059 mlog(0, "sending request to new node\n");
@@ -2005,6 +1998,15 @@ fail:
2005 break; 1998 break;
2006 1999
2007 mlog(0, "timed out during migration\n"); 2000 mlog(0, "timed out during migration\n");
2001 /* avoid hang during shutdown when migrating lockres
2002 * to a node which also goes down */
2003 if (dlm_is_node_dead(dlm, target)) {
2004 mlog(0, "%s:%.*s: expected migration target %u "
2005 "is no longer up. restarting.\n",
2006 dlm->name, res->lockname.len,
2007 res->lockname.name, target);
2008 ret = -ERESTARTSYS;
2009 }
2008 } 2010 }
2009 if (ret == -ERESTARTSYS) { 2011 if (ret == -ERESTARTSYS) {
2010 /* migration failed, detach and clean up mle */ 2012 /* migration failed, detach and clean up mle */
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 0c8eb1093f00..186e9a76aa58 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -39,6 +39,7 @@
39#include <linux/inet.h> 39#include <linux/inet.h>
40#include <linux/timer.h> 40#include <linux/timer.h>
41#include <linux/kthread.h> 41#include <linux/kthread.h>
42#include <linux/delay.h>
42 43
43 44
44#include "cluster/heartbeat.h" 45#include "cluster/heartbeat.h"
@@ -256,6 +257,27 @@ static int dlm_recovery_thread(void *data)
256 return 0; 257 return 0;
257} 258}
258 259
260/* returns true when the recovery master has contacted us */
261static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
262{
263 int ready;
264 spin_lock(&dlm->spinlock);
265 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
266 spin_unlock(&dlm->spinlock);
267 return ready;
268}
269
270/* returns true if node is no longer in the domain
271 * could be dead or just not joined */
272int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
273{
274 int dead;
275 spin_lock(&dlm->spinlock);
276 dead = test_bit(node, dlm->domain_map);
277 spin_unlock(&dlm->spinlock);
278 return dead;
279}
280
259/* callers of the top-level api calls (dlmlock/dlmunlock) should 281/* callers of the top-level api calls (dlmlock/dlmunlock) should
260 * block on the dlm->reco.event when recovery is in progress. 282 * block on the dlm->reco.event when recovery is in progress.
261 * the dlm recovery thread will set this state when it begins 283 * the dlm recovery thread will set this state when it begins
@@ -297,6 +319,7 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
297static int dlm_do_recovery(struct dlm_ctxt *dlm) 319static int dlm_do_recovery(struct dlm_ctxt *dlm)
298{ 320{
299 int status = 0; 321 int status = 0;
322 int ret;
300 323
301 spin_lock(&dlm->spinlock); 324 spin_lock(&dlm->spinlock);
302 325
@@ -343,10 +366,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
343 goto master_here; 366 goto master_here;
344 367
345 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 368 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
346 /* choose a new master */ 369 /* choose a new master, returns 0 if this node
347 if (!dlm_pick_recovery_master(dlm)) { 370 * is the master, -EEXIST if it's another node.
371 * this does not return until a new master is chosen
372 * or recovery completes entirely. */
373 ret = dlm_pick_recovery_master(dlm);
374 if (!ret) {
348 /* already notified everyone. go. */ 375 /* already notified everyone. go. */
349 dlm->reco.new_master = dlm->node_num;
350 goto master_here; 376 goto master_here;
351 } 377 }
352 mlog(0, "another node will master this recovery session.\n"); 378 mlog(0, "another node will master this recovery session.\n");
@@ -371,8 +397,13 @@ master_here:
371 if (status < 0) { 397 if (status < 0) {
372 mlog(ML_ERROR, "error %d remastering locks for node %u, " 398 mlog(ML_ERROR, "error %d remastering locks for node %u, "
373 "retrying.\n", status, dlm->reco.dead_node); 399 "retrying.\n", status, dlm->reco.dead_node);
400 /* yield a bit to allow any final network messages
401 * to get handled on remaining nodes */
402 msleep(100);
374 } else { 403 } else {
375 /* success! see if any other nodes need recovery */ 404 /* success! see if any other nodes need recovery */
405 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
406 dlm->name, dlm->reco.dead_node, dlm->node_num);
376 dlm_reset_recovery(dlm); 407 dlm_reset_recovery(dlm);
377 } 408 }
378 dlm_end_recovery(dlm); 409 dlm_end_recovery(dlm);
@@ -477,7 +508,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
477 BUG(); 508 BUG();
478 break; 509 break;
479 case DLM_RECO_NODE_DATA_DEAD: 510 case DLM_RECO_NODE_DATA_DEAD:
480 mlog(0, "node %u died after " 511 mlog(ML_NOTICE, "node %u died after "
481 "requesting recovery info for " 512 "requesting recovery info for "
482 "node %u\n", ndata->node_num, 513 "node %u\n", ndata->node_num,
483 dead_node); 514 dead_node);
@@ -485,6 +516,19 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
485 // start all over 516 // start all over
486 destroy = 1; 517 destroy = 1;
487 status = -EAGAIN; 518 status = -EAGAIN;
519 /* instead of spinning like crazy here,
520 * wait for the domain map to catch up
521 * with the network state. otherwise this
522 * can be hit hundreds of times before
523 * the node is really seen as dead. */
524 wait_event_timeout(dlm->dlm_reco_thread_wq,
525 dlm_is_node_dead(dlm,
526 ndata->node_num),
527 msecs_to_jiffies(1000));
528 mlog(0, "waited 1 sec for %u, "
529 "dead? %s\n", ndata->node_num,
530 dlm_is_node_dead(dlm, ndata->node_num) ?
531 "yes" : "no");
488 goto leave; 532 goto leave;
489 case DLM_RECO_NODE_DATA_RECEIVING: 533 case DLM_RECO_NODE_DATA_RECEIVING:
490 case DLM_RECO_NODE_DATA_REQUESTED: 534 case DLM_RECO_NODE_DATA_REQUESTED:
@@ -678,11 +722,27 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
678 dlm = item->dlm; 722 dlm = item->dlm;
679 dead_node = item->u.ral.dead_node; 723 dead_node = item->u.ral.dead_node;
680 reco_master = item->u.ral.reco_master; 724 reco_master = item->u.ral.reco_master;
725 mres = (struct dlm_migratable_lockres *)data;
726
727 if (dead_node != dlm->reco.dead_node ||
728 reco_master != dlm->reco.new_master) {
729 /* show extra debug info if the recovery state is messed */
730 mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), "
731 "request(dead=%u, master=%u)\n",
732 dlm->name, dlm->reco.dead_node, dlm->reco.new_master,
733 dead_node, reco_master);
734 mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u "
735 "entry[0]={c=%"MLFu64",l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n",
736 dlm->name, mres->lockname_len, mres->lockname, mres->master,
737 mres->num_locks, mres->total_locks, mres->flags,
738 mres->ml[0].cookie, mres->ml[0].list, mres->ml[0].flags,
739 mres->ml[0].type, mres->ml[0].convert_type,
740 mres->ml[0].highest_blocked, mres->ml[0].node);
741 BUG();
742 }
681 BUG_ON(dead_node != dlm->reco.dead_node); 743 BUG_ON(dead_node != dlm->reco.dead_node);
682 BUG_ON(reco_master != dlm->reco.new_master); 744 BUG_ON(reco_master != dlm->reco.new_master);
683 745
684 mres = (struct dlm_migratable_lockres *)data;
685
686 /* lock resources should have already been moved to the 746 /* lock resources should have already been moved to the
687 * dlm->reco.resources list. now move items from that list 747 * dlm->reco.resources list. now move items from that list
688 * to a temp list if the dead owner matches. note that the 748 * to a temp list if the dead owner matches. note that the
@@ -757,15 +817,18 @@ int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
757 continue; 817 continue;
758 818
759 switch (ndata->state) { 819 switch (ndata->state) {
820 /* should have moved beyond INIT but not to FINALIZE yet */
760 case DLM_RECO_NODE_DATA_INIT: 821 case DLM_RECO_NODE_DATA_INIT:
761 case DLM_RECO_NODE_DATA_DEAD: 822 case DLM_RECO_NODE_DATA_DEAD:
762 case DLM_RECO_NODE_DATA_DONE:
763 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 823 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
764 mlog(ML_ERROR, "bad ndata state for node %u:" 824 mlog(ML_ERROR, "bad ndata state for node %u:"
765 " state=%d\n", ndata->node_num, 825 " state=%d\n", ndata->node_num,
766 ndata->state); 826 ndata->state);
767 BUG(); 827 BUG();
768 break; 828 break;
829 /* these states are possible at this point, anywhere along
830 * the line of recovery */
831 case DLM_RECO_NODE_DATA_DONE:
769 case DLM_RECO_NODE_DATA_RECEIVING: 832 case DLM_RECO_NODE_DATA_RECEIVING:
770 case DLM_RECO_NODE_DATA_REQUESTED: 833 case DLM_RECO_NODE_DATA_REQUESTED:
771 case DLM_RECO_NODE_DATA_REQUESTING: 834 case DLM_RECO_NODE_DATA_REQUESTING:
@@ -799,13 +862,31 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
799{ 862{
800 struct dlm_lock_resource *res; 863 struct dlm_lock_resource *res;
801 struct list_head *iter, *iter2; 864 struct list_head *iter, *iter2;
865 struct dlm_lock *lock;
802 866
803 spin_lock(&dlm->spinlock); 867 spin_lock(&dlm->spinlock);
804 list_for_each_safe(iter, iter2, &dlm->reco.resources) { 868 list_for_each_safe(iter, iter2, &dlm->reco.resources) {
805 res = list_entry (iter, struct dlm_lock_resource, recovering); 869 res = list_entry (iter, struct dlm_lock_resource, recovering);
870 /* always prune any $RECOVERY entries for dead nodes,
871 * otherwise hangs can occur during later recovery */
806 if (dlm_is_recovery_lock(res->lockname.name, 872 if (dlm_is_recovery_lock(res->lockname.name,
807 res->lockname.len)) 873 res->lockname.len)) {
874 spin_lock(&res->spinlock);
875 list_for_each_entry(lock, &res->granted, list) {
876 if (lock->ml.node == dead_node) {
877 mlog(0, "AHA! there was "
878 "a $RECOVERY lock for dead "
879 "node %u (%s)!\n",
880 dead_node, dlm->name);
881 list_del_init(&lock->list);
882 dlm_lock_put(lock);
883 break;
884 }
885 }
886 spin_unlock(&res->spinlock);
808 continue; 887 continue;
888 }
889
809 if (res->owner == dead_node) { 890 if (res->owner == dead_node) {
810 mlog(0, "found lockres owned by dead node while " 891 mlog(0, "found lockres owned by dead node while "
811 "doing recovery for node %u. sending it.\n", 892 "doing recovery for node %u. sending it.\n",
@@ -1179,7 +1260,7 @@ static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1179again: 1260again:
1180 ret = dlm_lockres_master_requery(dlm, res, &real_master); 1261 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1181 if (ret < 0) { 1262 if (ret < 0) {
1182 mlog(0, "dlm_lockres_master_requery failure: %d\n", 1263 mlog(0, "dlm_lockres_master_requery ret=%d\n",
1183 ret); 1264 ret);
1184 goto again; 1265 goto again;
1185 } 1266 }
@@ -1757,6 +1838,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
1757 struct dlm_lock_resource *res; 1838 struct dlm_lock_resource *res;
1758 int i; 1839 int i;
1759 struct list_head *bucket; 1840 struct list_head *bucket;
1841 struct dlm_lock *lock;
1760 1842
1761 1843
1762 /* purge any stale mles */ 1844 /* purge any stale mles */
@@ -1780,10 +1862,25 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
1780 bucket = &(dlm->resources[i]); 1862 bucket = &(dlm->resources[i]);
1781 list_for_each(iter, bucket) { 1863 list_for_each(iter, bucket) {
1782 res = list_entry (iter, struct dlm_lock_resource, list); 1864 res = list_entry (iter, struct dlm_lock_resource, list);
1865 /* always prune any $RECOVERY entries for dead nodes,
1866 * otherwise hangs can occur during later recovery */
1783 if (dlm_is_recovery_lock(res->lockname.name, 1867 if (dlm_is_recovery_lock(res->lockname.name,
1784 res->lockname.len)) 1868 res->lockname.len)) {
1869 spin_lock(&res->spinlock);
1870 list_for_each_entry(lock, &res->granted, list) {
1871 if (lock->ml.node == dead_node) {
1872 mlog(0, "AHA! there was "
1873 "a $RECOVERY lock for dead "
1874 "node %u (%s)!\n",
1875 dead_node, dlm->name);
1876 list_del_init(&lock->list);
1877 dlm_lock_put(lock);
1878 break;
1879 }
1880 }
1881 spin_unlock(&res->spinlock);
1785 continue; 1882 continue;
1786 1883 }
1787 spin_lock(&res->spinlock); 1884 spin_lock(&res->spinlock);
1788 /* zero the lvb if necessary */ 1885 /* zero the lvb if necessary */
1789 dlm_revalidate_lvb(dlm, res, dead_node); 1886 dlm_revalidate_lvb(dlm, res, dead_node);
@@ -1869,12 +1966,9 @@ void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
1869 return; 1966 return;
1870 1967
1871 spin_lock(&dlm->spinlock); 1968 spin_lock(&dlm->spinlock);
1872
1873 set_bit(idx, dlm->live_nodes_map); 1969 set_bit(idx, dlm->live_nodes_map);
1874 1970 /* do NOT notify mle attached to the heartbeat events.
1875 /* notify any mles attached to the heartbeat events */ 1971 * new nodes are not interesting in mastery until joined. */
1876 dlm_hb_event_notify_attached(dlm, idx, 1);
1877
1878 spin_unlock(&dlm->spinlock); 1972 spin_unlock(&dlm->spinlock);
1879 1973
1880 dlm_put(dlm); 1974 dlm_put(dlm);
@@ -1897,7 +1991,18 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
1897 mlog(0, "unlockast for recovery lock fired!\n"); 1991 mlog(0, "unlockast for recovery lock fired!\n");
1898} 1992}
1899 1993
1900 1994/*
1995 * dlm_pick_recovery_master will continually attempt to use
1996 * dlmlock() on the special "$RECOVERY" lockres with the
1997 * LKM_NOQUEUE flag to get an EX. every thread that enters
1998 * this function on each node racing to become the recovery
1999 * master will not stop attempting this until either:
2000 * a) this node gets the EX (and becomes the recovery master),
2001 * or b) dlm->reco.new_master gets set to some nodenum
2002 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2003 * so each time a recovery master is needed, the entire cluster
2004 * will sync at this point. if the new master dies, that will
2005 * be detected in dlm_do_recovery */
1901static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) 2006static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
1902{ 2007{
1903 enum dlm_status ret; 2008 enum dlm_status ret;
@@ -1906,23 +2011,45 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
1906 2011
1907 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2012 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
1908 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2013 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
1909retry: 2014again:
1910 memset(&lksb, 0, sizeof(lksb)); 2015 memset(&lksb, 0, sizeof(lksb));
1911 2016
1912 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2017 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
1913 DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast); 2018 DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
1914 2019
2020 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2021 dlm->name, ret, lksb.status);
2022
1915 if (ret == DLM_NORMAL) { 2023 if (ret == DLM_NORMAL) {
1916 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2024 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
1917 dlm->name, dlm->node_num); 2025 dlm->name, dlm->node_num);
1918 /* I am master, send message to all nodes saying 2026
1919 * that I am beginning a recovery session */ 2027 /* got the EX lock. check to see if another node
1920 status = dlm_send_begin_reco_message(dlm, 2028 * just became the reco master */
1921 dlm->reco.dead_node); 2029 if (dlm_reco_master_ready(dlm)) {
2030 mlog(0, "%s: got reco EX lock, but %u will "
2031 "do the recovery\n", dlm->name,
2032 dlm->reco.new_master);
2033 status = -EEXIST;
2034 } else {
2035 status = dlm_send_begin_reco_message(dlm,
2036 dlm->reco.dead_node);
2037 /* this always succeeds */
2038 BUG_ON(status);
2039
2040 /* set the new_master to this node */
2041 spin_lock(&dlm->spinlock);
2042 dlm->reco.new_master = dlm->node_num;
2043 spin_unlock(&dlm->spinlock);
2044 }
1922 2045
1923 /* recovery lock is a special case. ast will not get fired, 2046 /* recovery lock is a special case. ast will not get fired,
1924 * so just go ahead and unlock it. */ 2047 * so just go ahead and unlock it. */
1925 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); 2048 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2049 if (ret == DLM_DENIED) {
2050 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2051 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2052 }
1926 if (ret != DLM_NORMAL) { 2053 if (ret != DLM_NORMAL) {
1927 /* this would really suck. this could only happen 2054 /* this would really suck. this could only happen
1928 * if there was a network error during the unlock 2055 * if there was a network error during the unlock
@@ -1930,20 +2057,42 @@ retry:
1930 * is actually "done" and the lock structure is 2057 * is actually "done" and the lock structure is
1931 * even freed. we can continue, but only 2058 * even freed. we can continue, but only
1932 * because this specific lock name is special. */ 2059 * because this specific lock name is special. */
1933 mlog(0, "dlmunlock returned %d\n", ret); 2060 mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
1934 }
1935
1936 if (status < 0) {
1937 mlog(0, "failed to send recovery message. "
1938 "must retry with new node map.\n");
1939 goto retry;
1940 } 2061 }
1941 } else if (ret == DLM_NOTQUEUED) { 2062 } else if (ret == DLM_NOTQUEUED) {
1942 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", 2063 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
1943 dlm->name, dlm->node_num); 2064 dlm->name, dlm->node_num);
1944 /* another node is master. wait on 2065 /* another node is master. wait on
1945 * reco.new_master != O2NM_INVALID_NODE_NUM */ 2066 * reco.new_master != O2NM_INVALID_NODE_NUM
2067 * for at most one second */
2068 wait_event_timeout(dlm->dlm_reco_thread_wq,
2069 dlm_reco_master_ready(dlm),
2070 msecs_to_jiffies(1000));
2071 if (!dlm_reco_master_ready(dlm)) {
2072 mlog(0, "%s: reco master taking awhile\n",
2073 dlm->name);
2074 goto again;
2075 }
2076 /* another node has informed this one that it is reco master */
2077 mlog(0, "%s: reco master %u is ready to recover %u\n",
2078 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
1946 status = -EEXIST; 2079 status = -EEXIST;
2080 } else {
2081 struct dlm_lock_resource *res;
2082
2083 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2084 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2085 "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2086 dlm_errname(lksb.status));
2087 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2088 DLM_RECOVERY_LOCK_NAME_LEN);
2089 if (res) {
2090 dlm_print_one_lock_resource(res);
2091 dlm_lockres_put(res);
2092 } else {
2093 mlog(ML_ERROR, "recovery lock not found\n");
2094 }
2095 BUG();
1947 } 2096 }
1948 2097
1949 return status; 2098 return status;
@@ -1982,7 +2131,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
1982 mlog(0, "not sending begin reco to self\n"); 2131 mlog(0, "not sending begin reco to self\n");
1983 continue; 2132 continue;
1984 } 2133 }
1985 2134retry:
1986 ret = -EINVAL; 2135 ret = -EINVAL;
1987 mlog(0, "attempting to send begin reco msg to %d\n", 2136 mlog(0, "attempting to send begin reco msg to %d\n",
1988 nodenum); 2137 nodenum);
@@ -1991,8 +2140,17 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
1991 /* negative status is handled ok by caller here */ 2140 /* negative status is handled ok by caller here */
1992 if (ret >= 0) 2141 if (ret >= 0)
1993 ret = status; 2142 ret = status;
2143 if (dlm_is_host_down(ret)) {
2144 /* node is down. not involved in recovery
2145 * so just keep going */
2146 mlog(0, "%s: node %u was down when sending "
2147 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2148 ret = 0;
2149 }
1994 if (ret < 0) { 2150 if (ret < 0) {
1995 struct dlm_lock_resource *res; 2151 struct dlm_lock_resource *res;
2152 /* this is now a serious problem, possibly ENOMEM
2153 * in the network stack. must retry */
1996 mlog_errno(ret); 2154 mlog_errno(ret);
1997 mlog(ML_ERROR, "begin reco of dlm %s to node %u " 2155 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
1998 " returned %d\n", dlm->name, nodenum, ret); 2156 " returned %d\n", dlm->name, nodenum, ret);
@@ -2004,7 +2162,10 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2004 } else { 2162 } else {
2005 mlog(ML_ERROR, "recovery lock not found\n"); 2163 mlog(ML_ERROR, "recovery lock not found\n");
2006 } 2164 }
2007 break; 2165 /* sleep for a bit in hopes that we can avoid
2166 * another ENOMEM */
2167 msleep(100);
2168 goto retry;
2008 } 2169 }
2009 } 2170 }
2010 2171
@@ -2027,19 +2188,34 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2027 2188
2028 spin_lock(&dlm->spinlock); 2189 spin_lock(&dlm->spinlock);
2029 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2190 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2030 mlog(0, "new_master already set to %u!\n", 2191 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2031 dlm->reco.new_master); 2192 mlog(0, "%s: new_master %u died, changing "
2193 "to %u\n", dlm->name, dlm->reco.new_master,
2194 br->node_idx);
2195 } else {
2196 mlog(0, "%s: new_master %u NOT DEAD, changing "
2197 "to %u\n", dlm->name, dlm->reco.new_master,
2198 br->node_idx);
2199 /* may not have seen the new master as dead yet */
2200 }
2032 } 2201 }
2033 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { 2202 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2034 mlog(0, "dead_node already set to %u!\n", 2203 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2035 dlm->reco.dead_node); 2204 "node %u changing it to %u\n", dlm->name,
2205 dlm->reco.dead_node, br->node_idx, br->dead_node);
2036 } 2206 }
2037 dlm->reco.new_master = br->node_idx; 2207 dlm->reco.new_master = br->node_idx;
2038 dlm->reco.dead_node = br->dead_node; 2208 dlm->reco.dead_node = br->dead_node;
2039 if (!test_bit(br->dead_node, dlm->recovery_map)) { 2209 if (!test_bit(br->dead_node, dlm->recovery_map)) {
2040 mlog(ML_ERROR, "recovery master %u sees %u as dead, but this " 2210 mlog(0, "recovery master %u sees %u as dead, but this "
2041 "node has not yet. marking %u as dead\n", 2211 "node has not yet. marking %u as dead\n",
2042 br->node_idx, br->dead_node, br->dead_node); 2212 br->node_idx, br->dead_node, br->dead_node);
2213 if (!test_bit(br->dead_node, dlm->domain_map) ||
2214 !test_bit(br->dead_node, dlm->live_nodes_map))
2215 mlog(0, "%u not in domain/live_nodes map "
2216 "so setting it in reco map manually\n",
2217 br->dead_node);
2218 set_bit(br->dead_node, dlm->recovery_map);
2043 __dlm_hb_node_down(dlm, br->dead_node); 2219 __dlm_hb_node_down(dlm, br->dead_node);
2044 } 2220 }
2045 spin_unlock(&dlm->spinlock); 2221 spin_unlock(&dlm->spinlock);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index cec2ce1cd318..c95f08d2e925 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -188,6 +188,19 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
188 actions &= ~(DLM_UNLOCK_REMOVE_LOCK| 188 actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
189 DLM_UNLOCK_REGRANT_LOCK| 189 DLM_UNLOCK_REGRANT_LOCK|
190 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 190 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
191 } else if (status == DLM_RECOVERING ||
192 status == DLM_MIGRATING ||
193 status == DLM_FORWARD) {
194 /* must clear the actions because this unlock
195 * is about to be retried. cannot free or do
196 * any list manipulation. */
197 mlog(0, "%s:%.*s: clearing actions, %s\n",
198 dlm->name, res->lockname.len,
199 res->lockname.name,
200 status==DLM_RECOVERING?"recovering":
201 (status==DLM_MIGRATING?"migrating":
202 "forward"));
203 actions = 0;
191 } 204 }
192 if (flags & LKM_CANCEL) 205 if (flags & LKM_CANCEL)
193 lock->cancel_pending = 0; 206 lock->cancel_pending = 0;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index e1fdd288796e..c3764f4744ee 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -27,7 +27,7 @@
27 * Boston, MA 021110-1307, USA. 27 * Boston, MA 021110-1307, USA.
28 */ 28 */
29 29
30#include <asm/signal.h> 30#include <linux/signal.h>
31 31
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/fs.h> 33#include <linux/fs.h>
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index f2fb40cd296a..b6ba292e9544 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -262,8 +262,7 @@ static int ocfs2_extent_map_find_leaf(struct inode *inode,
262 el = &eb->h_list; 262 el = &eb->h_list;
263 } 263 }
264 264
265 if (el->l_tree_depth) 265 BUG_ON(el->l_tree_depth);
266 BUG();
267 266
268 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { 267 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
269 rec = &el->l_recs[i]; 268 rec = &el->l_recs[i];
@@ -364,8 +363,8 @@ static int ocfs2_extent_map_lookup_read(struct inode *inode,
364 return ret; 363 return ret;
365 } 364 }
366 365
367 if (ent->e_tree_depth) 366 /* FIXME: Make sure this isn't a corruption */
368 BUG(); /* FIXME: Make sure this isn't a corruption */ 367 BUG_ON(ent->e_tree_depth);
369 368
370 *ret_ent = ent; 369 *ret_ent = ent;
371 370
@@ -423,8 +422,7 @@ static int ocfs2_extent_map_try_insert(struct inode *inode,
423 le32_to_cpu(rec->e_clusters), NULL, 422 le32_to_cpu(rec->e_clusters), NULL,
424 NULL); 423 NULL);
425 424
426 if (!old_ent) 425 BUG_ON(!old_ent);
427 BUG();
428 426
429 ret = -EEXIST; 427 ret = -EEXIST;
430 if (old_ent->e_tree_depth < tree_depth) 428 if (old_ent->e_tree_depth < tree_depth)
@@ -988,7 +986,7 @@ int __init init_ocfs2_extent_maps(void)
988 return 0; 986 return 0;
989} 987}
990 988
991void __exit exit_ocfs2_extent_maps(void) 989void exit_ocfs2_extent_maps(void)
992{ 990{
993 kmem_cache_destroy(ocfs2_em_ent_cachep); 991 kmem_cache_destroy(ocfs2_em_ent_cachep);
994} 992}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index eaf33caa0a1f..1715bc90e705 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1022,8 +1022,9 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1022 } 1022 }
1023 newsize = count + saved_pos; 1023 newsize = count + saved_pos;
1024 1024
1025 mlog(0, "pos=%lld newsize=%"MLFu64" cursize=%lld\n", 1025 mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
1026 saved_pos, newsize, i_size_read(inode)); 1026 (long long) saved_pos, (long long) newsize,
1027 (long long) i_size_read(inode));
1027 1028
1028 /* No need for a higher level metadata lock if we're 1029 /* No need for a higher level metadata lock if we're
1029 * never going past i_size. */ 1030 * never going past i_size. */
@@ -1042,8 +1043,9 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1042 spin_unlock(&OCFS2_I(inode)->ip_lock); 1043 spin_unlock(&OCFS2_I(inode)->ip_lock);
1043 1044
1044 mlog(0, "Writing at EOF, may need more allocation: " 1045 mlog(0, "Writing at EOF, may need more allocation: "
1045 "i_size = %lld, newsize = %"MLFu64", need %u clusters\n", 1046 "i_size = %lld, newsize = %lld, need %u clusters\n",
1046 i_size_read(inode), newsize, clusters); 1047 (long long) i_size_read(inode), (long long) newsize,
1048 clusters);
1047 1049
1048 /* We only want to continue the rest of this loop if 1050 /* We only want to continue the rest of this loop if
1049 * our extend will actually require more 1051 * our extend will actually require more
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index d4ecc0627716..8122489c5762 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -903,10 +903,10 @@ void ocfs2_clear_inode(struct inode *inode)
903 "Clear inode of %"MLFu64", inode is locked\n", 903 "Clear inode of %"MLFu64", inode is locked\n",
904 oi->ip_blkno); 904 oi->ip_blkno);
905 905
906 mlog_bug_on_msg(down_trylock(&oi->ip_io_sem), 906 mlog_bug_on_msg(!mutex_trylock(&oi->ip_io_mutex),
907 "Clear inode of %"MLFu64", io_sem is locked\n", 907 "Clear inode of %"MLFu64", io_mutex is locked\n",
908 oi->ip_blkno); 908 oi->ip_blkno);
909 up(&oi->ip_io_sem); 909 mutex_unlock(&oi->ip_io_mutex);
910 910
911 /* 911 /*
912 * down_trylock() returns 0, down_write_trylock() returns 1 912 * down_trylock() returns 0, down_write_trylock() returns 1
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 9b0177433653..84c507961287 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -46,10 +46,10 @@ struct ocfs2_inode_info
46 struct list_head ip_io_markers; 46 struct list_head ip_io_markers;
47 int ip_orphaned_slot; 47 int ip_orphaned_slot;
48 48
49 struct semaphore ip_io_sem; 49 struct mutex ip_io_mutex;
50 50
51 /* Used by the journalling code to attach an inode to a 51 /* Used by the journalling code to attach an inode to a
52 * handle. These are protected by ip_io_sem in order to lock 52 * handle. These are protected by ip_io_mutex in order to lock
53 * out other I/O to the inode until we either commit or 53 * out other I/O to the inode until we either commit or
54 * abort. */ 54 * abort. */
55 struct list_head ip_handle_list; 55 struct list_head ip_handle_list;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 303c8d96457f..fa0bcac5ceae 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -147,8 +147,7 @@ struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb,
147 147
148 mlog_entry("(max_buffs = %d)\n", max_buffs); 148 mlog_entry("(max_buffs = %d)\n", max_buffs);
149 149
150 if (!osb || !osb->journal->j_journal) 150 BUG_ON(!osb || !osb->journal->j_journal);
151 BUG();
152 151
153 if (ocfs2_is_hard_readonly(osb)) { 152 if (ocfs2_is_hard_readonly(osb)) {
154 ret = -EROFS; 153 ret = -EROFS;
@@ -401,7 +400,7 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
401 * j_trans_barrier for us. */ 400 * j_trans_barrier for us. */
402 ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode); 401 ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode);
403 402
404 down(&OCFS2_I(inode)->ip_io_sem); 403 mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
405 switch (type) { 404 switch (type) {
406 case OCFS2_JOURNAL_ACCESS_CREATE: 405 case OCFS2_JOURNAL_ACCESS_CREATE:
407 case OCFS2_JOURNAL_ACCESS_WRITE: 406 case OCFS2_JOURNAL_ACCESS_WRITE:
@@ -416,7 +415,7 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
416 status = -EINVAL; 415 status = -EINVAL;
417 mlog(ML_ERROR, "Uknown access type!\n"); 416 mlog(ML_ERROR, "Uknown access type!\n");
418 } 417 }
419 up(&OCFS2_I(inode)->ip_io_sem); 418 mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
420 419
421 if (status < 0) 420 if (status < 0)
422 mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", 421 mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
@@ -561,7 +560,11 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
561 SET_INODE_JOURNAL(inode); 560 SET_INODE_JOURNAL(inode);
562 OCFS2_I(inode)->ip_open_count++; 561 OCFS2_I(inode)->ip_open_count++;
563 562
564 status = ocfs2_meta_lock(inode, NULL, &bh, 1); 563 /* Skip recovery waits here - journal inode metadata never
564 * changes in a live cluster so it can be considered an
565 * exception to the rule. */
566 status = ocfs2_meta_lock_full(inode, NULL, &bh, 1,
567 OCFS2_META_LOCK_RECOVERY);
565 if (status < 0) { 568 if (status < 0) {
566 if (status != -ERESTARTSYS) 569 if (status != -ERESTARTSYS)
567 mlog(ML_ERROR, "Could not get lock on journal!\n"); 570 mlog(ML_ERROR, "Could not get lock on journal!\n");
@@ -672,8 +675,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
672 675
673 mlog_entry_void(); 676 mlog_entry_void();
674 677
675 if (!osb) 678 BUG_ON(!osb);
676 BUG();
677 679
678 journal = osb->journal; 680 journal = osb->journal;
679 if (!journal) 681 if (!journal)
@@ -805,8 +807,7 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
805 807
806 mlog_entry_void(); 808 mlog_entry_void();
807 809
808 if (!journal) 810 BUG_ON(!journal);
809 BUG();
810 811
811 status = journal_wipe(journal->j_journal, full); 812 status = journal_wipe(journal->j_journal, full);
812 if (status < 0) { 813 if (status < 0) {
@@ -1072,10 +1073,10 @@ restart:
1072 NULL); 1073 NULL);
1073 1074
1074bail: 1075bail:
1075 down(&osb->recovery_lock); 1076 mutex_lock(&osb->recovery_lock);
1076 if (!status && 1077 if (!status &&
1077 !ocfs2_node_map_is_empty(osb, &osb->recovery_map)) { 1078 !ocfs2_node_map_is_empty(osb, &osb->recovery_map)) {
1078 up(&osb->recovery_lock); 1079 mutex_unlock(&osb->recovery_lock);
1079 goto restart; 1080 goto restart;
1080 } 1081 }
1081 1082
@@ -1083,7 +1084,7 @@ bail:
1083 mb(); /* sync with ocfs2_recovery_thread_running */ 1084 mb(); /* sync with ocfs2_recovery_thread_running */
1084 wake_up(&osb->recovery_event); 1085 wake_up(&osb->recovery_event);
1085 1086
1086 up(&osb->recovery_lock); 1087 mutex_unlock(&osb->recovery_lock);
1087 1088
1088 mlog_exit(status); 1089 mlog_exit(status);
1089 /* no one is callint kthread_stop() for us so the kthread() api 1090 /* no one is callint kthread_stop() for us so the kthread() api
@@ -1098,7 +1099,7 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
1098 mlog_entry("(node_num=%d, osb->node_num = %d)\n", 1099 mlog_entry("(node_num=%d, osb->node_num = %d)\n",
1099 node_num, osb->node_num); 1100 node_num, osb->node_num);
1100 1101
1101 down(&osb->recovery_lock); 1102 mutex_lock(&osb->recovery_lock);
1102 if (osb->disable_recovery) 1103 if (osb->disable_recovery)
1103 goto out; 1104 goto out;
1104 1105
@@ -1120,7 +1121,7 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
1120 } 1121 }
1121 1122
1122out: 1123out:
1123 up(&osb->recovery_lock); 1124 mutex_unlock(&osb->recovery_lock);
1124 wake_up(&osb->recovery_event); 1125 wake_up(&osb->recovery_event);
1125 1126
1126 mlog_exit_void(); 1127 mlog_exit_void();
@@ -1271,8 +1272,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
1271 1272
1272 /* Should not ever be called to recover ourselves -- in that 1273 /* Should not ever be called to recover ourselves -- in that
1273 * case we should've called ocfs2_journal_load instead. */ 1274 * case we should've called ocfs2_journal_load instead. */
1274 if (osb->node_num == node_num) 1275 BUG_ON(osb->node_num == node_num);
1275 BUG();
1276 1276
1277 slot_num = ocfs2_node_num_to_slot(si, node_num); 1277 slot_num = ocfs2_node_num_to_slot(si, node_num);
1278 if (slot_num == OCFS2_INVALID_SLOT) { 1278 if (slot_num == OCFS2_INVALID_SLOT) {
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index f468c600cf92..8d8e4779df92 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -33,6 +33,7 @@
33#include <linux/rbtree.h> 33#include <linux/rbtree.h>
34#include <linux/workqueue.h> 34#include <linux/workqueue.h>
35#include <linux/kref.h> 35#include <linux/kref.h>
36#include <linux/mutex.h>
36 37
37#include "cluster/nodemanager.h" 38#include "cluster/nodemanager.h"
38#include "cluster/heartbeat.h" 39#include "cluster/heartbeat.h"
@@ -233,7 +234,7 @@ struct ocfs2_super
233 struct proc_dir_entry *proc_sub_dir; /* points to /proc/fs/ocfs2/<maj_min> */ 234 struct proc_dir_entry *proc_sub_dir; /* points to /proc/fs/ocfs2/<maj_min> */
234 235
235 atomic_t vol_state; 236 atomic_t vol_state;
236 struct semaphore recovery_lock; 237 struct mutex recovery_lock;
237 struct task_struct *recovery_thread_task; 238 struct task_struct *recovery_thread_task;
238 int disable_recovery; 239 int disable_recovery;
239 wait_queue_head_t checkpoint_event; 240 wait_queue_head_t checkpoint_event;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 364d64bd5f10..046824b6b625 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -932,7 +932,7 @@ static void ocfs2_inode_init_once(void *data,
932 oi->ip_dir_start_lookup = 0; 932 oi->ip_dir_start_lookup = 0;
933 933
934 init_rwsem(&oi->ip_alloc_sem); 934 init_rwsem(&oi->ip_alloc_sem);
935 init_MUTEX(&(oi->ip_io_sem)); 935 mutex_init(&oi->ip_io_mutex);
936 936
937 oi->ip_blkno = 0ULL; 937 oi->ip_blkno = 0ULL;
938 oi->ip_clusters = 0; 938 oi->ip_clusters = 0;
@@ -1137,9 +1137,9 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1137 1137
1138 /* disable any new recovery threads and wait for any currently 1138 /* disable any new recovery threads and wait for any currently
1139 * running ones to exit. Do this before setting the vol_state. */ 1139 * running ones to exit. Do this before setting the vol_state. */
1140 down(&osb->recovery_lock); 1140 mutex_lock(&osb->recovery_lock);
1141 osb->disable_recovery = 1; 1141 osb->disable_recovery = 1;
1142 up(&osb->recovery_lock); 1142 mutex_unlock(&osb->recovery_lock);
1143 wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb)); 1143 wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
1144 1144
1145 /* At this point, we know that no more recovery threads can be 1145 /* At this point, we know that no more recovery threads can be
@@ -1254,8 +1254,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1254 osb->sb = sb; 1254 osb->sb = sb;
1255 /* Save off for ocfs2_rw_direct */ 1255 /* Save off for ocfs2_rw_direct */
1256 osb->s_sectsize_bits = blksize_bits(sector_size); 1256 osb->s_sectsize_bits = blksize_bits(sector_size);
1257 if (!osb->s_sectsize_bits) 1257 BUG_ON(!osb->s_sectsize_bits);
1258 BUG();
1259 1258
1260 osb->net_response_ids = 0; 1259 osb->net_response_ids = 0;
1261 spin_lock_init(&osb->net_response_lock); 1260 spin_lock_init(&osb->net_response_lock);
@@ -1283,7 +1282,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1283 snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u", 1282 snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u",
1284 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); 1283 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
1285 1284
1286 init_MUTEX(&osb->recovery_lock); 1285 mutex_init(&osb->recovery_lock);
1287 1286
1288 osb->disable_recovery = 0; 1287 osb->disable_recovery = 0;
1289 osb->recovery_thread_task = NULL; 1288 osb->recovery_thread_task = NULL;
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index 600a8bc5b541..fc29cb7a437d 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -77,8 +77,7 @@ struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb,
77 if (arr && ((inode = *arr) != NULL)) { 77 if (arr && ((inode = *arr) != NULL)) {
78 /* get a ref in addition to the array ref */ 78 /* get a ref in addition to the array ref */
79 inode = igrab(inode); 79 inode = igrab(inode);
80 if (!inode) 80 BUG_ON(!inode);
81 BUG();
82 81
83 return inode; 82 return inode;
84 } 83 }
@@ -89,8 +88,7 @@ struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb,
89 /* add one more if putting into array for first time */ 88 /* add one more if putting into array for first time */
90 if (arr && inode) { 89 if (arr && inode) {
91 *arr = igrab(inode); 90 *arr = igrab(inode);
92 if (!*arr) 91 BUG_ON(!*arr);
93 BUG();
94 } 92 }
95 return inode; 93 return inode;
96} 94}
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 3a0458fd3e1b..300b5bedfb21 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -388,7 +388,7 @@ out_free:
388 } 388 }
389} 389}
390 390
391/* Item insertion is guarded by ip_io_sem, so the insertion path takes 391/* Item insertion is guarded by ip_io_mutex, so the insertion path takes
392 * advantage of this by not rechecking for a duplicate insert during 392 * advantage of this by not rechecking for a duplicate insert during
393 * the slow case. Additionally, if the cache needs to be bumped up to 393 * the slow case. Additionally, if the cache needs to be bumped up to
394 * a tree, the code will not recheck after acquiring the lock -- 394 * a tree, the code will not recheck after acquiring the lock --
@@ -418,7 +418,7 @@ void ocfs2_set_buffer_uptodate(struct inode *inode,
418 (unsigned long long) bh->b_blocknr); 418 (unsigned long long) bh->b_blocknr);
419 419
420 /* No need to recheck under spinlock - insertion is guarded by 420 /* No need to recheck under spinlock - insertion is guarded by
421 * ip_io_sem */ 421 * ip_io_mutex */
422 spin_lock(&oi->ip_lock); 422 spin_lock(&oi->ip_lock);
423 if (ocfs2_insert_can_use_array(oi, ci)) { 423 if (ocfs2_insert_can_use_array(oi, ci)) {
424 /* Fast case - it's an array and there's a free 424 /* Fast case - it's an array and there's a free
@@ -440,7 +440,7 @@ void ocfs2_set_buffer_uptodate(struct inode *inode,
440 440
441/* Called against a newly allocated buffer. Most likely nobody should 441/* Called against a newly allocated buffer. Most likely nobody should
442 * be able to read this sort of metadata while it's still being 442 * be able to read this sort of metadata while it's still being
443 * allocated, but this is careful to take ip_io_sem anyway. */ 443 * allocated, but this is careful to take ip_io_mutex anyway. */
444void ocfs2_set_new_buffer_uptodate(struct inode *inode, 444void ocfs2_set_new_buffer_uptodate(struct inode *inode,
445 struct buffer_head *bh) 445 struct buffer_head *bh)
446{ 446{
@@ -451,9 +451,9 @@ void ocfs2_set_new_buffer_uptodate(struct inode *inode,
451 451
452 set_buffer_uptodate(bh); 452 set_buffer_uptodate(bh);
453 453
454 down(&oi->ip_io_sem); 454 mutex_lock(&oi->ip_io_mutex);
455 ocfs2_set_buffer_uptodate(inode, bh); 455 ocfs2_set_buffer_uptodate(inode, bh);
456 up(&oi->ip_io_sem); 456 mutex_unlock(&oi->ip_io_mutex);
457} 457}
458 458
459/* Requires ip_lock. */ 459/* Requires ip_lock. */
@@ -537,7 +537,7 @@ int __init init_ocfs2_uptodate_cache(void)
537 return 0; 537 return 0;
538} 538}
539 539
540void __exit exit_ocfs2_uptodate_cache(void) 540void exit_ocfs2_uptodate_cache(void)
541{ 541{
542 if (ocfs2_uptodate_cachep) 542 if (ocfs2_uptodate_cachep)
543 kmem_cache_destroy(ocfs2_uptodate_cachep); 543 kmem_cache_destroy(ocfs2_uptodate_cachep);
diff --git a/fs/ocfs2/uptodate.h b/fs/ocfs2/uptodate.h
index e5aacdf4eabf..01cd32d26b06 100644
--- a/fs/ocfs2/uptodate.h
+++ b/fs/ocfs2/uptodate.h
@@ -27,7 +27,7 @@
27#define OCFS2_UPTODATE_H 27#define OCFS2_UPTODATE_H
28 28
29int __init init_ocfs2_uptodate_cache(void); 29int __init init_ocfs2_uptodate_cache(void);
30void __exit exit_ocfs2_uptodate_cache(void); 30void exit_ocfs2_uptodate_cache(void);
31 31
32void ocfs2_metadata_cache_init(struct inode *inode); 32void ocfs2_metadata_cache_init(struct inode *inode);
33void ocfs2_metadata_cache_purge(struct inode *inode); 33void ocfs2_metadata_cache_purge(struct inode *inode);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 8f8014285a34..1d24fead51a6 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -548,7 +548,7 @@ static int show_stat(struct seq_file *p, void *v)
548 } 548 }
549 seq_printf(p, "intr %llu", (unsigned long long)sum); 549 seq_printf(p, "intr %llu", (unsigned long long)sum);
550 550
551#if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA) 551#if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
552 for (i = 0; i < NR_IRQS; i++) 552 for (i = 0; i < NR_IRQS; i++)
553 seq_printf(p, " %u", kstat_irqs(i)); 553 seq_printf(p, " %u", kstat_irqs(i));
554#endif 554#endif
diff --git a/fs/quota_v2.c b/fs/quota_v2.c
index a4ef91bb4f3b..b4199ec3ece4 100644
--- a/fs/quota_v2.c
+++ b/fs/quota_v2.c
@@ -35,7 +35,7 @@ static int v2_check_quota_file(struct super_block *sb, int type)
35 35
36 size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); 36 size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
37 if (size != sizeof(struct v2_disk_dqheader)) { 37 if (size != sizeof(struct v2_disk_dqheader)) {
38 printk("quota_v2: failed read expected=%d got=%d\n", 38 printk("quota_v2: failed read expected=%zd got=%zd\n",
39 sizeof(struct v2_disk_dqheader), size); 39 sizeof(struct v2_disk_dqheader), size);
40 return 0; 40 return 0;
41 } 41 }
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 77891de0e02e..ef5e5414e7a8 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1125,7 +1125,7 @@ static void handle_attrs(struct super_block *s)
1125 REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS); 1125 REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS);
1126 } 1126 }
1127 } else if (le32_to_cpu(rs->s_flags) & reiserfs_attrs_cleared) { 1127 } else if (le32_to_cpu(rs->s_flags) & reiserfs_attrs_cleared) {
1128 REISERFS_SB(s)->s_mount_opt |= REISERFS_ATTRS; 1128 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ATTRS);
1129 } 1129 }
1130} 1130}
1131 1131
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 4fae57d9d115..201049ac8a96 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -579,10 +579,9 @@ static void udf_table_free_blocks(struct super_block * sb,
579 { 579 {
580 loffset = nextoffset; 580 loffset = nextoffset;
581 aed->lengthAllocDescs = cpu_to_le32(adsize); 581 aed->lengthAllocDescs = cpu_to_le32(adsize);
582 if (obh) 582 sptr = UDF_I_DATA(inode) + nextoffset -
583 sptr = UDF_I_DATA(inode) + nextoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode) - adsize; 583 udf_file_entry_alloc_offset(inode) +
584 else 584 UDF_I_LENEATTR(inode) - adsize;
585 sptr = obh->b_data + nextoffset - adsize;
586 dptr = nbh->b_data + sizeof(struct allocExtDesc); 585 dptr = nbh->b_data + sizeof(struct allocExtDesc);
587 memcpy(dptr, sptr, adsize); 586 memcpy(dptr, sptr, adsize);
588 nextoffset = sizeof(struct allocExtDesc) + adsize; 587 nextoffset = sizeof(struct allocExtDesc) + adsize;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index ca732e79c48b..ab9a7629d23e 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -296,7 +296,7 @@ static struct dentry *
296udf_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 296udf_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
297{ 297{
298 struct inode *inode = NULL; 298 struct inode *inode = NULL;
299 struct fileIdentDesc cfi, *fi; 299 struct fileIdentDesc cfi;
300 struct udf_fileident_bh fibh; 300 struct udf_fileident_bh fibh;
301 301
302 if (dentry->d_name.len > UDF_NAME_LEN-2) 302 if (dentry->d_name.len > UDF_NAME_LEN-2)
@@ -318,7 +318,7 @@ udf_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
318 else 318 else
319#endif /* UDF_RECOVERY */ 319#endif /* UDF_RECOVERY */
320 320
321 if ((fi = udf_find_entry(dir, dentry, &fibh, &cfi))) 321 if (udf_find_entry(dir, dentry, &fibh, &cfi))
322 { 322 {
323 if (fibh.sbh != fibh.ebh) 323 if (fibh.sbh != fibh.ebh)
324 udf_release_data(fibh.ebh); 324 udf_release_data(fibh.ebh);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index e0c04e36a051..3c3f62ce2ad9 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -376,7 +376,7 @@ out:
376 * This function gets the block which contains the fragment. 376 * This function gets the block which contains the fragment.
377 */ 377 */
378 378
379static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) 379int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
380{ 380{
381 struct super_block * sb = inode->i_sb; 381 struct super_block * sb = inode->i_sb;
382 struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi; 382 struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index d4aacee593ff..e9055ef7f5ac 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -388,7 +388,8 @@ static int ufs_parse_options (char * options, unsigned * mount_options)
388/* 388/*
389 * Read on-disk structures associated with cylinder groups 389 * Read on-disk structures associated with cylinder groups
390 */ 390 */
391static int ufs_read_cylinder_structures (struct super_block *sb) { 391static int ufs_read_cylinder_structures (struct super_block *sb)
392{
392 struct ufs_sb_info * sbi = UFS_SB(sb); 393 struct ufs_sb_info * sbi = UFS_SB(sb);
393 struct ufs_sb_private_info * uspi; 394 struct ufs_sb_private_info * uspi;
394 struct ufs_super_block *usb; 395 struct ufs_super_block *usb;
@@ -415,6 +416,7 @@ static int ufs_read_cylinder_structures (struct super_block *sb) {
415 base = space = kmalloc(size, GFP_KERNEL); 416 base = space = kmalloc(size, GFP_KERNEL);
416 if (!base) 417 if (!base)
417 goto failed; 418 goto failed;
419 sbi->s_csp = (struct ufs_csum *)space;
418 for (i = 0; i < blks; i += uspi->s_fpb) { 420 for (i = 0; i < blks; i += uspi->s_fpb) {
419 size = uspi->s_bsize; 421 size = uspi->s_bsize;
420 if (i + uspi->s_fpb > blks) 422 if (i + uspi->s_fpb > blks)
@@ -430,7 +432,6 @@ static int ufs_read_cylinder_structures (struct super_block *sb) {
430 goto failed; 432 goto failed;
431 433
432 ubh_ubhcpymem (space, ubh, size); 434 ubh_ubhcpymem (space, ubh, size);
433 sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
434 435
435 space += size; 436 space += size;
436 ubh_brelse (ubh); 437 ubh_brelse (ubh);
@@ -486,7 +487,8 @@ failed:
486 * Put on-disk structures associated with cylinder groups and 487 * Put on-disk structures associated with cylinder groups and
487 * write them back to disk 488 * write them back to disk
488 */ 489 */
489static void ufs_put_cylinder_structures (struct super_block *sb) { 490static void ufs_put_cylinder_structures (struct super_block *sb)
491{
490 struct ufs_sb_info * sbi = UFS_SB(sb); 492 struct ufs_sb_info * sbi = UFS_SB(sb);
491 struct ufs_sb_private_info * uspi; 493 struct ufs_sb_private_info * uspi;
492 struct ufs_buffer_head * ubh; 494 struct ufs_buffer_head * ubh;
@@ -499,7 +501,7 @@ static void ufs_put_cylinder_structures (struct super_block *sb) {
499 501
500 size = uspi->s_cssize; 502 size = uspi->s_cssize;
501 blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift; 503 blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
502 base = space = (char*) sbi->s_csp[0]; 504 base = space = (char*) sbi->s_csp;
503 for (i = 0; i < blks; i += uspi->s_fpb) { 505 for (i = 0; i < blks; i += uspi->s_fpb) {
504 size = uspi->s_bsize; 506 size = uspi->s_bsize;
505 if (i + uspi->s_fpb > blks) 507 if (i + uspi->s_fpb > blks)
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 61d2e35012a4..02e86291ef8a 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -29,6 +29,11 @@
29 * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr> 29 * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
30 */ 30 */
31 31
32/*
33 * Modified to avoid infinite loop on 2006 by
34 * Evgeniy Dushistov <dushistov@mail.ru>
35 */
36
32#include <linux/errno.h> 37#include <linux/errno.h>
33#include <linux/fs.h> 38#include <linux/fs.h>
34#include <linux/ufs_fs.h> 39#include <linux/ufs_fs.h>
@@ -65,19 +70,16 @@
65#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) 70#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
66#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) 71#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
67 72
68#define DATA_BUFFER_USED(bh) \
69 (atomic_read(&bh->b_count)>1 || buffer_locked(bh))
70 73
71static int ufs_trunc_direct (struct inode * inode) 74static int ufs_trunc_direct (struct inode * inode)
72{ 75{
73 struct ufs_inode_info *ufsi = UFS_I(inode); 76 struct ufs_inode_info *ufsi = UFS_I(inode);
74 struct super_block * sb; 77 struct super_block * sb;
75 struct ufs_sb_private_info * uspi; 78 struct ufs_sb_private_info * uspi;
76 struct buffer_head * bh;
77 __fs32 * p; 79 __fs32 * p;
78 unsigned frag1, frag2, frag3, frag4, block1, block2; 80 unsigned frag1, frag2, frag3, frag4, block1, block2;
79 unsigned frag_to_free, free_count; 81 unsigned frag_to_free, free_count;
80 unsigned i, j, tmp; 82 unsigned i, tmp;
81 int retry; 83 int retry;
82 84
83 UFSD(("ENTER\n")) 85 UFSD(("ENTER\n"))
@@ -117,15 +119,7 @@ static int ufs_trunc_direct (struct inode * inode)
117 ufs_panic (sb, "ufs_trunc_direct", "internal error"); 119 ufs_panic (sb, "ufs_trunc_direct", "internal error");
118 frag1 = ufs_fragnum (frag1); 120 frag1 = ufs_fragnum (frag1);
119 frag2 = ufs_fragnum (frag2); 121 frag2 = ufs_fragnum (frag2);
120 for (j = frag1; j < frag2; j++) { 122
121 bh = sb_find_get_block (sb, tmp + j);
122 if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
123 retry = 1;
124 brelse (bh);
125 goto next1;
126 }
127 bforget (bh);
128 }
129 inode->i_blocks -= (frag2-frag1) << uspi->s_nspfshift; 123 inode->i_blocks -= (frag2-frag1) << uspi->s_nspfshift;
130 mark_inode_dirty(inode); 124 mark_inode_dirty(inode);
131 ufs_free_fragments (inode, tmp + frag1, frag2 - frag1); 125 ufs_free_fragments (inode, tmp + frag1, frag2 - frag1);
@@ -140,15 +134,7 @@ next1:
140 tmp = fs32_to_cpu(sb, *p); 134 tmp = fs32_to_cpu(sb, *p);
141 if (!tmp) 135 if (!tmp)
142 continue; 136 continue;
143 for (j = 0; j < uspi->s_fpb; j++) { 137
144 bh = sb_find_get_block(sb, tmp + j);
145 if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
146 retry = 1;
147 brelse (bh);
148 goto next2;
149 }
150 bforget (bh);
151 }
152 *p = 0; 138 *p = 0;
153 inode->i_blocks -= uspi->s_nspb; 139 inode->i_blocks -= uspi->s_nspb;
154 mark_inode_dirty(inode); 140 mark_inode_dirty(inode);
@@ -162,7 +148,6 @@ next1:
162 frag_to_free = tmp; 148 frag_to_free = tmp;
163 free_count = uspi->s_fpb; 149 free_count = uspi->s_fpb;
164 } 150 }
165next2:;
166 } 151 }
167 152
168 if (free_count > 0) 153 if (free_count > 0)
@@ -179,15 +164,7 @@ next2:;
179 if (!tmp ) 164 if (!tmp )
180 ufs_panic(sb, "ufs_truncate_direct", "internal error"); 165 ufs_panic(sb, "ufs_truncate_direct", "internal error");
181 frag4 = ufs_fragnum (frag4); 166 frag4 = ufs_fragnum (frag4);
182 for (j = 0; j < frag4; j++) { 167
183 bh = sb_find_get_block (sb, tmp + j);
184 if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
185 retry = 1;
186 brelse (bh);
187 goto next1;
188 }
189 bforget (bh);
190 }
191 *p = 0; 168 *p = 0;
192 inode->i_blocks -= frag4 << uspi->s_nspfshift; 169 inode->i_blocks -= frag4 << uspi->s_nspfshift;
193 mark_inode_dirty(inode); 170 mark_inode_dirty(inode);
@@ -204,9 +181,8 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
204 struct super_block * sb; 181 struct super_block * sb;
205 struct ufs_sb_private_info * uspi; 182 struct ufs_sb_private_info * uspi;
206 struct ufs_buffer_head * ind_ubh; 183 struct ufs_buffer_head * ind_ubh;
207 struct buffer_head * bh;
208 __fs32 * ind; 184 __fs32 * ind;
209 unsigned indirect_block, i, j, tmp; 185 unsigned indirect_block, i, tmp;
210 unsigned frag_to_free, free_count; 186 unsigned frag_to_free, free_count;
211 int retry; 187 int retry;
212 188
@@ -238,15 +214,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
238 tmp = fs32_to_cpu(sb, *ind); 214 tmp = fs32_to_cpu(sb, *ind);
239 if (!tmp) 215 if (!tmp)
240 continue; 216 continue;
241 for (j = 0; j < uspi->s_fpb; j++) { 217
242 bh = sb_find_get_block(sb, tmp + j);
243 if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) {
244 retry = 1;
245 brelse (bh);
246 goto next;
247 }
248 bforget (bh);
249 }
250 *ind = 0; 218 *ind = 0;
251 ubh_mark_buffer_dirty(ind_ubh); 219 ubh_mark_buffer_dirty(ind_ubh);
252 if (free_count == 0) { 220 if (free_count == 0) {
@@ -261,7 +229,6 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
261 } 229 }
262 inode->i_blocks -= uspi->s_nspb; 230 inode->i_blocks -= uspi->s_nspb;
263 mark_inode_dirty(inode); 231 mark_inode_dirty(inode);
264next:;
265 } 232 }
266 233
267 if (free_count > 0) { 234 if (free_count > 0) {
@@ -430,9 +397,7 @@ void ufs_truncate (struct inode * inode)
430 struct ufs_inode_info *ufsi = UFS_I(inode); 397 struct ufs_inode_info *ufsi = UFS_I(inode);
431 struct super_block * sb; 398 struct super_block * sb;
432 struct ufs_sb_private_info * uspi; 399 struct ufs_sb_private_info * uspi;
433 struct buffer_head * bh; 400 int retry;
434 unsigned offset;
435 int err, retry;
436 401
437 UFSD(("ENTER\n")) 402 UFSD(("ENTER\n"))
438 sb = inode->i_sb; 403 sb = inode->i_sb;
@@ -442,6 +407,9 @@ void ufs_truncate (struct inode * inode)
442 return; 407 return;
443 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 408 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
444 return; 409 return;
410
411 block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);
412
445 lock_kernel(); 413 lock_kernel();
446 while (1) { 414 while (1) {
447 retry = ufs_trunc_direct(inode); 415 retry = ufs_trunc_direct(inode);
@@ -457,15 +425,7 @@ void ufs_truncate (struct inode * inode)
457 blk_run_address_space(inode->i_mapping); 425 blk_run_address_space(inode->i_mapping);
458 yield(); 426 yield();
459 } 427 }
460 offset = inode->i_size & uspi->s_fshift; 428
461 if (offset) {
462 bh = ufs_bread (inode, inode->i_size >> uspi->s_fshift, 0, &err);
463 if (bh) {
464 memset (bh->b_data + offset, 0, uspi->s_fsize - offset);
465 mark_buffer_dirty (bh);
466 brelse (bh);
467 }
468 }
469 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 429 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
470 ufsi->i_lastfrag = DIRECT_FRAGMENT; 430 ufsi->i_lastfrag = DIRECT_FRAGMENT;
471 unlock_kernel(); 431 unlock_kernel();
diff --git a/include/asm-arm/arch-s3c2410/hardware.h b/include/asm-arm/arch-s3c2410/hardware.h
index 1c9de29cafef..a2330bf83695 100644
--- a/include/asm-arm/arch-s3c2410/hardware.h
+++ b/include/asm-arm/arch-s3c2410/hardware.h
@@ -17,6 +17,7 @@
17 * 14-Sep-2004 BJD Added misccr and getpin to gpio 17 * 14-Sep-2004 BJD Added misccr and getpin to gpio
18 * 01-Oct-2004 BJD Added the new gpio functions 18 * 01-Oct-2004 BJD Added the new gpio functions
19 * 16-Oct-2004 BJD Removed the clock variables 19 * 16-Oct-2004 BJD Removed the clock variables
20 * 15-Jan-2006 LCVR Added s3c2400_gpio_getirq()
20*/ 21*/
21 22
22#ifndef __ASM_ARCH_HARDWARE_H 23#ifndef __ASM_ARCH_HARDWARE_H
@@ -55,6 +56,12 @@ extern unsigned int s3c2410_gpio_getcfg(unsigned int pin);
55 56
56extern int s3c2410_gpio_getirq(unsigned int pin); 57extern int s3c2410_gpio_getirq(unsigned int pin);
57 58
59#ifdef CONFIG_CPU_S3C2400
60
61extern int s3c2400_gpio_getirq(unsigned int pin);
62
63#endif /* CONFIG_CPU_S3C2400 */
64
58/* s3c2410_gpio_irqfilter 65/* s3c2410_gpio_irqfilter
59 * 66 *
60 * set the irq filtering on the given pin 67 * set the irq filtering on the given pin
diff --git a/include/asm-arm/arch-s3c2410/regs-gpio.h b/include/asm-arm/arch-s3c2410/regs-gpio.h
index 7f1be48ad67e..9697f93afe74 100644
--- a/include/asm-arm/arch-s3c2410/regs-gpio.h
+++ b/include/asm-arm/arch-s3c2410/regs-gpio.h
@@ -22,6 +22,7 @@
22 * 28-Mar-2005 LCVR Fixed definition of GPB10 22 * 28-Mar-2005 LCVR Fixed definition of GPB10
23 * 26-Oct-2005 BJD Added generic configuration types 23 * 26-Oct-2005 BJD Added generic configuration types
24 * 27-Nov-2005 LCVR Added definitions to S3C2400 registers 24 * 27-Nov-2005 LCVR Added definitions to S3C2400 registers
25 * 15-Jan-2006 LCVR Written S3C24XX_GPIO_BASE() macro
25*/ 26*/
26 27
27 28
@@ -39,6 +40,27 @@
39#define S3C2410_GPIO_BANKG (32*6) 40#define S3C2410_GPIO_BANKG (32*6)
40#define S3C2410_GPIO_BANKH (32*7) 41#define S3C2410_GPIO_BANKH (32*7)
41 42
43#ifdef CONFIG_CPU_S3C2400
44#define S3C24XX_GPIO_BASE(x) S3C2400_GPIO_BASE(x)
45#define S3C24XX_MISCCR S3C2400_MISCCR
46#else
47#define S3C24XX_GPIO_BASE(x) S3C2410_GPIO_BASE(x)
48#define S3C24XX_MISCCR S3C2410_MISCCR
49#endif /* CONFIG_CPU_S3C2400 */
50
51
52/* S3C2400 doesn't have a 1:1 mapping to S3C2410 gpio base pins */
53
54#define S3C2400_BANKNUM(pin) (((pin) & ~31) / 32)
55#define S3C2400_BASEA2B(pin) ((((pin) & ~31) >> 2))
56#define S3C2400_BASEC2H(pin) ((S3C2400_BANKNUM(pin) * 10) + \
57 (2 * (S3C2400_BANKNUM(pin)-2)))
58
59#define S3C2400_GPIO_BASE(pin) (pin < S3C2410_GPIO_BANKC ? \
60 S3C2400_BASEA2B(pin)+S3C24XX_VA_GPIO : \
61 S3C2400_BASEC2H(pin)+S3C24XX_VA_GPIO)
62
63
42#define S3C2410_GPIO_BASE(pin) ((((pin) & ~31) >> 1) + S3C24XX_VA_GPIO) 64#define S3C2410_GPIO_BASE(pin) ((((pin) & ~31) >> 1) + S3C24XX_VA_GPIO)
43#define S3C2410_GPIO_OFFSET(pin) ((pin) & 31) 65#define S3C2410_GPIO_OFFSET(pin) ((pin) & 31)
44 66
diff --git a/include/asm-arm/checksum.h b/include/asm-arm/checksum.h
index d4256d5f3a7c..747bdd31a74b 100644
--- a/include/asm-arm/checksum.h
+++ b/include/asm-arm/checksum.h
@@ -77,7 +77,7 @@ ip_fast_csum(unsigned char * iph, unsigned int ihl)
77 mov %0, %0, lsr #16" 77 mov %0, %0, lsr #16"
78 : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1) 78 : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
79 : "1" (iph), "2" (ihl) 79 : "1" (iph), "2" (ihl)
80 : "cc"); 80 : "cc", "memory");
81 return sum; 81 return sum;
82} 82}
83 83
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index d3eb0f1e4208..b7fef1572dc0 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
@@ -290,7 +290,7 @@ static inline int find_next_zero_bit (const unsigned long * addr, int size, int
290 tmp = *p; 290 tmp = *p;
291 291
292 found_first: 292 found_first:
293 tmp |= ~0UL >> size; 293 tmp |= ~0UL << size;
294 found_middle: 294 found_middle:
295 return result + ffz(tmp); 295 return result + ffz(tmp);
296} 296}
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index 02be7b3a8a83..f686b519878e 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
@@ -209,7 +209,7 @@ static inline int find_next_zero_bit(const void *addr, int size, int offset)
209 tmp = *p; 209 tmp = *p;
210 210
211found_first: 211found_first:
212 tmp |= ~0UL >> size; 212 tmp |= ~0UL << size;
213found_middle: 213found_middle:
214 return result + ffz(tmp); 214 return result + ffz(tmp);
215} 215}
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h
index c0411ec9d651..ff7c2b721594 100644
--- a/include/asm-h8300/bitops.h
+++ b/include/asm-h8300/bitops.h
@@ -227,7 +227,7 @@ static __inline__ int find_next_zero_bit (const unsigned long * addr, int size,
227 tmp = *p; 227 tmp = *p;
228 228
229found_first: 229found_first:
230 tmp |= ~0UL >> size; 230 tmp |= ~0UL << size;
231found_middle: 231found_middle:
232 return result + ffz(tmp); 232 return result + ffz(tmp);
233} 233}
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 36a92ed6a9d0..399145a247f2 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -507,7 +507,7 @@ struct alt_instr {
507#define smp_rmb() rmb() 507#define smp_rmb() rmb()
508#define smp_wmb() wmb() 508#define smp_wmb() wmb()
509#define smp_read_barrier_depends() read_barrier_depends() 509#define smp_read_barrier_depends() read_barrier_depends()
510#define set_mb(var, value) do { xchg(&var, value); } while (0) 510#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
511#else 511#else
512#define smp_mb() barrier() 512#define smp_mb() barrier()
513#define smp_rmb() barrier() 513#define smp_rmb() barrier()
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index d7e19eb344b7..af503a122b23 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -27,6 +27,15 @@
27#ifndef _ASM_I386_TOPOLOGY_H 27#ifndef _ASM_I386_TOPOLOGY_H
28#define _ASM_I386_TOPOLOGY_H 28#define _ASM_I386_TOPOLOGY_H
29 29
30#ifdef CONFIG_SMP
31#define topology_physical_package_id(cpu) \
32 (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu])
33#define topology_core_id(cpu) \
34 (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
35#define topology_core_siblings(cpu) (cpu_core_map[cpu])
36#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
37#endif
38
30#ifdef CONFIG_NUMA 39#ifdef CONFIG_NUMA
31 40
32#include <asm/mpspec.h> 41#include <asm/mpspec.h>
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h
index e62b95301d51..93f45c5f189f 100644
--- a/include/asm-ia64/ide.h
+++ b/include/asm-ia64/ide.h
@@ -17,14 +17,6 @@
17 17
18#include <linux/irq.h> 18#include <linux/irq.h>
19 19
20#ifndef MAX_HWIFS
21# ifdef CONFIG_PCI
22#define MAX_HWIFS 10
23# else
24#define MAX_HWIFS 6
25# endif
26#endif
27
28#define IDE_ARCH_OBSOLETE_DEFAULTS 20#define IDE_ARCH_OBSOLETE_DEFAULTS
29 21
30static inline int ide_default_irq(unsigned long base) 22static inline int ide_default_irq(unsigned long base)
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 412ef8e493a8..3ee19dfa46df 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -102,6 +102,13 @@ void build_cpu_to_node_map(void);
102 102
103#endif /* CONFIG_NUMA */ 103#endif /* CONFIG_NUMA */
104 104
105#ifdef CONFIG_SMP
106#define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
107#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
108#define topology_core_siblings(cpu) (cpu_core_map[cpu])
109#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
110#endif
111
105#include <asm-generic/topology.h> 112#include <asm-generic/topology.h>
106 113
107#endif /* _ASM_IA64_TOPOLOGY_H */ 114#endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 2ca56d34aaad..4dc7253ff5d0 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -1,9 +1,13 @@
1/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
2 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
3 */
4
1#ifndef _ASM_PARISC_ATOMIC_H_ 5#ifndef _ASM_PARISC_ATOMIC_H_
2#define _ASM_PARISC_ATOMIC_H_ 6#define _ASM_PARISC_ATOMIC_H_
3 7
4#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/types.h>
5#include <asm/system.h> 10#include <asm/system.h>
6/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */
7 11
8/* 12/*
9 * Atomic operations that C can't guarantee us. Useful for 13 * Atomic operations that C can't guarantee us. Useful for
@@ -46,15 +50,6 @@ extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
46# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) 50# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
47#endif 51#endif
48 52
49/* Note that we need not lock read accesses - aligned word writes/reads
50 * are atomic, so a reader never sees unconsistent values.
51 *
52 * Cache-line alignment would conflict with, for example, linux/module.h
53 */
54
55typedef struct { volatile int counter; } atomic_t;
56
57
58/* This should get optimized out since it's never called. 53/* This should get optimized out since it's never called.
59** Or get a link error if xchg is used "wrong". 54** Or get a link error if xchg is used "wrong".
60*/ 55*/
@@ -69,10 +64,9 @@ extern unsigned long __xchg64(unsigned long, unsigned long *);
69#endif 64#endif
70 65
71/* optimizer better get rid of switch since size is a constant */ 66/* optimizer better get rid of switch since size is a constant */
72static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, 67static __inline__ unsigned long
73 int size) 68__xchg(unsigned long x, __volatile__ void * ptr, int size)
74{ 69{
75
76 switch(size) { 70 switch(size) {
77#ifdef __LP64__ 71#ifdef __LP64__
78 case 8: return __xchg64(x,(unsigned long *) ptr); 72 case 8: return __xchg64(x,(unsigned long *) ptr);
@@ -129,7 +123,13 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
129 (unsigned long)_n_, sizeof(*(ptr))); \ 123 (unsigned long)_n_, sizeof(*(ptr))); \
130 }) 124 })
131 125
126/* Note that we need not lock read accesses - aligned word writes/reads
127 * are atomic, so a reader never sees unconsistent values.
128 *
129 * Cache-line alignment would conflict with, for example, linux/module.h
130 */
132 131
132typedef struct { volatile int counter; } atomic_t;
133 133
134/* It's possible to reduce all atomic operations to either 134/* It's possible to reduce all atomic operations to either
135 * __atomic_add_return, atomic_set and atomic_read (the latter 135 * __atomic_add_return, atomic_set and atomic_read (the latter
@@ -210,12 +210,66 @@ static __inline__ int atomic_read(const atomic_t *v)
210 210
211#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) 211#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
212 212
213#define ATOMIC_INIT(i) { (i) } 213#define ATOMIC_INIT(i) ((atomic_t) { (i) })
214 214
215#define smp_mb__before_atomic_dec() smp_mb() 215#define smp_mb__before_atomic_dec() smp_mb()
216#define smp_mb__after_atomic_dec() smp_mb() 216#define smp_mb__after_atomic_dec() smp_mb()
217#define smp_mb__before_atomic_inc() smp_mb() 217#define smp_mb__before_atomic_inc() smp_mb()
218#define smp_mb__after_atomic_inc() smp_mb() 218#define smp_mb__after_atomic_inc() smp_mb()
219 219
220#ifdef __LP64__
221
222typedef struct { volatile s64 counter; } atomic64_t;
223
224#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
225
226static __inline__ int
227__atomic64_add_return(s64 i, atomic64_t *v)
228{
229 int ret;
230 unsigned long flags;
231 _atomic_spin_lock_irqsave(v, flags);
232
233 ret = (v->counter += i);
234
235 _atomic_spin_unlock_irqrestore(v, flags);
236 return ret;
237}
238
239static __inline__ void
240atomic64_set(atomic64_t *v, s64 i)
241{
242 unsigned long flags;
243 _atomic_spin_lock_irqsave(v, flags);
244
245 v->counter = i;
246
247 _atomic_spin_unlock_irqrestore(v, flags);
248}
249
250static __inline__ s64
251atomic64_read(const atomic64_t *v)
252{
253 return v->counter;
254}
255
256#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)i),(v))))
257#define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)i),(v))))
258#define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v))))
259#define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v))))
260
261#define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)i),(v)))
262#define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)i),(v)))
263#define atomic64_inc_return(v) (__atomic64_add_return( 1,(v)))
264#define atomic64_dec_return(v) (__atomic64_add_return( -1,(v)))
265
266#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
267
268#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
269#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
270
271#endif /* __LP64__ */
272
220#include <asm-generic/atomic.h> 273#include <asm-generic/atomic.h>
221#endif 274
275#endif /* _ASM_PARISC_ATOMIC_H_ */
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index 1bc3c83ee74b..c53af9ff41b5 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -183,4 +183,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
183 __flush_cache_page(vma, vmaddr); 183 __flush_cache_page(vma, vmaddr);
184 184
185} 185}
186
187#ifdef CONFIG_DEBUG_RODATA
188void mark_rodata_ro(void);
186#endif 189#endif
190
191#endif /* _PARISC_CACHEFLUSH_H */
192
diff --git a/include/asm-parisc/compat_ucontext.h b/include/asm-parisc/compat_ucontext.h
index a1228a3d2071..2f7292afde3c 100644
--- a/include/asm-parisc/compat_ucontext.h
+++ b/include/asm-parisc/compat_ucontext.h
@@ -1,8 +1,7 @@
1#ifndef _ASM_PARISC_COMPAT_UCONTEXT_H 1#ifndef _ASM_PARISC_COMPAT_UCONTEXT_H
2#define _ASM_PARISC_COMPAT_UCONTEXT_H 2#define _ASM_PARISC_COMPAT_UCONTEXT_H
3 3
4#include<linux/compat.h> 4#include <linux/compat.h>
5#include<asm/compat_signal.h>
6 5
7/* 32-bit ucontext as seen from an 64-bit kernel */ 6/* 32-bit ucontext as seen from an 64-bit kernel */
8struct compat_ucontext { 7struct compat_ucontext {
diff --git a/include/asm-parisc/grfioctl.h b/include/asm-parisc/grfioctl.h
index 6a910311b56b..671e06042b40 100644
--- a/include/asm-parisc/grfioctl.h
+++ b/include/asm-parisc/grfioctl.h
@@ -58,7 +58,7 @@
58#define CRT_ID_ELK_1024DB 0x27849CA5 /* Elk 1024x768 double buffer */ 58#define CRT_ID_ELK_1024DB 0x27849CA5 /* Elk 1024x768 double buffer */
59#define CRT_ID_ELK_GS S9000_ID_A1924A /* Elk 1280x1024 GreyScale */ 59#define CRT_ID_ELK_GS S9000_ID_A1924A /* Elk 1280x1024 GreyScale */
60#define CRT_ID_CRX24 S9000_ID_A1439A /* Piranha */ 60#define CRT_ID_CRX24 S9000_ID_A1439A /* Piranha */
61#define CRT_ID_VISUALIZE_EG 0x2D08C0A7 /* Graffiti (built-in B132+/B160L) */ 61#define CRT_ID_VISUALIZE_EG 0x2D08C0A7 /* Graffiti, A4450A (built-in B132+/B160L) */
62#define CRT_ID_THUNDER 0x2F23E5FC /* Thunder 1 VISUALIZE 48*/ 62#define CRT_ID_THUNDER 0x2F23E5FC /* Thunder 1 VISUALIZE 48*/
63#define CRT_ID_THUNDER2 0x2F8D570E /* Thunder 2 VISUALIZE 48 XP*/ 63#define CRT_ID_THUNDER2 0x2F8D570E /* Thunder 2 VISUALIZE 48 XP*/
64#define CRT_ID_HCRX S9000_ID_HCRX /* Hyperdrive HCRX */ 64#define CRT_ID_HCRX S9000_ID_HCRX /* Hyperdrive HCRX */
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index f277254159b7..fe7f6a2f5aa7 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -18,6 +18,18 @@
18*/ 18*/
19#define PCI_MAX_BUSSES 256 19#define PCI_MAX_BUSSES 256
20 20
21
22/* To be used as: mdelay(pci_post_reset_delay);
23 *
24 * post_reset is the time the kernel should stall to prevent anyone from
25 * accessing the PCI bus once #RESET is de-asserted.
26 * PCI spec somewhere says 1 second but with multi-PCI bus systems,
27 * this makes the boot time much longer than necessary.
28 * 20ms seems to work for all the HP PCI implementations to date.
29 */
30#define pci_post_reset_delay 50
31
32
21/* 33/*
22** pci_hba_data (aka H2P_OBJECT in HP/UX) 34** pci_hba_data (aka H2P_OBJECT in HP/UX)
23** 35**
@@ -83,7 +95,7 @@ static __inline__ int pci_is_lmmio(struct pci_hba_data *hba, unsigned long a)
83 95
84/* 96/*
85** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses. 97** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses.
86** See pcibios.c for more conversions used by Generic PCI code. 98** See pci.c for more conversions used by Generic PCI code.
87** 99**
88** Platform characteristics/firmware guarantee that 100** Platform characteristics/firmware guarantee that
89** (1) PA_VIEW - IO_VIEW = lmmio_offset for both LMMIO and ELMMIO 101** (1) PA_VIEW - IO_VIEW = lmmio_offset for both LMMIO and ELMMIO
@@ -191,9 +203,6 @@ struct pci_bios_ops {
191*/ 203*/
192extern struct pci_port_ops *pci_port; 204extern struct pci_port_ops *pci_port;
193extern struct pci_bios_ops *pci_bios; 205extern struct pci_bios_ops *pci_bios;
194extern int pci_post_reset_delay; /* delay after de-asserting #RESET */
195extern int pci_hba_count;
196extern struct pci_hba_data *parisc_pci_hba[];
197 206
198#ifdef CONFIG_PCI 207#ifdef CONFIG_PCI
199extern void pcibios_register_hba(struct pci_hba_data *); 208extern void pcibios_register_hba(struct pci_hba_data *);
diff --git a/include/asm-parisc/pgalloc.h b/include/asm-parisc/pgalloc.h
index 6291d6692e5d..3122fad38a1b 100644
--- a/include/asm-parisc/pgalloc.h
+++ b/include/asm-parisc/pgalloc.h
@@ -137,7 +137,6 @@ static inline void pte_free_kernel(pte_t *pte)
137 137
138#define pte_free(page) pte_free_kernel(page_address(page)) 138#define pte_free(page) pte_free_kernel(page_address(page))
139 139
140extern int do_check_pgt_cache(int, int);
141#define check_pgt_cache() do { } while (0) 140#define check_pgt_cache() do { } while (0)
142 141
143#endif 142#endif
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index b4554711c3e7..4e34c6b44059 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -213,7 +213,7 @@ extern void *vmalloc_start;
213#define PAGE_COPY PAGE_EXECREAD 213#define PAGE_COPY PAGE_EXECREAD
214#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) 214#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
215#define PAGE_KERNEL __pgprot(_PAGE_KERNEL) 215#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
216#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED) 216#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
217#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) 217#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
218#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ) 218#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
219#define PAGE_FLUSH __pgprot(_PAGE_FLUSH) 219#define PAGE_FLUSH __pgprot(_PAGE_FLUSH)
diff --git a/include/asm-parisc/rt_sigframe.h b/include/asm-parisc/rt_sigframe.h
index 5623c032b64c..f0dd3b30f6c4 100644
--- a/include/asm-parisc/rt_sigframe.h
+++ b/include/asm-parisc/rt_sigframe.h
@@ -1,10 +1,6 @@
1#ifndef _ASM_PARISC_RT_SIGFRAME_H 1#ifndef _ASM_PARISC_RT_SIGFRAME_H
2#define _ASM_PARISC_RT_SIGFRAME_H 2#define _ASM_PARISC_RT_SIGFRAME_H
3 3
4#ifdef CONFIG_COMPAT
5#include <asm/compat_rt_sigframe.h>
6#endif
7
8#define SIGRETURN_TRAMP 4 4#define SIGRETURN_TRAMP 4
9#define SIGRESTARTBLOCK_TRAMP 5 5#define SIGRESTARTBLOCK_TRAMP 5
10#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP) 6#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP)
diff --git a/include/asm-parisc/unistd.h b/include/asm-parisc/unistd.h
index 80b7b98c70a1..c56fccbf34ad 100644
--- a/include/asm-parisc/unistd.h
+++ b/include/asm-parisc/unistd.h
@@ -761,8 +761,27 @@
761#define __NR_keyctl (__NR_Linux + 266) 761#define __NR_keyctl (__NR_Linux + 266)
762#define __NR_ioprio_set (__NR_Linux + 267) 762#define __NR_ioprio_set (__NR_Linux + 267)
763#define __NR_ioprio_get (__NR_Linux + 268) 763#define __NR_ioprio_get (__NR_Linux + 268)
764#define __NR_inotify_init (__NR_Linux + 269)
765#define __NR_inotify_add_watch (__NR_Linux + 270)
766#define __NR_inotify_rm_watch (__NR_Linux + 271)
767#define __NR_migrate_pages (__NR_Linux + 272)
768#define __NR_pselect6 (__NR_Linux + 273)
769#define __NR_ppoll (__NR_Linux + 274)
770#define __NR_openat (__NR_Linux + 275)
771#define __NR_mkdirat (__NR_Linux + 276)
772#define __NR_mknodat (__NR_Linux + 277)
773#define __NR_fchownat (__NR_Linux + 278)
774#define __NR_futimesat (__NR_Linux + 279)
775#define __NR_newfstatat (__NR_Linux + 280)
776#define __NR_unlinkat (__NR_Linux + 281)
777#define __NR_renameat (__NR_Linux + 282)
778#define __NR_linkat (__NR_Linux + 283)
779#define __NR_symlinkat (__NR_Linux + 284)
780#define __NR_readlinkat (__NR_Linux + 285)
781#define __NR_fchmodat (__NR_Linux + 286)
782#define __NR_faccessat (__NR_Linux + 287)
764 783
765#define __NR_Linux_syscalls 269 784#define __NR_Linux_syscalls 288
766 785
767#define HPUX_GATEWAY_ADDR 0xC0000004 786#define HPUX_GATEWAY_ADDR 0xC0000004
768#define LINUX_GATEWAY_ADDR 0x100 787#define LINUX_GATEWAY_ADDR 0x100
diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h
index 1630c26e8f45..c744ff33b1df 100644
--- a/include/asm-s390/dasd.h
+++ b/include/asm-s390/dasd.h
@@ -204,7 +204,8 @@ typedef struct attrib_data_t {
204 * 204 *
205 * Here ist how the ioctl-nr should be used: 205 * Here ist how the ioctl-nr should be used:
206 * 0 - 31 DASD driver itself 206 * 0 - 31 DASD driver itself
207 * 32 - 239 still open 207 * 32 - 229 still open
208 * 230 - 239 DASD extended error reporting
208 * 240 - 255 reserved for EMC 209 * 240 - 255 reserved for EMC
209 *******************************************************************************/ 210 *******************************************************************************/
210 211
@@ -236,12 +237,22 @@ typedef struct attrib_data_t {
236#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t) 237#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t)
237/* Get Attributes (cache operations) */ 238/* Get Attributes (cache operations) */
238#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t) 239#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t)
240/* retrieve extended error-reporting value */
241#define BIODASDEERGET _IOR(DASD_IOCTL_LETTER,6,int)
239 242
240 243
241/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */ 244/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */
242#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) 245#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t)
243/* Set Attributes (cache operations) */ 246/* Set Attributes (cache operations) */
244#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) 247#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
248/* retrieve extended error-reporting value */
249#define BIODASDEERSET _IOW(DASD_IOCTL_LETTER,3,int)
250
251
252/* remove all records from the eer buffer */
253#define DASD_EER_PURGE _IO(DASD_IOCTL_LETTER,230)
254/* set the number of pages that are used for the internal eer buffer */
255#define DASD_EER_SETBUFSIZE _IOW(DASD_IOCTL_LETTER,230,int)
245 256
246 257
247#endif /* DASD_H */ 258#endif /* DASD_H */
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
index 71f55eb2350a..b05825dd16d7 100644
--- a/include/asm-s390/io.h
+++ b/include/asm-s390/io.h
@@ -90,10 +90,16 @@ extern void iounmap(void *addr);
90#define readb_relaxed(addr) readb(addr) 90#define readb_relaxed(addr) readb(addr)
91#define readw_relaxed(addr) readw(addr) 91#define readw_relaxed(addr) readw(addr)
92#define readl_relaxed(addr) readl(addr) 92#define readl_relaxed(addr) readl(addr)
93#define __raw_readb readb
94#define __raw_readw readw
95#define __raw_readl readl
93 96
94#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b)) 97#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
95#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b)) 98#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
96#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b)) 99#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
100#define __raw_writeb writeb
101#define __raw_writew writew
102#define __raw_writel writel
97 103
98#define memset_io(a,b,c) memset(__io_virt(a),(b),(c)) 104#define memset_io(a,b,c) memset(__io_virt(a),(b),(c))
99#define memcpy_fromio(a,b,c) memcpy((a),__io_virt(b),(c)) 105#define memcpy_fromio(a,b,c) memcpy((a),__io_virt(b),(c))
diff --git a/include/asm-s390/timer.h b/include/asm-s390/timer.h
index ea0788967c51..fcd6c256a2d1 100644
--- a/include/asm-s390/timer.h
+++ b/include/asm-s390/timer.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * include/asm-s390/timer.h 2 * include/asm-s390/timer.h
3 * 3 *
4 * (C) Copyright IBM Corp. 2003 4 * (C) Copyright IBM Corp. 2003,2006
5 * Virtual CPU timer 5 * Virtual CPU timer
6 * 6 *
7 * Author: Jan Glauber (jang@de.ibm.com) 7 * Author: Jan Glauber (jang@de.ibm.com)
@@ -10,6 +10,8 @@
10#ifndef _ASM_S390_TIMER_H 10#ifndef _ASM_S390_TIMER_H
11#define _ASM_S390_TIMER_H 11#define _ASM_S390_TIMER_H
12 12
13#ifdef __KERNEL__
14
13#include <linux/timer.h> 15#include <linux/timer.h>
14 16
15#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL) 17#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
@@ -43,4 +45,6 @@ extern void add_virt_timer_periodic(void *new);
43extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires); 45extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
44extern int del_virt_timer(struct vtimer_list *timer); 46extern int del_virt_timer(struct vtimer_list *timer);
45 47
46#endif 48#endif /* __KERNEL__ */
49
50#endif /* _ASM_S390_TIMER_H */
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h
index 8955d2376ac8..609b9e87222a 100644
--- a/include/asm-v850/bitops.h
+++ b/include/asm-v850/bitops.h
@@ -188,7 +188,7 @@ static inline int find_next_zero_bit(const void *addr, int size, int offset)
188 tmp = *p; 188 tmp = *p;
189 189
190 found_first: 190 found_first:
191 tmp |= ~0UL >> size; 191 tmp |= ~0UL << size;
192 found_middle: 192 found_middle:
193 return result + ffz (tmp); 193 return result + ffz (tmp);
194} 194}
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 4f6a4dc455bb..bdbd8935612a 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -17,6 +17,7 @@
17#define APIC_DEBUG 2 17#define APIC_DEBUG 2
18 18
19extern int apic_verbosity; 19extern int apic_verbosity;
20extern int apic_runs_main_timer;
20 21
21/* 22/*
22 * Define the default level of output to be very little 23 * Define the default level of output to be very little
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 41c0ac8559be..76bb6193ae91 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -61,7 +61,7 @@
61#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ 61#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
62#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 62#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
63#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ 63#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
64/* 4 free */ 64#define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */
65#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */ 65#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
66#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */ 66#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
67 67
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
index 8661b476fb40..8689951e3503 100644
--- a/include/asm-x86_64/hardirq.h
+++ b/include/asm-x86_64/hardirq.h
@@ -16,23 +16,6 @@
16#define set_softirq_pending(x) write_pda(__softirq_pending, (x)) 16#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
17#define or_softirq_pending(x) or_pda(__softirq_pending, (x)) 17#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
18 18
19/* 19extern void ack_bad_irq(unsigned int irq);
20 * 'what should we do if we get a hw irq event on an illegal vector'. 20
21 * each architecture has to answer this themselves.
22 */
23static inline void ack_bad_irq(unsigned int irq)
24{
25 printk("unexpected IRQ trap at vector %02x\n", irq);
26#ifdef CONFIG_X86_LOCAL_APIC
27 /*
28 * Currently unexpected vectors happen only on SMP and APIC.
29 * We _must_ ack these because every local APIC has only N
30 * irq slots per priority level, and a 'hanging, unacked' IRQ
31 * holds up an irq slot - in excessive cases (when multiple
32 * unexpected vectors occur) that might lock up the APIC
33 * completely.
34 */
35 ack_APIC_irq();
36#endif
37}
38#endif /* __ASM_HARDIRQ_H */ 21#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index ae28cd44bcd3..c564bae03433 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -1,8 +1,9 @@
1#ifndef _X86_64_KEXEC_H 1#ifndef _X86_64_KEXEC_H
2#define _X86_64_KEXEC_H 2#define _X86_64_KEXEC_H
3 3
4#include <linux/string.h>
5
4#include <asm/page.h> 6#include <asm/page.h>
5#include <asm/proto.h>
6#include <asm/ptrace.h> 7#include <asm/ptrace.h>
7 8
8/* 9/*
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 115e496c6139..c99832e7bf3f 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -41,10 +41,18 @@ extern void iommu_hole_init(void);
41 41
42extern void time_init_gtod(void); 42extern void time_init_gtod(void);
43extern int pmtimer_mark_offset(void); 43extern int pmtimer_mark_offset(void);
44extern void pmtimer_resume(void);
45extern void pmtimer_wait(unsigned);
44extern unsigned int do_gettimeoffset_pm(void); 46extern unsigned int do_gettimeoffset_pm(void);
47#ifdef CONFIG_X86_PM_TIMER
45extern u32 pmtmr_ioport; 48extern u32 pmtmr_ioport;
49#else
50#define pmtmr_ioport 0
51#endif
46extern unsigned long long monotonic_base; 52extern unsigned long long monotonic_base;
47extern int sysctl_vsyscall; 53extern int sysctl_vsyscall;
54extern int nohpet;
55extern unsigned long vxtime_hz;
48 56
49extern void do_softirq_thunk(void); 57extern void do_softirq_thunk(void);
50 58
@@ -65,6 +73,9 @@ extern void free_bootmem_generic(unsigned long phys, unsigned len);
65 73
66extern void load_gs_index(unsigned gs); 74extern void load_gs_index(unsigned gs);
67 75
76extern void stop_timer_interrupt(void);
77extern void main_timer_handler(struct pt_regs *regs);
78
68extern unsigned long end_pfn_map; 79extern unsigned long end_pfn_map;
69 80
70extern void show_trace(unsigned long * rsp); 81extern void show_trace(unsigned long * rsp);
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index a73f0c789d8b..b7f66034ae7a 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -327,7 +327,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
327#define wmb() asm volatile("" ::: "memory") 327#define wmb() asm volatile("" ::: "memory")
328#endif 328#endif
329#define read_barrier_depends() do {} while(0) 329#define read_barrier_depends() do {} while(0)
330#define set_mb(var, value) do { xchg(&var, value); } while (0) 330#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
331#define set_wmb(var, value) do { var = value; wmb(); } while (0) 331#define set_wmb(var, value) do { var = value; wmb(); } while (0)
332 332
333#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) 333#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 2fa7f27381b4..c642f5d9882d 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -57,6 +57,15 @@ extern int __node_distance(int, int);
57 57
58#endif 58#endif
59 59
60#ifdef CONFIG_SMP
61#define topology_physical_package_id(cpu) \
62 (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu])
63#define topology_core_id(cpu) \
64 (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
65#define topology_core_siblings(cpu) (cpu_core_map[cpu])
66#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
67#endif
68
60#include <asm-generic/topology.h> 69#include <asm-generic/topology.h>
61 70
62#endif 71#endif
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 6a2a19f14bb2..208650b1ad3a 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -81,7 +81,7 @@ static inline int generic_fls64(__u64 x)
81{ 81{
82 __u32 h = x >> 32; 82 __u32 h = x >> 32;
83 if (h) 83 if (h)
84 return fls(x) + 32; 84 return fls(h) + 32;
85 return fls(x); 85 return fls(x);
86} 86}
87 87
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index acffb8c9073a..a7f015027535 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -126,7 +126,7 @@ extern struct config_item *config_group_find_obj(struct config_group *, const ch
126 126
127 127
128struct configfs_attribute { 128struct configfs_attribute {
129 char *ca_name; 129 const char *ca_name;
130 struct module *ca_owner; 130 struct module *ca_owner;
131 mode_t ca_mode; 131 mode_t ca_mode;
132}; 132};
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index a3ed5e059d47..a3f09947940e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -108,7 +108,9 @@ struct dentry {
108 struct dentry_operations *d_op; 108 struct dentry_operations *d_op;
109 struct super_block *d_sb; /* The root of the dentry tree */ 109 struct super_block *d_sb; /* The root of the dentry tree */
110 void *d_fsdata; /* fs-specific data */ 110 void *d_fsdata; /* fs-specific data */
111#ifdef CONFIG_PROFILING
111 struct dcookie_struct *d_cookie; /* cookie, if any */ 112 struct dcookie_struct *d_cookie; /* cookie, if any */
113#endif
112 int d_mounted; 114 int d_mounted;
113 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ 115 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
114}; 116};
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index dbd7bb4a33b7..0cf0bea010fe 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -5,6 +5,7 @@
5#include <linux/signal.h> 5#include <linux/signal.h>
6#include <linux/time.h> 6#include <linux/time.h>
7#include <linux/user.h> 7#include <linux/user.h>
8#include <linux/ptrace.h>
8 9
9struct elf_siginfo 10struct elf_siginfo
10{ 11{
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 9ba806796667..5a9d8c599171 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -1115,9 +1115,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
1115 return ERR_PTR(-ENOMEM); 1115 return ERR_PTR(-ENOMEM);
1116 1116
1117 mmsg->mfa = readl(c->in_port); 1117 mmsg->mfa = readl(c->in_port);
1118 if (mmsg->mfa == I2O_QUEUE_EMPTY) { 1118 if (unlikely(mmsg->mfa >= c->in_queue.len)) {
1119 mempool_free(mmsg, c->in_msg.mempool); 1119 mempool_free(mmsg, c->in_msg.mempool);
1120 return ERR_PTR(-EBUSY); 1120 if(mmsg->mfa == I2O_QUEUE_EMPTY)
1121 return ERR_PTR(-EBUSY);
1122 return ERR_PTR(-EFAULT);
1121 } 1123 }
1122 1124
1123 return &mmsg->msg; 1125 return &mmsg->msg;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 110b3cfac021..a7fc4cc79b23 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -582,7 +582,6 @@ typedef struct ide_drive_s {
582 unsigned noprobe : 1; /* from: hdx=noprobe */ 582 unsigned noprobe : 1; /* from: hdx=noprobe */
583 unsigned removable : 1; /* 1 if need to do check_media_change */ 583 unsigned removable : 1; /* 1 if need to do check_media_change */
584 unsigned attach : 1; /* needed for removable devices */ 584 unsigned attach : 1; /* needed for removable devices */
585 unsigned is_flash : 1; /* 1 if probed as flash */
586 unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */ 585 unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */
587 unsigned no_unmask : 1; /* disallow setting unmask bit */ 586 unsigned no_unmask : 1; /* disallow setting unmask bit */
588 unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */ 587 unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
@@ -1006,7 +1005,6 @@ extern ide_hwif_t ide_hwifs[]; /* master data repository */
1006extern int noautodma; 1005extern int noautodma;
1007 1006
1008extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); 1007extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
1009extern int __ide_end_request (ide_drive_t *drive, struct request *rq, int uptodate, int nrsecs);
1010 1008
1011/* 1009/*
1012 * This is used on exit from the driver to designate the next irq handler 1010 * This is used on exit from the driver to designate the next irq handler
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 558cb4c26ec9..751bb3849467 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -23,6 +23,7 @@
23#define jfs_debug jbd_debug 23#define jfs_debug jbd_debug
24#else 24#else
25 25
26#include <linux/types.h>
26#include <linux/buffer_head.h> 27#include <linux/buffer_head.h>
27#include <linux/journal-head.h> 28#include <linux/journal-head.h>
28#include <linux/stddef.h> 29#include <linux/stddef.h>
@@ -618,6 +619,7 @@ struct transaction_s
618 * @j_wbuf: array of buffer_heads for journal_commit_transaction 619 * @j_wbuf: array of buffer_heads for journal_commit_transaction
619 * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the 620 * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
620 * number that will fit in j_blocksize 621 * number that will fit in j_blocksize
622 * @j_last_sync_writer: most recent pid which did a synchronous write
621 * @j_private: An opaque pointer to fs-private information. 623 * @j_private: An opaque pointer to fs-private information.
622 */ 624 */
623 625
@@ -807,6 +809,8 @@ struct journal_s
807 struct buffer_head **j_wbuf; 809 struct buffer_head **j_wbuf;
808 int j_wbufsize; 810 int j_wbufsize;
809 811
812 pid_t j_last_sync_writer;
813
810 /* 814 /*
811 * An opaque pointer to fs-private information. ext3 puts its 815 * An opaque pointer to fs-private information. ext3 puts its
812 * superblock pointer here 816 * superblock pointer here
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index 45f625d7d0b2..3aed37314ab8 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -151,6 +151,11 @@ extern unsigned int keymap_count;
151 151
152static inline void con_schedule_flip(struct tty_struct *t) 152static inline void con_schedule_flip(struct tty_struct *t)
153{ 153{
154 unsigned long flags;
155 spin_lock_irqsave(&t->buf.lock, flags);
156 if (t->buf.tail != NULL)
157 t->buf.tail->active = 0;
158 spin_unlock_irqrestore(&t->buf.lock, flags);
154 schedule_work(&t->buf.work); 159 schedule_work(&t->buf.work);
155} 160}
156 161
diff --git a/include/linux/list.h b/include/linux/list.h
index 945daa1f13dd..47208bd99f9e 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -34,9 +34,11 @@ struct list_head {
34#define LIST_HEAD(name) \ 34#define LIST_HEAD(name) \
35 struct list_head name = LIST_HEAD_INIT(name) 35 struct list_head name = LIST_HEAD_INIT(name)
36 36
37#define INIT_LIST_HEAD(ptr) do { \ 37static inline void INIT_LIST_HEAD(struct list_head *list)
38 (ptr)->next = (ptr); (ptr)->prev = (ptr); \ 38{
39} while (0) 39 list->next = list;
40 list->prev = list;
41}
40 42
41/* 43/*
42 * Insert a new entry between two known consecutive entries. 44 * Insert a new entry between two known consecutive entries.
@@ -534,7 +536,11 @@ struct hlist_node {
534#define HLIST_HEAD_INIT { .first = NULL } 536#define HLIST_HEAD_INIT { .first = NULL }
535#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } 537#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
536#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) 538#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
537#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL) 539static inline void INIT_HLIST_NODE(struct hlist_node *h)
540{
541 h->next = NULL;
542 h->pprev = NULL;
543}
538 544
539static inline int hlist_unhashed(const struct hlist_node *h) 545static inline int hlist_unhashed(const struct hlist_node *h)
540{ 546{
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 95c8fea293ba..920766cea79c 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -84,6 +84,7 @@ struct nlm_rqst {
84 struct nlm_args a_args; /* arguments */ 84 struct nlm_args a_args; /* arguments */
85 struct nlm_res a_res; /* result */ 85 struct nlm_res a_res; /* result */
86 struct nlm_wait * a_block; 86 struct nlm_wait * a_block;
87 unsigned int a_retries; /* Retry count */
87 char a_owner[NLMCLNT_OHSIZE]; 88 char a_owner[NLMCLNT_OHSIZE];
88}; 89};
89 90
@@ -148,7 +149,6 @@ struct nlm_rqst * nlmclnt_alloc_call(void);
148int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl); 149int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl);
149void nlmclnt_finish_block(struct nlm_rqst *req); 150void nlmclnt_finish_block(struct nlm_rqst *req);
150long nlmclnt_block(struct nlm_rqst *req, long timeout); 151long nlmclnt_block(struct nlm_rqst *req, long timeout);
151int nlmclnt_cancel(struct nlm_host *, struct file_lock *);
152u32 nlmclnt_grant(struct nlm_lock *); 152u32 nlmclnt_grant(struct nlm_lock *);
153void nlmclnt_recovery(struct nlm_host *, u32); 153void nlmclnt_recovery(struct nlm_host *, u32);
154int nlmclnt_reclaim(struct nlm_host *, struct file_lock *); 154int nlmclnt_reclaim(struct nlm_host *, struct file_lock *);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index ccd3e13de1e8..f38872abc126 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -21,24 +21,35 @@ struct mmc_command {
21 u32 arg; 21 u32 arg;
22 u32 resp[4]; 22 u32 resp[4];
23 unsigned int flags; /* expected response type */ 23 unsigned int flags; /* expected response type */
24#define MMC_RSP_NONE (0 << 0) 24#define MMC_RSP_PRESENT (1 << 0)
25#define MMC_RSP_SHORT (1 << 0) 25#define MMC_RSP_136 (1 << 1) /* 136 bit response */
26#define MMC_RSP_LONG (2 << 0) 26#define MMC_RSP_CRC (1 << 2) /* expect valid crc */
27#define MMC_RSP_MASK (3 << 0) 27#define MMC_RSP_BUSY (1 << 3) /* card may send busy */
28#define MMC_RSP_CRC (1 << 3) /* expect valid crc */ 28#define MMC_RSP_OPCODE (1 << 4) /* response contains opcode */
29#define MMC_RSP_BUSY (1 << 4) /* card may send busy */ 29#define MMC_CMD_MASK (3 << 5) /* command type */
30#define MMC_RSP_OPCODE (1 << 5) /* response contains opcode */ 30#define MMC_CMD_AC (0 << 5)
31#define MMC_CMD_ADTC (1 << 5)
32#define MMC_CMD_BC (2 << 5)
33#define MMC_CMD_BCR (3 << 5)
31 34
32/* 35/*
33 * These are the response types, and correspond to valid bit 36 * These are the response types, and correspond to valid bit
34 * patterns of the above flags. One additional valid pattern 37 * patterns of the above flags. One additional valid pattern
35 * is all zeros, which means we don't expect a response. 38 * is all zeros, which means we don't expect a response.
36 */ 39 */
37#define MMC_RSP_R1 (MMC_RSP_SHORT|MMC_RSP_CRC|MMC_RSP_OPCODE) 40#define MMC_RSP_NONE (0)
38#define MMC_RSP_R1B (MMC_RSP_SHORT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY) 41#define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
39#define MMC_RSP_R2 (MMC_RSP_LONG|MMC_RSP_CRC) 42#define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
40#define MMC_RSP_R3 (MMC_RSP_SHORT) 43#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
41#define MMC_RSP_R6 (MMC_RSP_SHORT|MMC_RSP_CRC) 44#define MMC_RSP_R3 (MMC_RSP_PRESENT)
45#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC)
46
47#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
48
49/*
50 * These are the command types.
51 */
52#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_TYPE)
42 53
43 unsigned int retries; /* max number of retries */ 54 unsigned int retries; /* max number of retries */
44 unsigned int error; /* command error */ 55 unsigned int error; /* command error */
diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h
index a14dc306545b..81c3f77f652c 100644
--- a/include/linux/mmc/protocol.h
+++ b/include/linux/mmc/protocol.h
@@ -79,7 +79,7 @@
79/* SD commands type argument response */ 79/* SD commands type argument response */
80 /* class 8 */ 80 /* class 8 */
81/* This is basically the same command as for MMC with some quirks. */ 81/* This is basically the same command as for MMC with some quirks. */
82#define SD_SEND_RELATIVE_ADDR 3 /* ac R6 */ 82#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */
83 83
84 /* Application commands */ 84 /* Application commands */
85#define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */ 85#define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */
diff --git a/include/linux/netfilter_ipv4/ipt_connbytes.h b/include/linux/netfilter_ipv4/ipt_connbytes.h
index b04dfa3083c9..f63e6ee91113 100644
--- a/include/linux/netfilter_ipv4/ipt_connbytes.h
+++ b/include/linux/netfilter_ipv4/ipt_connbytes.h
@@ -1,10 +1,10 @@
1#ifndef _IPT_CONNBYTES_H 1#ifndef _IPT_CONNBYTES_H
2#define _IPT_CONNBYTES_H 2#define _IPT_CONNBYTES_H
3 3
4#include <net/netfilter/xt_connbytes.h> 4#include <linux/netfilter/xt_connbytes.h>
5#define ipt_connbytes_what xt_connbytes_what 5#define ipt_connbytes_what xt_connbytes_what
6 6
7#define IPT_CONNBYTES_PKTS XT_CONNBYTES_PACKETS 7#define IPT_CONNBYTES_PKTS XT_CONNBYTES_PKTS
8#define IPT_CONNBYTES_BYTES XT_CONNBYTES_BYTES 8#define IPT_CONNBYTES_BYTES XT_CONNBYTES_BYTES
9#define IPT_CONNBYTES_AVGPKT XT_CONNBYTES_AVGPKT 9#define IPT_CONNBYTES_AVGPKT XT_CONNBYTES_AVGPKT
10 10
diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h
index 7fd1bec453f1..a3f6eff39d33 100644
--- a/include/linux/netfilter_ipv4/ipt_policy.h
+++ b/include/linux/netfilter_ipv4/ipt_policy.h
@@ -27,16 +27,22 @@ struct ipt_policy_spec
27 reqid:1; 27 reqid:1;
28}; 28};
29 29
30union ipt_policy_addr
31{
32 struct in_addr a4;
33 struct in6_addr a6;
34};
35
30struct ipt_policy_elem 36struct ipt_policy_elem
31{ 37{
32 u_int32_t saddr; 38 union ipt_policy_addr saddr;
33 u_int32_t smask; 39 union ipt_policy_addr smask;
34 u_int32_t daddr; 40 union ipt_policy_addr daddr;
35 u_int32_t dmask; 41 union ipt_policy_addr dmask;
36 u_int32_t spi; 42 u_int32_t spi;
37 u_int32_t reqid; 43 u_int32_t reqid;
38 u_int8_t proto; 44 u_int8_t proto;
39 u_int8_t mode; 45 u_int8_t mode;
40 46
41 struct ipt_policy_spec match; 47 struct ipt_policy_spec match;
42 struct ipt_policy_spec invert; 48 struct ipt_policy_spec invert;
diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h
index 5a93afcd2ff1..671bd818300f 100644
--- a/include/linux/netfilter_ipv6/ip6t_policy.h
+++ b/include/linux/netfilter_ipv6/ip6t_policy.h
@@ -27,16 +27,22 @@ struct ip6t_policy_spec
27 reqid:1; 27 reqid:1;
28}; 28};
29 29
30union ip6t_policy_addr
31{
32 struct in_addr a4;
33 struct in6_addr a6;
34};
35
30struct ip6t_policy_elem 36struct ip6t_policy_elem
31{ 37{
32 struct in6_addr saddr; 38 union ip6t_policy_addr saddr;
33 struct in6_addr smask; 39 union ip6t_policy_addr smask;
34 struct in6_addr daddr; 40 union ip6t_policy_addr daddr;
35 struct in6_addr dmask; 41 union ip6t_policy_addr dmask;
36 u_int32_t spi; 42 u_int32_t spi;
37 u_int32_t reqid; 43 u_int32_t reqid;
38 u_int8_t proto; 44 u_int8_t proto;
39 u_int8_t mode; 45 u_int8_t mode;
40 46
41 struct ip6t_policy_spec match; 47 struct ip6t_policy_spec match;
42 struct ip6t_policy_spec invert; 48 struct ip6t_policy_spec invert;
diff --git a/include/linux/parport.h b/include/linux/parport.h
index f67f838a3a1f..008d736a6c9a 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -128,6 +128,11 @@ struct amiga_parport_state {
128 unsigned char statusdir;/* ciab.ddrb & 7 */ 128 unsigned char statusdir;/* ciab.ddrb & 7 */
129}; 129};
130 130
131struct ip32_parport_state {
132 unsigned int dcr;
133 unsigned int ecr;
134};
135
131struct parport_state { 136struct parport_state {
132 union { 137 union {
133 struct pc_parport_state pc; 138 struct pc_parport_state pc;
@@ -135,6 +140,7 @@ struct parport_state {
135 struct ax_parport_state ax; 140 struct ax_parport_state ax;
136 struct amiga_parport_state amiga; 141 struct amiga_parport_state amiga;
137 /* Atari has not state. */ 142 /* Atari has not state. */
143 struct ip32_parport_state ip32;
138 void *misc; 144 void *misc;
139 } u; 145 } u;
140}; 146};
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b0b908f583c5..92a619ba163f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1670,6 +1670,9 @@
1670#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 1670#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060
1671#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 1671#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061
1672#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 1672#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062
1673#define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080
1674#define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
1675#define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
1673#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 1676#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
1674 1677
1675#define PCI_VENDOR_ID_RADISYS 0x1331 1678#define PCI_VENDOR_ID_RADISYS 0x1331
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 2c177e4c8f22..8a94c717c266 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -114,7 +114,7 @@ struct pkt_ctrl_command {
114 114
115struct packet_settings 115struct packet_settings
116{ 116{
117 __u8 size; /* packet size in (512 byte) sectors */ 117 __u32 size; /* packet size in (512 byte) sectors */
118 __u8 fp; /* fixed packets */ 118 __u8 fp; /* fixed packets */
119 __u8 link_loss; /* the rest is specified 119 __u8 link_loss; /* the rest is specified
120 * as per Mt Fuji */ 120 * as per Mt Fuji */
@@ -169,8 +169,8 @@ struct packet_iosched
169#if (PAGE_SIZE % CD_FRAMESIZE) != 0 169#if (PAGE_SIZE % CD_FRAMESIZE) != 0
170#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE" 170#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE"
171#endif 171#endif
172#define PACKET_MAX_SIZE 32 172#define PACKET_MAX_SIZE 128
173#define PAGES_PER_PACKET (PACKET_MAX_SIZE * CD_FRAMESIZE / PAGE_SIZE) 173#define FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE)
174#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9) 174#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
175 175
176enum packet_data_state { 176enum packet_data_state {
@@ -219,7 +219,7 @@ struct packet_data
219 atomic_t io_errors; /* Number of read/write errors during IO */ 219 atomic_t io_errors; /* Number of read/write errors during IO */
220 220
221 struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */ 221 struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
222 struct page *pages[PAGES_PER_PACKET]; 222 struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
223 223
224 int cache_valid; /* If non-zero, the data for the zone defined */ 224 int cache_valid; /* If non-zero, the data for the zone defined */
225 /* by the sector variable is completely cached */ 225 /* by the sector variable is completely cached */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 4f34d3d60f2e..21e5a9124856 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -190,7 +190,6 @@ static __inline__ int DQUOT_OFF(struct super_block *sb)
190 */ 190 */
191#define sb_dquot_ops (NULL) 191#define sb_dquot_ops (NULL)
192#define sb_quotactl_ops (NULL) 192#define sb_quotactl_ops (NULL)
193#define sync_dquots_dev(dev,type) (NULL)
194#define DQUOT_INIT(inode) do { } while(0) 193#define DQUOT_INIT(inode) do { } while(0)
195#define DQUOT_DROP(inode) do { } while(0) 194#define DQUOT_DROP(inode) do { } while(0)
196#define DQUOT_ALLOC_INODE(inode) (0) 195#define DQUOT_ALLOC_INODE(inode) (0)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 981f9aa43353..b87aefa082e2 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -240,11 +240,14 @@ extern int rcu_pending(int cpu);
240 * This means that all preempt_disable code sequences, including NMI and 240 * This means that all preempt_disable code sequences, including NMI and
241 * hardware-interrupt handlers, in progress on entry will have completed 241 * hardware-interrupt handlers, in progress on entry will have completed
242 * before this primitive returns. However, this does not guarantee that 242 * before this primitive returns. However, this does not guarantee that
243 * softirq handlers will have completed, since in some kernels 243 * softirq handlers will have completed, since in some kernels, these
244 * handlers can run in process context, and can block.
244 * 245 *
245 * This primitive provides the guarantees made by the (deprecated) 246 * This primitive provides the guarantees made by the (deprecated)
246 * synchronize_kernel() API. In contrast, synchronize_rcu() only 247 * synchronize_kernel() API. In contrast, synchronize_rcu() only
247 * guarantees that rcu_read_lock() sections will have completed. 248 * guarantees that rcu_read_lock() sections will have completed.
249 * In "classic RCU", these two guarantees happen to be one and
250 * the same, but can differ in realtime RCU implementations.
248 */ 251 */
249#define synchronize_sched() synchronize_rcu() 252#define synchronize_sched() synchronize_rcu()
250 253
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
index 0a3605099c44..806ec5b06707 100644
--- a/include/linux/reiserfs_acl.h
+++ b/include/linux/reiserfs_acl.h
@@ -58,9 +58,13 @@ extern struct reiserfs_xattr_handler posix_acl_default_handler;
58extern struct reiserfs_xattr_handler posix_acl_access_handler; 58extern struct reiserfs_xattr_handler posix_acl_access_handler;
59#else 59#else
60 60
61#define reiserfs_get_acl NULL
62#define reiserfs_cache_default_acl(inode) 0 61#define reiserfs_cache_default_acl(inode) 0
63 62
63static inline struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
64{
65 return NULL;
66}
67
64static inline int reiserfs_xattr_posix_acl_init(void) 68static inline int reiserfs_xattr_posix_acl_init(void)
65{ 69{
66 return 0; 70 return 0;
diff --git a/include/linux/security.h b/include/linux/security.h
index bb1da86747c7..7cbef482e13a 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1499,15 +1499,11 @@ static inline void security_sb_post_pivotroot (struct nameidata *old_nd,
1499 1499
1500static inline int security_inode_alloc (struct inode *inode) 1500static inline int security_inode_alloc (struct inode *inode)
1501{ 1501{
1502 if (unlikely (IS_PRIVATE (inode)))
1503 return 0;
1504 return security_ops->inode_alloc_security (inode); 1502 return security_ops->inode_alloc_security (inode);
1505} 1503}
1506 1504
1507static inline void security_inode_free (struct inode *inode) 1505static inline void security_inode_free (struct inode *inode)
1508{ 1506{
1509 if (unlikely (IS_PRIVATE (inode)))
1510 return;
1511 security_ops->inode_free_security (inode); 1507 security_ops->inode_free_security (inode);
1512} 1508}
1513 1509
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index b68c11a2d6dd..be4772ed43c0 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -48,7 +48,7 @@ struct rpc_cred {
48 48
49 /* per-flavor data */ 49 /* per-flavor data */
50}; 50};
51#define RPCAUTH_CRED_LOCKED 0x0001 51#define RPCAUTH_CRED_NEW 0x0001
52#define RPCAUTH_CRED_UPTODATE 0x0002 52#define RPCAUTH_CRED_UPTODATE 0x0002
53 53
54#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 54#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0
@@ -83,9 +83,10 @@ struct rpc_auth {
83 struct rpc_cred_cache * au_credcache; 83 struct rpc_cred_cache * au_credcache;
84 /* per-flavor data */ 84 /* per-flavor data */
85}; 85};
86#define RPC_AUTH_PROC_CREDS 0x0010 /* process creds (including 86
87 * uid/gid, fs[ug]id, gids) 87/* Flags for rpcauth_lookupcred() */
88 */ 88#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
89#define RPCAUTH_LOOKUP_ROOTCREDS 0x02 /* This really ought to go! */
89 90
90/* 91/*
91 * Client authentication ops 92 * Client authentication ops
@@ -105,6 +106,7 @@ struct rpc_authops {
105 106
106struct rpc_credops { 107struct rpc_credops {
107 const char * cr_name; /* Name of the auth flavour */ 108 const char * cr_name; /* Name of the auth flavour */
109 int (*cr_init)(struct rpc_auth *, struct rpc_cred *);
108 void (*crdestroy)(struct rpc_cred *); 110 void (*crdestroy)(struct rpc_cred *);
109 111
110 int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); 112 int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5dc94e777fab..43bcd13eb1ec 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -42,10 +42,6 @@ extern void mark_free_pages(struct zone *zone);
42#ifdef CONFIG_PM 42#ifdef CONFIG_PM
43/* kernel/power/swsusp.c */ 43/* kernel/power/swsusp.c */
44extern int software_suspend(void); 44extern int software_suspend(void);
45
46extern int pm_prepare_console(void);
47extern void pm_restore_console(void);
48
49#else 45#else
50static inline int software_suspend(void) 46static inline int software_suspend(void)
51{ 47{
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3787102e4b12..a7bd3b4558d2 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -57,6 +57,7 @@ struct tty_buffer {
57 unsigned char *flag_buf_ptr; 57 unsigned char *flag_buf_ptr;
58 int used; 58 int used;
59 int size; 59 int size;
60 int active;
60 /* Data points here */ 61 /* Data points here */
61 unsigned long data[0]; 62 unsigned long data[0];
62}; 63};
@@ -64,6 +65,7 @@ struct tty_buffer {
64struct tty_bufhead { 65struct tty_bufhead {
65 struct work_struct work; 66 struct work_struct work;
66 struct semaphore pty_sem; 67 struct semaphore pty_sem;
68 spinlock_t lock;
67 struct tty_buffer *head; /* Queue head */ 69 struct tty_buffer *head; /* Queue head */
68 struct tty_buffer *tail; /* Active buffer */ 70 struct tty_buffer *tail; /* Active buffer */
69 struct tty_buffer *free; /* Free queue head */ 71 struct tty_buffer *free; /* Free queue head */
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index be1400e82482..82961eb19888 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -17,7 +17,7 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
17 unsigned char ch, char flag) 17 unsigned char ch, char flag)
18{ 18{
19 struct tty_buffer *tb = tty->buf.tail; 19 struct tty_buffer *tb = tty->buf.tail;
20 if (tb && tb->used < tb->size) { 20 if (tb && tb->active && tb->used < tb->size) {
21 tb->flag_buf_ptr[tb->used] = flag; 21 tb->flag_buf_ptr[tb->used] = flag;
22 tb->char_buf_ptr[tb->used++] = ch; 22 tb->char_buf_ptr[tb->used++] = ch;
23 return 1; 23 return 1;
@@ -27,6 +27,11 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
27 27
28_INLINE_ void tty_schedule_flip(struct tty_struct *tty) 28_INLINE_ void tty_schedule_flip(struct tty_struct *tty)
29{ 29{
30 unsigned long flags;
31 spin_lock_irqsave(&tty->buf.lock, flags);
32 if (tty->buf.tail != NULL)
33 tty->buf.tail->active = 0;
34 spin_unlock_irqrestore(&tty->buf.lock, flags);
30 schedule_delayed_work(&tty->buf.work, 1); 35 schedule_delayed_work(&tty->buf.work, 1);
31} 36}
32 37
diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h
index 7a6babeca256..b0ffe4356e5a 100644
--- a/include/linux/ufs_fs.h
+++ b/include/linux/ufs_fs.h
@@ -148,11 +148,11 @@ typedef __u16 __bitwise __fs16;
148#define UFS_USEEFT ((__u16)65535) 148#define UFS_USEEFT ((__u16)65535)
149 149
150#define UFS_FSOK 0x7c269d38 150#define UFS_FSOK 0x7c269d38
151#define UFS_FSACTIVE ((char)0x00) 151#define UFS_FSACTIVE ((__s8)0x00)
152#define UFS_FSCLEAN ((char)0x01) 152#define UFS_FSCLEAN ((__s8)0x01)
153#define UFS_FSSTABLE ((char)0x02) 153#define UFS_FSSTABLE ((__s8)0x02)
154#define UFS_FSOSF1 ((char)0x03) /* is this correct for DEC OSF/1? */ 154#define UFS_FSOSF1 ((__s8)0x03) /* is this correct for DEC OSF/1? */
155#define UFS_FSBAD ((char)0xff) 155#define UFS_FSBAD ((__s8)0xff)
156 156
157/* From here to next blank line, s_flags for ufs_sb_info */ 157/* From here to next blank line, s_flags for ufs_sb_info */
158/* directory entry encoding */ 158/* directory entry encoding */
@@ -502,8 +502,7 @@ struct ufs_super_block {
502/* 502/*
503 * Convert cylinder group to base address of its global summary info. 503 * Convert cylinder group to base address of its global summary info.
504 */ 504 */
505#define fs_cs(indx) \ 505#define fs_cs(indx) s_csp[(indx)]
506 s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask]
507 506
508/* 507/*
509 * Cylinder group block for a file system. 508 * Cylinder group block for a file system.
@@ -913,6 +912,7 @@ extern int ufs_sync_inode (struct inode *);
913extern void ufs_delete_inode (struct inode *); 912extern void ufs_delete_inode (struct inode *);
914extern struct buffer_head * ufs_getfrag (struct inode *, unsigned, int, int *); 913extern struct buffer_head * ufs_getfrag (struct inode *, unsigned, int, int *);
915extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *); 914extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *);
915extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create);
916 916
917/* namei.c */ 917/* namei.c */
918extern struct file_operations ufs_dir_operations; 918extern struct file_operations ufs_dir_operations;
diff --git a/include/linux/ufs_fs_sb.h b/include/linux/ufs_fs_sb.h
index c1be4c226486..8ff13c160f3d 100644
--- a/include/linux/ufs_fs_sb.h
+++ b/include/linux/ufs_fs_sb.h
@@ -25,7 +25,7 @@ struct ufs_csum;
25 25
26struct ufs_sb_info { 26struct ufs_sb_info {
27 struct ufs_sb_private_info * s_uspi; 27 struct ufs_sb_private_info * s_uspi;
28 struct ufs_csum * s_csp[UFS_MAXCSBUFS]; 28 struct ufs_csum * s_csp;
29 unsigned s_bytesex; 29 unsigned s_bytesex;
30 unsigned s_flags; 30 unsigned s_flags;
31 struct buffer_head ** s_ucg; 31 struct buffer_head ** s_ucg;
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 67856eb93b43..dac43b15a5b0 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -88,12 +88,6 @@ extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
88extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto); 88extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
89extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto); 89extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
90 90
91static inline struct nf_conntrack_l3proto *
92__nf_ct_l3proto_find(u_int16_t l3proto)
93{
94 return nf_ct_l3protos[l3proto];
95}
96
97extern struct nf_conntrack_l3proto * 91extern struct nf_conntrack_l3proto *
98nf_ct_l3proto_find_get(u_int16_t l3proto); 92nf_ct_l3proto_find_get(u_int16_t l3proto);
99 93
@@ -103,4 +97,13 @@ extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
103extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; 97extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
104extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; 98extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
105extern struct nf_conntrack_l3proto nf_conntrack_generic_l3proto; 99extern struct nf_conntrack_l3proto nf_conntrack_generic_l3proto;
100
101static inline struct nf_conntrack_l3proto *
102__nf_ct_l3proto_find(u_int16_t l3proto)
103{
104 if (unlikely(l3proto >= AF_MAX))
105 return &nf_conntrack_generic_l3proto;
106 return nf_ct_l3protos[l3proto];
107}
108
106#endif /*_NF_CONNTRACK_L3PROTO_H*/ 109#endif /*_NF_CONNTRACK_L3PROTO_H*/
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 8c522ae031bb..072f407848a6 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -700,7 +700,7 @@ struct sctp_chunk {
700 __u8 ecn_ce_done; /* Have we processed the ECN CE bit? */ 700 __u8 ecn_ce_done; /* Have we processed the ECN CE bit? */
701 __u8 pdiscard; /* Discard the whole packet now? */ 701 __u8 pdiscard; /* Discard the whole packet now? */
702 __u8 tsn_gap_acked; /* Is this chunk acked by a GAP ACK? */ 702 __u8 tsn_gap_acked; /* Is this chunk acked by a GAP ACK? */
703 __u8 fast_retransmit; /* Is this chunk fast retransmitted? */ 703 __s8 fast_retransmit; /* Is this chunk fast retransmitted? */
704 __u8 tsn_missing_report; /* Data chunk missing counter. */ 704 __u8 tsn_missing_report; /* Data chunk missing counter. */
705}; 705};
706 706
diff --git a/include/net/sock.h b/include/net/sock.h
index 1806e5b61419..30758035d616 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1354,12 +1354,12 @@ extern int sock_get_timestamp(struct sock *, struct timeval __user *);
1354 * Enable debug/info messages 1354 * Enable debug/info messages
1355 */ 1355 */
1356 1356
1357#if 0 1357#ifdef CONFIG_NETDEBUG
1358#define NETDEBUG(fmt, args...) do { } while (0)
1359#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
1360#else
1361#define NETDEBUG(fmt, args...) printk(fmt,##args) 1358#define NETDEBUG(fmt, args...) printk(fmt,##args)
1362#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0) 1359#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
1360#else
1361#define NETDEBUG(fmt, args...) do { } while (0)
1362#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
1363#endif 1363#endif
1364 1364
1365/* 1365/*
diff --git a/init/Kconfig b/init/Kconfig
index b9923b1434a2..8b7abae87bf9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -31,19 +31,8 @@ config EXPERIMENTAL
31 you say Y here, you will be offered the choice of using features or 31 you say Y here, you will be offered the choice of using features or
32 drivers that are currently considered to be in the alpha-test phase. 32 drivers that are currently considered to be in the alpha-test phase.
33 33
34config CLEAN_COMPILE
35 bool "Select only drivers expected to compile cleanly" if EXPERIMENTAL
36 default y
37 help
38 Select this option if you don't even want to see the option
39 to configure known-broken drivers.
40
41 If unsure, say Y
42
43config BROKEN 34config BROKEN
44 bool 35 bool
45 depends on !CLEAN_COMPILE
46 default y
47 36
48config BROKEN_ON_SMP 37config BROKEN_ON_SMP
49 bool 38 bool
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index fe2f71f92ae0..ba42b0a76961 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -641,7 +641,7 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
641 * task has been modifying its cpuset. 641 * task has been modifying its cpuset.
642 */ 642 */
643 643
644void cpuset_update_task_memory_state() 644void cpuset_update_task_memory_state(void)
645{ 645{
646 int my_cpusets_mem_gen; 646 int my_cpusets_mem_gen;
647 struct task_struct *tsk = current; 647 struct task_struct *tsk = current;
diff --git a/kernel/intermodule.c b/kernel/intermodule.c
index 0cbe633420fb..55b1e5b85db9 100644
--- a/kernel/intermodule.c
+++ b/kernel/intermodule.c
@@ -179,3 +179,6 @@ EXPORT_SYMBOL(inter_module_register);
179EXPORT_SYMBOL(inter_module_unregister); 179EXPORT_SYMBOL(inter_module_unregister);
180EXPORT_SYMBOL(inter_module_get_request); 180EXPORT_SYMBOL(inter_module_get_request);
181EXPORT_SYMBOL(inter_module_put); 181EXPORT_SYMBOL(inter_module_put);
182
183MODULE_LICENSE("GPL");
184
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3ea6325228da..fef1af8a73ce 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -344,23 +344,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
344 spin_unlock_irqrestore(&kretprobe_lock, flags); 344 spin_unlock_irqrestore(&kretprobe_lock, flags);
345} 345}
346 346
347/*
348 * This kprobe pre_handler is registered with every kretprobe. When probe
349 * hits it will set up the return probe.
350 */
351static int __kprobes pre_handler_kretprobe(struct kprobe *p,
352 struct pt_regs *regs)
353{
354 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
355 unsigned long flags = 0;
356
357 /*TODO: consider to only swap the RA after the last pre_handler fired */
358 spin_lock_irqsave(&kretprobe_lock, flags);
359 arch_prepare_kretprobe(rp, regs);
360 spin_unlock_irqrestore(&kretprobe_lock, flags);
361 return 0;
362}
363
364static inline void free_rp_inst(struct kretprobe *rp) 347static inline void free_rp_inst(struct kretprobe *rp)
365{ 348{
366 struct kretprobe_instance *ri; 349 struct kretprobe_instance *ri;
@@ -578,6 +561,23 @@ void __kprobes unregister_jprobe(struct jprobe *jp)
578 561
579#ifdef ARCH_SUPPORTS_KRETPROBES 562#ifdef ARCH_SUPPORTS_KRETPROBES
580 563
564/*
565 * This kprobe pre_handler is registered with every kretprobe. When probe
566 * hits it will set up the return probe.
567 */
568static int __kprobes pre_handler_kretprobe(struct kprobe *p,
569 struct pt_regs *regs)
570{
571 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
572 unsigned long flags = 0;
573
574 /*TODO: consider to only swap the RA after the last pre_handler fired */
575 spin_lock_irqsave(&kretprobe_lock, flags);
576 arch_prepare_kretprobe(rp, regs);
577 spin_unlock_irqrestore(&kretprobe_lock, flags);
578 return 0;
579}
580
581int __kprobes register_kretprobe(struct kretprobe *rp) 581int __kprobes register_kretprobe(struct kretprobe *rp)
582{ 582{
583 int ret = 0; 583 int ret = 0;
@@ -631,12 +631,12 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
631 unregister_kprobe(&rp->kp); 631 unregister_kprobe(&rp->kp);
632 /* No race here */ 632 /* No race here */
633 spin_lock_irqsave(&kretprobe_lock, flags); 633 spin_lock_irqsave(&kretprobe_lock, flags);
634 free_rp_inst(rp);
635 while ((ri = get_used_rp_inst(rp)) != NULL) { 634 while ((ri = get_used_rp_inst(rp)) != NULL) {
636 ri->rp = NULL; 635 ri->rp = NULL;
637 hlist_del(&ri->uflist); 636 hlist_del(&ri->uflist);
638 } 637 }
639 spin_unlock_irqrestore(&kretprobe_lock, flags); 638 spin_unlock_irqrestore(&kretprobe_lock, flags);
639 free_rp_inst(rp);
640} 640}
641 641
642static int __init init_kprobes(void) 642static int __init init_kprobes(void)
diff --git a/kernel/module.c b/kernel/module.c
index 618ed6e23ecc..e058aedf6b93 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2092,7 +2092,8 @@ static unsigned long mod_find_symname(struct module *mod, const char *name)
2092 unsigned int i; 2092 unsigned int i;
2093 2093
2094 for (i = 0; i < mod->num_symtab; i++) 2094 for (i = 0; i < mod->num_symtab; i++)
2095 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0) 2095 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
2096 mod->symtab[i].st_info != 'U')
2096 return mod->symtab[i].st_value; 2097 return mod->symtab[i].st_value;
2097 return 0; 2098 return 0;
2098} 2099}
diff --git a/kernel/sched.c b/kernel/sched.c
index f77f23f8f479..bc38804e40dd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5551,13 +5551,15 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
5551 -1 5551 -1
5552#endif 5552#endif
5553 ); 5553 );
5554 printk("migration_cost="); 5554 if (system_state == SYSTEM_BOOTING) {
5555 for (distance = 0; distance <= max_distance; distance++) { 5555 printk("migration_cost=");
5556 if (distance) 5556 for (distance = 0; distance <= max_distance; distance++) {
5557 printk(","); 5557 if (distance)
5558 printk("%ld", (long)migration_cost[distance] / 1000); 5558 printk(",");
5559 printk("%ld", (long)migration_cost[distance] / 1000);
5560 }
5561 printk("\n");
5559 } 5562 }
5560 printk("\n");
5561 j1 = jiffies; 5563 j1 = jiffies;
5562 if (migration_debug) 5564 if (migration_debug)
5563 printk("migration: %ld seconds\n", (j1-j0)/HZ); 5565 printk("migration: %ld seconds\n", (j1-j0)/HZ);
@@ -6109,7 +6111,7 @@ void __init sched_init(void)
6109 runqueue_t *rq; 6111 runqueue_t *rq;
6110 int i, j, k; 6112 int i, j, k;
6111 6113
6112 for (i = 0; i < NR_CPUS; i++) { 6114 for_each_cpu(i) {
6113 prio_array_t *array; 6115 prio_array_t *array;
6114 6116
6115 rq = cpu_rq(i); 6117 rq = cpu_rq(i);
diff --git a/kernel/signal.c b/kernel/signal.c
index d3efafd8109a..b373fc2420da 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -283,7 +283,7 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
283 return(q); 283 return(q);
284} 284}
285 285
286static inline void __sigqueue_free(struct sigqueue *q) 286static void __sigqueue_free(struct sigqueue *q)
287{ 287{
288 if (q->flags & SIGQUEUE_PREALLOC) 288 if (q->flags & SIGQUEUE_PREALLOC)
289 return; 289 return;
diff --git a/kernel/time.c b/kernel/time.c
index 1f23e683d6aa..804539165d8b 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -637,15 +637,16 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
637 * 637 *
638 * Returns the timespec representation of the nsec parameter. 638 * Returns the timespec representation of the nsec parameter.
639 */ 639 */
640inline struct timespec ns_to_timespec(const nsec_t nsec) 640struct timespec ns_to_timespec(const nsec_t nsec)
641{ 641{
642 struct timespec ts; 642 struct timespec ts;
643 643
644 if (nsec) 644 if (!nsec)
645 ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, 645 return (struct timespec) {0, 0};
646 &ts.tv_nsec); 646
647 else 647 ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
648 ts.tv_sec = ts.tv_nsec = 0; 648 if (unlikely(nsec < 0))
649 set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
649 650
650 return ts; 651 return ts;
651} 652}
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index a5d2cdc5684c..fd355a99327c 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -15,7 +15,7 @@ unsigned long int_sqrt(unsigned long x)
15 op = x; 15 op = x;
16 res = 0; 16 res = 0;
17 17
18 one = 1 << 30; 18 one = 1UL << (BITS_PER_LONG - 2);
19 while (one > op) 19 while (one > op)
20 one >>= 2; 20 one >>= 2;
21 21
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 8a8b3a16133e..c4c1ac5fbd1a 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -94,10 +94,28 @@ next: bs = bm->bad_shift[text[shift-i]];
94 return UINT_MAX; 94 return UINT_MAX;
95} 95}
96 96
97static int subpattern(u8 *pattern, int i, int j, int g)
98{
99 int x = i+g-1, y = j+g-1, ret = 0;
100
101 while(pattern[x--] == pattern[y--]) {
102 if (y < 0) {
103 ret = 1;
104 break;
105 }
106 if (--g == 0) {
107 ret = pattern[i-1] != pattern[j-1];
108 break;
109 }
110 }
111
112 return ret;
113}
114
97static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, 115static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
98 unsigned int len) 116 unsigned int len)
99{ 117{
100 int i, j, ended, l[ASIZE]; 118 int i, j, g;
101 119
102 for (i = 0; i < ASIZE; i++) 120 for (i = 0; i < ASIZE; i++)
103 bm->bad_shift[i] = len; 121 bm->bad_shift[i] = len;
@@ -106,23 +124,15 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
106 124
107 /* Compute the good shift array, used to match reocurrences 125 /* Compute the good shift array, used to match reocurrences
108 * of a subpattern */ 126 * of a subpattern */
109 for (i = 1; i < bm->patlen; i++) {
110 for (j = 0; j < bm->patlen && bm->pattern[bm->patlen - 1 - j]
111 == bm->pattern[bm->patlen - 1 - i - j]; j++);
112 l[i] = j;
113 }
114
115 bm->good_shift[0] = 1; 127 bm->good_shift[0] = 1;
116 for (i = 1; i < bm->patlen; i++) 128 for (i = 1; i < bm->patlen; i++)
117 bm->good_shift[i] = bm->patlen; 129 bm->good_shift[i] = bm->patlen;
118 for (i = bm->patlen - 1; i > 0; i--) 130 for (i = bm->patlen-1, g = 1; i > 0; g++, i--) {
119 bm->good_shift[l[i]] = i; 131 for (j = i-1; j >= 1-g ; j--)
120 ended = 0; 132 if (subpattern(bm->pattern, i, j, g)) {
121 for (i = 0; i < bm->patlen; i++) { 133 bm->good_shift[g] = bm->patlen-j-g;
122 if (l[i] == bm->patlen - 1 - i) 134 break;
123 ended = i; 135 }
124 if (ended)
125 bm->good_shift[i] = ended;
126 } 136 }
127} 137}
128 138
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b21d78c941b5..ceb3ebb3c399 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -444,6 +444,15 @@ retry:
444 page = alloc_huge_page(vma, address); 444 page = alloc_huge_page(vma, address);
445 if (!page) { 445 if (!page) {
446 hugetlb_put_quota(mapping); 446 hugetlb_put_quota(mapping);
447 /*
448 * No huge pages available. So this is an OOM
449 * condition but we do not want to trigger the OOM
450 * killer, so we return VM_FAULT_SIGBUS.
451 *
452 * A program using hugepages may fault with Bus Error
453 * because no huge pages are available in the cpuset, per
454 * memory policy or because all are in use!
455 */
447 goto out; 456 goto out;
448 } 457 }
449 458
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 27da6d5c77ba..3bd7fb7e4b75 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1159,6 +1159,7 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
1159 return interleave_nodes(pol); 1159 return interleave_nodes(pol);
1160} 1160}
1161 1161
1162#ifdef CONFIG_HUGETLBFS
1162/* Return a zonelist suitable for a huge page allocation. */ 1163/* Return a zonelist suitable for a huge page allocation. */
1163struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr) 1164struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
1164{ 1165{
@@ -1172,6 +1173,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
1172 } 1173 }
1173 return zonelist_policy(GFP_HIGHUSER, pol); 1174 return zonelist_policy(GFP_HIGHUSER, pol);
1174} 1175}
1176#endif
1175 1177
1176/* Allocate a page in interleaved policy. 1178/* Allocate a page in interleaved policy.
1177 Own path because it needs to do special accounting. */ 1179 Own path because it needs to do special accounting. */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44b4eb4202d9..dde04ff4be31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
1213{ 1213{
1214 int cpu = 0; 1214 int cpu = 0;
1215 1215
1216 memset(ret, 0, sizeof(*ret)); 1216 memset(ret, 0, nr * sizeof(unsigned long));
1217 cpus_and(*cpumask, *cpumask, cpu_online_map); 1217 cpus_and(*cpumask, *cpumask, cpu_online_map);
1218 1218
1219 cpu = first_cpu(*cpumask); 1219 cpu = first_cpu(*cpumask);
1220 while (cpu < NR_CPUS) { 1220 while (cpu < NR_CPUS) {
1221 unsigned long *in, *out, off; 1221 unsigned long *in, *out, off;
1222 1222
1223 if (!cpu_isset(cpu, *cpumask))
1224 continue;
1225
1223 in = (unsigned long *)&per_cpu(page_states, cpu); 1226 in = (unsigned long *)&per_cpu(page_states, cpu);
1224 1227
1225 cpu = next_cpu(cpu, *cpumask); 1228 cpu = next_cpu(cpu, *cpumask);
1226 1229
1227 if (cpu < NR_CPUS) 1230 if (likely(cpu < NR_CPUS))
1228 prefetch(&per_cpu(page_states, cpu)); 1231 prefetch(&per_cpu(page_states, cpu));
1229 1232
1230 out = (unsigned long *)ret; 1233 out = (unsigned long *)ret;
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
1886 * not check if the processor is online before following the pageset pointer. 1889 * not check if the processor is online before following the pageset pointer.
1887 * Other parts of the kernel may not check if the zone is available. 1890 * Other parts of the kernel may not check if the zone is available.
1888 */ 1891 */
1889static struct per_cpu_pageset 1892static struct per_cpu_pageset boot_pageset[NR_CPUS];
1890 boot_pageset[NR_CPUS];
1891 1893
1892/* 1894/*
1893 * Dynamically allocate memory for the 1895 * Dynamically allocate memory for the
diff --git a/mm/slab.c b/mm/slab.c
index 71370256a7eb..d66c2b0d9715 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -294,6 +294,7 @@ struct kmem_list3 {
294 unsigned long next_reap; 294 unsigned long next_reap;
295 int free_touched; 295 int free_touched;
296 unsigned int free_limit; 296 unsigned int free_limit;
297 unsigned int colour_next; /* Per-node cache coloring */
297 spinlock_t list_lock; 298 spinlock_t list_lock;
298 struct array_cache *shared; /* shared per node */ 299 struct array_cache *shared; /* shared per node */
299 struct array_cache **alien; /* on other nodes */ 300 struct array_cache **alien; /* on other nodes */
@@ -344,6 +345,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
344 INIT_LIST_HEAD(&parent->slabs_free); 345 INIT_LIST_HEAD(&parent->slabs_free);
345 parent->shared = NULL; 346 parent->shared = NULL;
346 parent->alien = NULL; 347 parent->alien = NULL;
348 parent->colour_next = 0;
347 spin_lock_init(&parent->list_lock); 349 spin_lock_init(&parent->list_lock);
348 parent->free_objects = 0; 350 parent->free_objects = 0;
349 parent->free_touched = 0; 351 parent->free_touched = 0;
@@ -390,7 +392,6 @@ struct kmem_cache {
390 392
391 size_t colour; /* cache colouring range */ 393 size_t colour; /* cache colouring range */
392 unsigned int colour_off; /* colour offset */ 394 unsigned int colour_off; /* colour offset */
393 unsigned int colour_next; /* cache colouring */
394 struct kmem_cache *slabp_cache; 395 struct kmem_cache *slabp_cache;
395 unsigned int slab_size; 396 unsigned int slab_size;
396 unsigned int dflags; /* dynamic flags */ 397 unsigned int dflags; /* dynamic flags */
@@ -883,14 +884,14 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
883 } 884 }
884} 885}
885 886
886static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3) 887static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
887{ 888{
888 int i = 0; 889 int i = 0;
889 struct array_cache *ac; 890 struct array_cache *ac;
890 unsigned long flags; 891 unsigned long flags;
891 892
892 for_each_online_node(i) { 893 for_each_online_node(i) {
893 ac = l3->alien[i]; 894 ac = alien[i];
894 if (ac) { 895 if (ac) {
895 spin_lock_irqsave(&ac->lock, flags); 896 spin_lock_irqsave(&ac->lock, flags);
896 __drain_alien_cache(cachep, ac, i); 897 __drain_alien_cache(cachep, ac, i);
@@ -899,9 +900,18 @@ static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
899 } 900 }
900} 901}
901#else 902#else
902#define alloc_alien_cache(node, limit) do { } while (0) 903
903#define free_alien_cache(ac_ptr) do { } while (0) 904#define drain_alien_cache(cachep, alien) do { } while (0)
904#define drain_alien_cache(cachep, l3) do { } while (0) 905
906static inline struct array_cache **alloc_alien_cache(int node, int limit)
907{
908 return (struct array_cache **) 0x01020304ul;
909}
910
911static inline void free_alien_cache(struct array_cache **ac_ptr)
912{
913}
914
905#endif 915#endif
906 916
907static int __devinit cpuup_callback(struct notifier_block *nfb, 917static int __devinit cpuup_callback(struct notifier_block *nfb,
@@ -935,6 +945,11 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
935 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 945 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
936 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 946 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
937 947
948 /*
949 * The l3s don't come and go as CPUs come and
950 * go. cache_chain_mutex is sufficient
951 * protection here.
952 */
938 cachep->nodelists[node] = l3; 953 cachep->nodelists[node] = l3;
939 } 954 }
940 955
@@ -949,26 +964,46 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
949 & array cache's */ 964 & array cache's */
950 list_for_each_entry(cachep, &cache_chain, next) { 965 list_for_each_entry(cachep, &cache_chain, next) {
951 struct array_cache *nc; 966 struct array_cache *nc;
967 struct array_cache *shared;
968 struct array_cache **alien;
952 969
953 nc = alloc_arraycache(node, cachep->limit, 970 nc = alloc_arraycache(node, cachep->limit,
954 cachep->batchcount); 971 cachep->batchcount);
955 if (!nc) 972 if (!nc)
956 goto bad; 973 goto bad;
974 shared = alloc_arraycache(node,
975 cachep->shared * cachep->batchcount,
976 0xbaadf00d);
977 if (!shared)
978 goto bad;
979
980 alien = alloc_alien_cache(node, cachep->limit);
981 if (!alien)
982 goto bad;
957 cachep->array[cpu] = nc; 983 cachep->array[cpu] = nc;
958 984
959 l3 = cachep->nodelists[node]; 985 l3 = cachep->nodelists[node];
960 BUG_ON(!l3); 986 BUG_ON(!l3);
961 if (!l3->shared) {
962 if (!(nc = alloc_arraycache(node,
963 cachep->shared *
964 cachep->batchcount,
965 0xbaadf00d)))
966 goto bad;
967 987
968 /* we are serialised from CPU_DEAD or 988 spin_lock_irq(&l3->list_lock);
969 CPU_UP_CANCELLED by the cpucontrol lock */ 989 if (!l3->shared) {
970 l3->shared = nc; 990 /*
991 * We are serialised from CPU_DEAD or
992 * CPU_UP_CANCELLED by the cpucontrol lock
993 */
994 l3->shared = shared;
995 shared = NULL;
996 }
997#ifdef CONFIG_NUMA
998 if (!l3->alien) {
999 l3->alien = alien;
1000 alien = NULL;
971 } 1001 }
1002#endif
1003 spin_unlock_irq(&l3->list_lock);
1004
1005 kfree(shared);
1006 free_alien_cache(alien);
972 } 1007 }
973 mutex_unlock(&cache_chain_mutex); 1008 mutex_unlock(&cache_chain_mutex);
974 break; 1009 break;
@@ -977,25 +1012,34 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
977 break; 1012 break;
978#ifdef CONFIG_HOTPLUG_CPU 1013#ifdef CONFIG_HOTPLUG_CPU
979 case CPU_DEAD: 1014 case CPU_DEAD:
1015 /*
1016 * Even if all the cpus of a node are down, we don't free the
1017 * kmem_list3 of any cache. This to avoid a race between
1018 * cpu_down, and a kmalloc allocation from another cpu for
1019 * memory from the node of the cpu going down. The list3
1020 * structure is usually allocated from kmem_cache_create() and
1021 * gets destroyed at kmem_cache_destroy().
1022 */
980 /* fall thru */ 1023 /* fall thru */
981 case CPU_UP_CANCELED: 1024 case CPU_UP_CANCELED:
982 mutex_lock(&cache_chain_mutex); 1025 mutex_lock(&cache_chain_mutex);
983 1026
984 list_for_each_entry(cachep, &cache_chain, next) { 1027 list_for_each_entry(cachep, &cache_chain, next) {
985 struct array_cache *nc; 1028 struct array_cache *nc;
1029 struct array_cache *shared;
1030 struct array_cache **alien;
986 cpumask_t mask; 1031 cpumask_t mask;
987 1032
988 mask = node_to_cpumask(node); 1033 mask = node_to_cpumask(node);
989 spin_lock_irq(&cachep->spinlock);
990 /* cpu is dead; no one can alloc from it. */ 1034 /* cpu is dead; no one can alloc from it. */
991 nc = cachep->array[cpu]; 1035 nc = cachep->array[cpu];
992 cachep->array[cpu] = NULL; 1036 cachep->array[cpu] = NULL;
993 l3 = cachep->nodelists[node]; 1037 l3 = cachep->nodelists[node];
994 1038
995 if (!l3) 1039 if (!l3)
996 goto unlock_cache; 1040 goto free_array_cache;
997 1041
998 spin_lock(&l3->list_lock); 1042 spin_lock_irq(&l3->list_lock);
999 1043
1000 /* Free limit for this kmem_list3 */ 1044 /* Free limit for this kmem_list3 */
1001 l3->free_limit -= cachep->batchcount; 1045 l3->free_limit -= cachep->batchcount;
@@ -1003,34 +1047,44 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
1003 free_block(cachep, nc->entry, nc->avail, node); 1047 free_block(cachep, nc->entry, nc->avail, node);
1004 1048
1005 if (!cpus_empty(mask)) { 1049 if (!cpus_empty(mask)) {
1006 spin_unlock(&l3->list_lock); 1050 spin_unlock_irq(&l3->list_lock);
1007 goto unlock_cache; 1051 goto free_array_cache;
1008 } 1052 }
1009 1053
1010 if (l3->shared) { 1054 shared = l3->shared;
1055 if (shared) {
1011 free_block(cachep, l3->shared->entry, 1056 free_block(cachep, l3->shared->entry,
1012 l3->shared->avail, node); 1057 l3->shared->avail, node);
1013 kfree(l3->shared);
1014 l3->shared = NULL; 1058 l3->shared = NULL;
1015 } 1059 }
1016 if (l3->alien) {
1017 drain_alien_cache(cachep, l3);
1018 free_alien_cache(l3->alien);
1019 l3->alien = NULL;
1020 }
1021 1060
1022 /* free slabs belonging to this node */ 1061 alien = l3->alien;
1023 if (__node_shrink(cachep, node)) { 1062 l3->alien = NULL;
1024 cachep->nodelists[node] = NULL; 1063
1025 spin_unlock(&l3->list_lock); 1064 spin_unlock_irq(&l3->list_lock);
1026 kfree(l3); 1065
1027 } else { 1066 kfree(shared);
1028 spin_unlock(&l3->list_lock); 1067 if (alien) {
1068 drain_alien_cache(cachep, alien);
1069 free_alien_cache(alien);
1029 } 1070 }
1030 unlock_cache: 1071free_array_cache:
1031 spin_unlock_irq(&cachep->spinlock);
1032 kfree(nc); 1072 kfree(nc);
1033 } 1073 }
1074 /*
1075 * In the previous loop, all the objects were freed to
1076 * the respective cache's slabs, now we can go ahead and
1077 * shrink each nodelist to its limit.
1078 */
1079 list_for_each_entry(cachep, &cache_chain, next) {
1080 l3 = cachep->nodelists[node];
1081 if (!l3)
1082 continue;
1083 spin_lock_irq(&l3->list_lock);
1084 /* free slabs belonging to this node */
1085 __node_shrink(cachep, node);
1086 spin_unlock_irq(&l3->list_lock);
1087 }
1034 mutex_unlock(&cache_chain_mutex); 1088 mutex_unlock(&cache_chain_mutex);
1035 break; 1089 break;
1036#endif 1090#endif
@@ -1119,7 +1173,6 @@ void __init kmem_cache_init(void)
1119 BUG(); 1173 BUG();
1120 1174
1121 cache_cache.colour = left_over / cache_cache.colour_off; 1175 cache_cache.colour = left_over / cache_cache.colour_off;
1122 cache_cache.colour_next = 0;
1123 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1176 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1124 sizeof(struct slab), cache_line_size()); 1177 sizeof(struct slab), cache_line_size());
1125 1178
@@ -2011,18 +2064,16 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
2011 2064
2012 smp_call_function_all_cpus(do_drain, cachep); 2065 smp_call_function_all_cpus(do_drain, cachep);
2013 check_irq_on(); 2066 check_irq_on();
2014 spin_lock_irq(&cachep->spinlock);
2015 for_each_online_node(node) { 2067 for_each_online_node(node) {
2016 l3 = cachep->nodelists[node]; 2068 l3 = cachep->nodelists[node];
2017 if (l3) { 2069 if (l3) {
2018 spin_lock(&l3->list_lock); 2070 spin_lock_irq(&l3->list_lock);
2019 drain_array_locked(cachep, l3->shared, 1, node); 2071 drain_array_locked(cachep, l3->shared, 1, node);
2020 spin_unlock(&l3->list_lock); 2072 spin_unlock_irq(&l3->list_lock);
2021 if (l3->alien) 2073 if (l3->alien)
2022 drain_alien_cache(cachep, l3); 2074 drain_alien_cache(cachep, l3->alien);
2023 } 2075 }
2024 } 2076 }
2025 spin_unlock_irq(&cachep->spinlock);
2026} 2077}
2027 2078
2028static int __node_shrink(struct kmem_cache *cachep, int node) 2079static int __node_shrink(struct kmem_cache *cachep, int node)
@@ -2324,20 +2375,20 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
2324 */ 2375 */
2325 ctor_flags |= SLAB_CTOR_ATOMIC; 2376 ctor_flags |= SLAB_CTOR_ATOMIC;
2326 2377
2327 /* About to mess with non-constant members - lock. */ 2378 /* Take the l3 list lock to change the colour_next on this node */
2328 check_irq_off(); 2379 check_irq_off();
2329 spin_lock(&cachep->spinlock); 2380 l3 = cachep->nodelists[nodeid];
2381 spin_lock(&l3->list_lock);
2330 2382
2331 /* Get colour for the slab, and cal the next value. */ 2383 /* Get colour for the slab, and cal the next value. */
2332 offset = cachep->colour_next; 2384 offset = l3->colour_next;
2333 cachep->colour_next++; 2385 l3->colour_next++;
2334 if (cachep->colour_next >= cachep->colour) 2386 if (l3->colour_next >= cachep->colour)
2335 cachep->colour_next = 0; 2387 l3->colour_next = 0;
2336 offset *= cachep->colour_off; 2388 spin_unlock(&l3->list_lock);
2337 2389
2338 spin_unlock(&cachep->spinlock); 2390 offset *= cachep->colour_off;
2339 2391
2340 check_irq_off();
2341 if (local_flags & __GFP_WAIT) 2392 if (local_flags & __GFP_WAIT)
2342 local_irq_enable(); 2393 local_irq_enable();
2343 2394
@@ -2367,7 +2418,6 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
2367 if (local_flags & __GFP_WAIT) 2418 if (local_flags & __GFP_WAIT)
2368 local_irq_disable(); 2419 local_irq_disable();
2369 check_irq_off(); 2420 check_irq_off();
2370 l3 = cachep->nodelists[nodeid];
2371 spin_lock(&l3->list_lock); 2421 spin_lock(&l3->list_lock);
2372 2422
2373 /* Make slab active. */ 2423 /* Make slab active. */
@@ -2725,6 +2775,7 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
2725 BUG_ON(!l3); 2775 BUG_ON(!l3);
2726 2776
2727 retry: 2777 retry:
2778 check_irq_off();
2728 spin_lock(&l3->list_lock); 2779 spin_lock(&l3->list_lock);
2729 entry = l3->slabs_partial.next; 2780 entry = l3->slabs_partial.next;
2730 if (entry == &l3->slabs_partial) { 2781 if (entry == &l3->slabs_partial) {
@@ -3304,11 +3355,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount
3304 smp_call_function_all_cpus(do_ccupdate_local, (void *)&new); 3355 smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
3305 3356
3306 check_irq_on(); 3357 check_irq_on();
3307 spin_lock_irq(&cachep->spinlock); 3358 spin_lock(&cachep->spinlock);
3308 cachep->batchcount = batchcount; 3359 cachep->batchcount = batchcount;
3309 cachep->limit = limit; 3360 cachep->limit = limit;
3310 cachep->shared = shared; 3361 cachep->shared = shared;
3311 spin_unlock_irq(&cachep->spinlock); 3362 spin_unlock(&cachep->spinlock);
3312 3363
3313 for_each_online_cpu(i) { 3364 for_each_online_cpu(i) {
3314 struct array_cache *ccold = new.new[i]; 3365 struct array_cache *ccold = new.new[i];
@@ -3440,7 +3491,7 @@ static void cache_reap(void *unused)
3440 3491
3441 l3 = searchp->nodelists[numa_node_id()]; 3492 l3 = searchp->nodelists[numa_node_id()];
3442 if (l3->alien) 3493 if (l3->alien)
3443 drain_alien_cache(searchp, l3); 3494 drain_alien_cache(searchp, l3->alien);
3444 spin_lock_irq(&l3->list_lock); 3495 spin_lock_irq(&l3->list_lock);
3445 3496
3446 drain_array_locked(searchp, cpu_cache_get(searchp), 0, 3497 drain_array_locked(searchp, cpu_cache_get(searchp), 0,
@@ -3564,8 +3615,7 @@ static int s_show(struct seq_file *m, void *p)
3564 int node; 3615 int node;
3565 struct kmem_list3 *l3; 3616 struct kmem_list3 *l3;
3566 3617
3567 check_irq_on(); 3618 spin_lock(&cachep->spinlock);
3568 spin_lock_irq(&cachep->spinlock);
3569 active_objs = 0; 3619 active_objs = 0;
3570 num_slabs = 0; 3620 num_slabs = 0;
3571 for_each_online_node(node) { 3621 for_each_online_node(node) {
@@ -3573,7 +3623,8 @@ static int s_show(struct seq_file *m, void *p)
3573 if (!l3) 3623 if (!l3)
3574 continue; 3624 continue;
3575 3625
3576 spin_lock(&l3->list_lock); 3626 check_irq_on();
3627 spin_lock_irq(&l3->list_lock);
3577 3628
3578 list_for_each(q, &l3->slabs_full) { 3629 list_for_each(q, &l3->slabs_full) {
3579 slabp = list_entry(q, struct slab, list); 3630 slabp = list_entry(q, struct slab, list);
@@ -3598,9 +3649,10 @@ static int s_show(struct seq_file *m, void *p)
3598 num_slabs++; 3649 num_slabs++;
3599 } 3650 }
3600 free_objects += l3->free_objects; 3651 free_objects += l3->free_objects;
3601 shared_avail += l3->shared->avail; 3652 if (l3->shared)
3653 shared_avail += l3->shared->avail;
3602 3654
3603 spin_unlock(&l3->list_lock); 3655 spin_unlock_irq(&l3->list_lock);
3604 } 3656 }
3605 num_slabs += active_slabs; 3657 num_slabs += active_slabs;
3606 num_objs = num_slabs * cachep->num; 3658 num_objs = num_slabs * cachep->num;
@@ -3644,7 +3696,7 @@ static int s_show(struct seq_file *m, void *p)
3644 } 3696 }
3645#endif 3697#endif
3646 seq_putc(m, '\n'); 3698 seq_putc(m, '\n');
3647 spin_unlock_irq(&cachep->spinlock); 3699 spin_unlock(&cachep->spinlock);
3648 return 0; 3700 return 0;
3649} 3701}
3650 3702
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 4d638944d933..34e42968b477 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -59,8 +59,10 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
59 proto = find_snap_client(skb->h.raw); 59 proto = find_snap_client(skb->h.raw);
60 if (proto) { 60 if (proto) {
61 /* Pass the frame on. */ 61 /* Pass the frame on. */
62 u8 *hdr = skb->data;
62 skb->h.raw += 5; 63 skb->h.raw += 5;
63 skb_pull(skb, 5); 64 skb_pull(skb, 5);
65 skb_postpull_rcsum(skb, hdr, 5);
64 rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev); 66 rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
65 } else { 67 } else {
66 skb->sk = NULL; 68 skb->sk = NULL;
diff --git a/net/Kconfig b/net/Kconfig
index bc603d9aea56..5126f58d9c44 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -27,6 +27,13 @@ if NET
27 27
28menu "Networking options" 28menu "Networking options"
29 29
30config NETDEBUG
31 bool "Network packet debugging"
32 help
33 You can say Y here if you want to get additional messages useful in
34 debugging bad packets, but can overwhelm logs under denial of service
35 attacks.
36
30source "net/packet/Kconfig" 37source "net/packet/Kconfig"
31source "net/unix/Kconfig" 38source "net/unix/Kconfig"
32source "net/xfrm/Kconfig" 39source "net/xfrm/Kconfig"
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce617b3dbbb8..802baf755ef4 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -46,7 +46,7 @@
46#define PRINTR(format, args...) do { if (net_ratelimit()) \ 46#define PRINTR(format, args...) do { if (net_ratelimit()) \
47 printk(format , ## args); } while (0) 47 printk(format , ## args); } while (0)
48 48
49static unsigned int nlbufsiz = 4096; 49static unsigned int nlbufsiz = NLMSG_GOODSIZE;
50module_param(nlbufsiz, uint, 0600); 50module_param(nlbufsiz, uint, 0600);
51MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) " 51MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
52 "(defaults to 4096)"); 52 "(defaults to 4096)");
@@ -98,12 +98,14 @@ static void ulog_timer(unsigned long data)
98static struct sk_buff *ulog_alloc_skb(unsigned int size) 98static struct sk_buff *ulog_alloc_skb(unsigned int size)
99{ 99{
100 struct sk_buff *skb; 100 struct sk_buff *skb;
101 unsigned int n;
101 102
102 skb = alloc_skb(nlbufsiz, GFP_ATOMIC); 103 n = max(size, nlbufsiz);
104 skb = alloc_skb(n, GFP_ATOMIC);
103 if (!skb) { 105 if (!skb) {
104 PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer " 106 PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
105 "of size %ub!\n", nlbufsiz); 107 "of size %ub!\n", n);
106 if (size < nlbufsiz) { 108 if (n > size) {
107 /* try to allocate only as much as we need for 109 /* try to allocate only as much as we need for
108 * current packet */ 110 * current packet */
109 skb = alloc_skb(size, GFP_ATOMIC); 111 skb = alloc_skb(size, GFP_ATOMIC);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 00729b3604f8..cbd4020cc84d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -934,6 +934,13 @@ static int do_replace(void __user *user, unsigned int len)
934 BUGPRINT("Entries_size never zero\n"); 934 BUGPRINT("Entries_size never zero\n");
935 return -EINVAL; 935 return -EINVAL;
936 } 936 }
937 /* overflow check */
938 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
939 SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
940 return -ENOMEM;
941 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
942 return -ENOMEM;
943
937 countersize = COUNTER_OFFSET(tmp.nentries) * 944 countersize = COUNTER_OFFSET(tmp.nentries) *
938 (highest_possible_processor_id()+1); 945 (highest_possible_processor_id()+1);
939 newinfo = (struct ebt_table_info *) 946 newinfo = (struct ebt_table_info *)
diff --git a/net/core/dev.c b/net/core/dev.c
index ffb82073056e..2afb0de95329 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
3237 * Initialise the packet receive queues. 3237 * Initialise the packet receive queues.
3238 */ 3238 */
3239 3239
3240 for (i = 0; i < NR_CPUS; i++) { 3240 for_each_cpu(i) {
3241 struct softnet_data *queue; 3241 struct softnet_data *queue;
3242 3242
3243 queue = &per_cpu(softnet_data, i); 3243 queue = &per_cpu(softnet_data, i);
diff --git a/net/core/utils.c b/net/core/utils.c
index ac1d1fcf8673..fdc4f38bc46c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
121{ 121{
122 int i; 122 int i;
123 123
124 for (i = 0; i < NR_CPUS; i++) { 124 for_each_cpu(i) {
125 struct nrnd_state *state = &per_cpu(net_rand_state,i); 125 struct nrnd_state *state = &per_cpu(net_rand_state,i);
126 __net_srandom(state, i+jiffies); 126 __net_srandom(state, i+jiffies);
127 } 127 }
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
133 unsigned long seed[NR_CPUS]; 133 unsigned long seed[NR_CPUS];
134 134
135 get_random_bytes(seed, sizeof(seed)); 135 get_random_bytes(seed, sizeof(seed));
136 for (i = 0; i < NR_CPUS; i++) { 136 for_each_cpu(i) {
137 struct nrnd_state *state = &per_cpu(net_rand_state,i); 137 struct nrnd_state *state = &per_cpu(net_rand_state,i);
138 __net_srandom(state, seed[i]); 138 __net_srandom(state, seed[i]);
139 } 139 }
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 105039eb7629..4d1c40972a4b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -385,7 +385,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
385 u32 daddr; 385 u32 daddr;
386 386
387 if (ip_options_echo(&icmp_param->replyopts, skb)) 387 if (ip_options_echo(&icmp_param->replyopts, skb))
388 goto out; 388 return;
389 389
390 if (icmp_xmit_lock()) 390 if (icmp_xmit_lock())
391 return; 391 return;
@@ -416,7 +416,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
416 ip_rt_put(rt); 416 ip_rt_put(rt);
417out_unlock: 417out_unlock:
418 icmp_xmit_unlock(); 418 icmp_xmit_unlock();
419out:;
420} 419}
421 420
422 421
@@ -525,7 +524,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
525 iph->tos; 524 iph->tos;
526 525
527 if (ip_options_echo(&icmp_param.replyopts, skb_in)) 526 if (ip_options_echo(&icmp_param.replyopts, skb_in))
528 goto ende; 527 goto out_unlock;
529 528
530 529
531 /* 530 /*
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
index d34a9fa608e0..342d0b9098f5 100644
--- a/net/ipv4/multipath_wrandom.c
+++ b/net/ipv4/multipath_wrandom.c
@@ -228,7 +228,7 @@ static void wrandom_set_nhinfo(__u32 network,
228 struct multipath_dest *d, *target_dest = NULL; 228 struct multipath_dest *d, *target_dest = NULL;
229 229
230 /* store the weight information for a certain route */ 230 /* store the weight information for a certain route */
231 spin_lock(&state[state_idx].lock); 231 spin_lock_bh(&state[state_idx].lock);
232 232
233 /* find state entry for gateway or add one if necessary */ 233 /* find state entry for gateway or add one if necessary */
234 list_for_each_entry_rcu(r, &state[state_idx].head, list) { 234 list_for_each_entry_rcu(r, &state[state_idx].head, list) {
@@ -276,7 +276,7 @@ static void wrandom_set_nhinfo(__u32 network,
276 * we are finished 276 * we are finished
277 */ 277 */
278 278
279 spin_unlock(&state[state_idx].lock); 279 spin_unlock_bh(&state[state_idx].lock);
280} 280}
281 281
282static void __multipath_free(struct rcu_head *head) 282static void __multipath_free(struct rcu_head *head)
@@ -302,7 +302,7 @@ static void wrandom_flush(void)
302 for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) { 302 for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) {
303 struct multipath_route *r; 303 struct multipath_route *r;
304 304
305 spin_lock(&state[i].lock); 305 spin_lock_bh(&state[i].lock);
306 list_for_each_entry_rcu(r, &state[i].head, list) { 306 list_for_each_entry_rcu(r, &state[i].head, list) {
307 struct multipath_dest *d; 307 struct multipath_dest *d;
308 list_for_each_entry_rcu(d, &r->dests, list) { 308 list_for_each_entry_rcu(d, &r->dests, list) {
@@ -315,7 +315,7 @@ static void wrandom_flush(void)
315 __multipath_free); 315 __multipath_free);
316 } 316 }
317 317
318 spin_unlock(&state[i].lock); 318 spin_unlock_bh(&state[i].lock);
319 } 319 }
320} 320}
321 321
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index afe3d8f8177d..dd1048be8a01 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -807,6 +807,13 @@ static int do_replace(void __user *user, unsigned int len)
807 if (len != sizeof(tmp) + tmp.size) 807 if (len != sizeof(tmp) + tmp.size)
808 return -ENOPROTOOPT; 808 return -ENOPROTOOPT;
809 809
810 /* overflow check */
811 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
812 SMP_CACHE_BYTES)
813 return -ENOMEM;
814 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
815 return -ENOMEM;
816
810 newinfo = xt_alloc_table_info(tmp.size); 817 newinfo = xt_alloc_table_info(tmp.size);
811 if (!newinfo) 818 if (!newinfo)
812 return -ENOMEM; 819 return -ENOMEM;
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index c9ebbe0d2d9c..e0b5926c76f9 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1216,7 +1216,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1216 1216
1217 b = skb->tail; 1217 b = skb->tail;
1218 1218
1219 type |= NFNL_SUBSYS_CTNETLINK << 8; 1219 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1220 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); 1220 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
1221 nfmsg = NLMSG_DATA(nlh); 1221 nfmsg = NLMSG_DATA(nlh);
1222 1222
@@ -1567,6 +1567,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
1567}; 1567};
1568 1568
1569MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 1569MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
1570MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
1570 1571
1571static int __init ctnetlink_init(void) 1572static int __init ctnetlink_init(void)
1572{ 1573{
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index d3c5a371f993..4ba4463cec28 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -71,6 +71,7 @@ static int tftp_help(struct sk_buff **pskb,
71 71
72 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; 72 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
73 exp->mask.src.ip = 0xffffffff; 73 exp->mask.src.ip = 0xffffffff;
74 exp->mask.src.u.udp.port = 0;
74 exp->mask.dst.ip = 0xffffffff; 75 exp->mask.dst.ip = 0xffffffff;
75 exp->mask.dst.u.udp.port = 0xffff; 76 exp->mask.dst.u.udp.port = 0xffff;
76 exp->mask.dst.protonum = 0xff; 77 exp->mask.dst.protonum = 0xff;
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index ad438fb185b8..92c54999a19d 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -209,8 +209,8 @@ ip_nat_in(unsigned int hooknum,
209 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { 209 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
210 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 210 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
211 211
212 if (ct->tuplehash[dir].tuple.src.ip != 212 if (ct->tuplehash[dir].tuple.dst.ip !=
213 ct->tuplehash[!dir].tuple.dst.ip) { 213 ct->tuplehash[!dir].tuple.src.ip) {
214 dst_release((*pskb)->dst); 214 dst_release((*pskb)->dst);
215 (*pskb)->dst = NULL; 215 (*pskb)->dst = NULL;
216 } 216 }
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2371b2062c2d..16f47c675fef 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -921,6 +921,13 @@ do_replace(void __user *user, unsigned int len)
921 if (len != sizeof(tmp) + tmp.size) 921 if (len != sizeof(tmp) + tmp.size)
922 return -ENOPROTOOPT; 922 return -ENOPROTOOPT;
923 923
924 /* overflow check */
925 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
926 SMP_CACHE_BYTES)
927 return -ENOMEM;
928 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
929 return -ENOMEM;
930
924 newinfo = xt_alloc_table_info(tmp.size); 931 newinfo = xt_alloc_table_info(tmp.size);
925 if (!newinfo) 932 if (!newinfo)
926 return -ENOMEM; 933 return -ENOMEM;
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 641dbc477650..180a9ea57b69 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -35,6 +35,10 @@
35 * each nlgroup you are using, so the total kernel memory usage increases 35 * each nlgroup you are using, so the total kernel memory usage increases
36 * by that factor. 36 * by that factor.
37 * 37 *
38 * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
39 * nlbufsiz is used with alloc_skb, which adds another
40 * sizeof(struct skb_shared_info). Use NLMSG_GOODSIZE instead.
41 *
38 * flushtimeout: 42 * flushtimeout:
39 * Specify, after how many hundredths of a second the queue should be 43 * Specify, after how many hundredths of a second the queue should be
40 * flushed even if it is not full yet. 44 * flushed even if it is not full yet.
@@ -76,7 +80,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
76 80
77#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) 81#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
78 82
79static unsigned int nlbufsiz = 4096; 83static unsigned int nlbufsiz = NLMSG_GOODSIZE;
80module_param(nlbufsiz, uint, 0400); 84module_param(nlbufsiz, uint, 0400);
81MODULE_PARM_DESC(nlbufsiz, "netlink buffer size"); 85MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
82 86
@@ -143,22 +147,26 @@ static void ulog_timer(unsigned long data)
143static struct sk_buff *ulog_alloc_skb(unsigned int size) 147static struct sk_buff *ulog_alloc_skb(unsigned int size)
144{ 148{
145 struct sk_buff *skb; 149 struct sk_buff *skb;
150 unsigned int n;
146 151
147 /* alloc skb which should be big enough for a whole 152 /* alloc skb which should be big enough for a whole
148 * multipart message. WARNING: has to be <= 131000 153 * multipart message. WARNING: has to be <= 131000
149 * due to slab allocator restrictions */ 154 * due to slab allocator restrictions */
150 155
151 skb = alloc_skb(nlbufsiz, GFP_ATOMIC); 156 n = max(size, nlbufsiz);
157 skb = alloc_skb(n, GFP_ATOMIC);
152 if (!skb) { 158 if (!skb) {
153 PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", 159 PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
154 nlbufsiz);
155 160
156 /* try to allocate only as much as we need for 161 if (n > size) {
157 * current packet */ 162 /* try to allocate only as much as we need for
163 * current packet */
158 164
159 skb = alloc_skb(size, GFP_ATOMIC); 165 skb = alloc_skb(size, GFP_ATOMIC);
160 if (!skb) 166 if (!skb)
161 PRINTR("ipt_ULOG: can't even allocate %ub\n", size); 167 PRINTR("ipt_ULOG: can't even allocate %ub\n",
168 size);
169 }
162 } 170 }
163 171
164 return skb; 172 return skb;
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c
index 18ca8258a1c5..5a7a265280f9 100644
--- a/net/ipv4/netfilter/ipt_policy.c
+++ b/net/ipv4/netfilter/ipt_policy.c
@@ -26,10 +26,13 @@ MODULE_LICENSE("GPL");
26static inline int 26static inline int
27match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e) 27match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e)
28{ 28{
29#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) 29#define MATCH_ADDR(x,y,z) (!e->match.x || \
30 ((e->x.a4.s_addr == (e->y.a4.s_addr & (z))) \
31 ^ e->invert.x))
32#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
30 33
31 return MATCH(saddr, x->props.saddr.a4 & e->smask) && 34 return MATCH_ADDR(saddr, smask, x->props.saddr.a4) &&
32 MATCH(daddr, x->id.daddr.a4 & e->dmask) && 35 MATCH_ADDR(daddr, dmask, x->id.daddr.a4) &&
33 MATCH(proto, x->id.proto) && 36 MATCH(proto, x->id.proto) &&
34 MATCH(mode, x->props.mode) && 37 MATCH(mode, x->props.mode) &&
35 MATCH(spi, x->id.spi) && 38 MATCH(spi, x->id.spi) &&
@@ -89,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
89 return 0; 92 return 0;
90 } 93 }
91 94
92 return strict ? 1 : 0; 95 return strict ? i == info->len : 0;
93} 96}
94 97
95static int match(const struct sk_buff *skb, 98static int match(const struct sk_buff *skb,
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 39d49dc333a7..1b167c4bb3be 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
49 int res = 0; 49 int res = 0;
50 int cpu; 50 int cpu;
51 51
52 for (cpu = 0; cpu < NR_CPUS; cpu++) 52 for_each_cpu(cpu)
53 res += proto->stats[cpu].inuse; 53 res += proto->stats[cpu].inuse;
54 54
55 return res; 55 return res;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d328d5986143..1db50487916b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3321,9 +3321,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
3321 3321
3322 switch (event) { 3322 switch (event) {
3323 case RTM_NEWADDR: 3323 case RTM_NEWADDR:
3324 dst_hold(&ifp->rt->u.dst); 3324 ip6_ins_rt(ifp->rt, NULL, NULL, NULL);
3325 if (ip6_ins_rt(ifp->rt, NULL, NULL, NULL))
3326 dst_release(&ifp->rt->u.dst);
3327 if (ifp->idev->cnf.forwarding) 3325 if (ifp->idev->cnf.forwarding)
3328 addrconf_join_anycast(ifp); 3326 addrconf_join_anycast(ifp);
3329 break; 3327 break;
@@ -3334,8 +3332,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
3334 dst_hold(&ifp->rt->u.dst); 3332 dst_hold(&ifp->rt->u.dst);
3335 if (ip6_del_rt(ifp->rt, NULL, NULL, NULL)) 3333 if (ip6_del_rt(ifp->rt, NULL, NULL, NULL))
3336 dst_free(&ifp->rt->u.dst); 3334 dst_free(&ifp->rt->u.dst);
3337 else
3338 dst_release(&ifp->rt->u.dst);
3339 break; 3335 break;
3340 } 3336 }
3341} 3337}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 064ffab82a9f..6c9711ac1c03 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -369,12 +369,6 @@ int inet6_destroy_sock(struct sock *sk)
369 struct sk_buff *skb; 369 struct sk_buff *skb;
370 struct ipv6_txoptions *opt; 370 struct ipv6_txoptions *opt;
371 371
372 /*
373 * Release destination entry
374 */
375
376 sk_dst_reset(sk);
377
378 /* Release rx options */ 372 /* Release rx options */
379 373
380 if ((skb = xchg(&np->pktoptions, NULL)) != NULL) 374 if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 847068fd3367..74ff56c322f4 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -978,6 +978,13 @@ do_replace(void __user *user, unsigned int len)
978 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 978 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
979 return -EFAULT; 979 return -EFAULT;
980 980
981 /* overflow check */
982 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
983 SMP_CACHE_BYTES)
984 return -ENOMEM;
985 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
986 return -ENOMEM;
987
981 newinfo = xt_alloc_table_info(tmp.size); 988 newinfo = xt_alloc_table_info(tmp.size);
982 if (!newinfo) 989 if (!newinfo)
983 return -ENOMEM; 990 return -ENOMEM;
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c
index afe1cc4c18a5..3d39ec924041 100644
--- a/net/ipv6/netfilter/ip6t_policy.c
+++ b/net/ipv6/netfilter/ip6t_policy.c
@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
26static inline int 26static inline int
27match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e) 27match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e)
28{ 28{
29#define MATCH_ADDR(x,y,z) (!e->match.x || \ 29#define MATCH_ADDR(x,y,z) (!e->match.x || \
30 ((ip6_masked_addrcmp((z), &e->x, &e->y)) == 0) ^ e->invert.x) 30 ((!ip6_masked_addrcmp(&e->x.a6, &e->y.a6, z)) \
31 ^ e->invert.x))
31#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) 32#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
32 33
33 return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) && 34 return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) &&
@@ -91,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ip6t_policy_info *info)
91 return 0; 92 return 0;
92 } 93 }
93 94
94 return strict ? 1 : 0; 95 return strict ? i == info->len : 0;
95} 96}
96 97
97static int match(const struct sk_buff *skb, 98static int match(const struct sk_buff *skb,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 50a13e75d70e..4238b1ed8860 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
38 int res = 0; 38 int res = 0;
39 int cpu; 39 int cpu;
40 40
41 for (cpu=0; cpu<NR_CPUS; cpu++) 41 for_each_cpu(cpu)
42 res += proto->stats[cpu].inuse; 42 res += proto->stats[cpu].inuse;
43 43
44 return res; 44 return res;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 62bb509f05d4..0ce337a1d974 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -188,7 +188,7 @@ extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
188struct nf_conntrack_protocol * 188struct nf_conntrack_protocol *
189__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol) 189__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
190{ 190{
191 if (unlikely(nf_ct_protos[l3proto] == NULL)) 191 if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
192 return &nf_conntrack_generic_protocol; 192 return &nf_conntrack_generic_protocol;
193 193
194 return nf_ct_protos[l3proto][protocol]; 194 return nf_ct_protos[l3proto][protocol];
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index ab0c920f0d30..6f210f399762 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -657,8 +657,6 @@ static int __init init(void)
657 /* FIXME should be configurable whether IPv4 and IPv6 FTP connections 657 /* FIXME should be configurable whether IPv4 and IPv6 FTP connections
658 are tracked or not - YK */ 658 are tracked or not - YK */
659 for (i = 0; i < ports_c; i++) { 659 for (i = 0; i < ports_c; i++) {
660 memset(&ftp[i], 0, sizeof(struct nf_conntrack_helper));
661
662 ftp[i][0].tuple.src.l3num = PF_INET; 660 ftp[i][0].tuple.src.l3num = PF_INET;
663 ftp[i][1].tuple.src.l3num = PF_INET6; 661 ftp[i][1].tuple.src.l3num = PF_INET6;
664 for (j = 0; j < 2; j++) { 662 for (j = 0; j < 2; j++) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 73ab16bc7d40..9ff3463037e1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1232,7 +1232,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1232 1232
1233 b = skb->tail; 1233 b = skb->tail;
1234 1234
1235 type |= NFNL_SUBSYS_CTNETLINK << 8; 1235 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1236 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); 1236 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
1237 nfmsg = NLMSG_DATA(nlh); 1237 nfmsg = NLMSG_DATA(nlh);
1238 1238
@@ -1589,6 +1589,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
1589}; 1589};
1590 1590
1591MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 1591MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
1592MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
1592 1593
1593static int __init ctnetlink_init(void) 1594static int __init ctnetlink_init(void)
1594{ 1595{
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e10512e229b6..3b3c781b40c0 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -37,7 +37,7 @@
37#include "../bridge/br_private.h" 37#include "../bridge/br_private.h"
38#endif 38#endif
39 39
40#define NFULNL_NLBUFSIZ_DEFAULT 4096 40#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
41#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ 41#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
42#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ 42#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
43 43
@@ -314,24 +314,28 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
314 unsigned int pkt_size) 314 unsigned int pkt_size)
315{ 315{
316 struct sk_buff *skb; 316 struct sk_buff *skb;
317 unsigned int n;
317 318
318 UDEBUG("entered (%u, %u)\n", inst_size, pkt_size); 319 UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
319 320
320 /* alloc skb which should be big enough for a whole multipart 321 /* alloc skb which should be big enough for a whole multipart
321 * message. WARNING: has to be <= 128k due to slab restrictions */ 322 * message. WARNING: has to be <= 128k due to slab restrictions */
322 323
323 skb = alloc_skb(inst_size, GFP_ATOMIC); 324 n = max(inst_size, pkt_size);
325 skb = alloc_skb(n, GFP_ATOMIC);
324 if (!skb) { 326 if (!skb) {
325 PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n", 327 PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
326 inst_size); 328 inst_size);
327 329
328 /* try to allocate only as much as we need for current 330 if (n > pkt_size) {
329 * packet */ 331 /* try to allocate only as much as we need for current
332 * packet */
330 333
331 skb = alloc_skb(pkt_size, GFP_ATOMIC); 334 skb = alloc_skb(pkt_size, GFP_ATOMIC);
332 if (!skb) 335 if (!skb)
333 PRINTR("nfnetlink_log: can't even alloc %u bytes\n", 336 PRINTR("nfnetlink_log: can't even alloc %u "
334 pkt_size); 337 "bytes\n", pkt_size);
338 }
335 } 339 }
336 340
337 return skb; 341 return skb;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 18ed9c5d209c..cac38b2e147a 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -825,7 +825,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
825 } 825 }
826 826
827 if (nfqa[NFQA_MARK-1]) 827 if (nfqa[NFQA_MARK-1])
828 skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1])); 828 entry->skb->nfmark = ntohl(*(u_int32_t *)
829 NFA_DATA(nfqa[NFQA_MARK-1]));
829 830
830 issue_verdict(entry, verdict); 831 issue_verdict(entry, verdict);
831 instance_put(queue); 832 instance_put(queue);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a40991ef72c9..437cba7260a4 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -608,7 +608,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
608 * When a Fast Retransmit is being performed the sender SHOULD 608 * When a Fast Retransmit is being performed the sender SHOULD
609 * ignore the value of cwnd and SHOULD NOT delay retransmission. 609 * ignore the value of cwnd and SHOULD NOT delay retransmission.
610 */ 610 */
611 if (!chunk->fast_retransmit) 611 if (chunk->fast_retransmit <= 0)
612 if (transport->flight_size >= transport->cwnd) { 612 if (transport->flight_size >= transport->cwnd) {
613 retval = SCTP_XMIT_RWND_FULL; 613 retval = SCTP_XMIT_RWND_FULL;
614 goto finish; 614 goto finish;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index efb72faba20c..f148f9576dd2 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -406,7 +406,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
406 * chunks that are not yet acked should be added to the 406 * chunks that are not yet acked should be added to the
407 * retransmit queue. 407 * retransmit queue.
408 */ 408 */
409 if ((fast_retransmit && chunk->fast_retransmit) || 409 if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
410 (!fast_retransmit && !chunk->tsn_gap_acked)) { 410 (!fast_retransmit && !chunk->tsn_gap_acked)) {
411 /* RFC 2960 6.2.1 Processing a Received SACK 411 /* RFC 2960 6.2.1 Processing a Received SACK
412 * 412 *
@@ -603,7 +603,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
603 /* Mark the chunk as ineligible for fast retransmit 603 /* Mark the chunk as ineligible for fast retransmit
604 * after it is retransmitted. 604 * after it is retransmitted.
605 */ 605 */
606 chunk->fast_retransmit = 0; 606 if (chunk->fast_retransmit > 0)
607 chunk->fast_retransmit = -1;
607 608
608 *start_timer = 1; 609 *start_timer = 1;
609 q->empty = 0; 610 q->empty = 0;
@@ -621,7 +622,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
621 list_for_each(lchunk1, lqueue) { 622 list_for_each(lchunk1, lqueue) {
622 chunk1 = list_entry(lchunk1, struct sctp_chunk, 623 chunk1 = list_entry(lchunk1, struct sctp_chunk,
623 transmitted_list); 624 transmitted_list);
624 chunk1->fast_retransmit = 0; 625 if (chunk1->fast_retransmit > 0)
626 chunk1->fast_retransmit = -1;
625 } 627 }
626 } 628 }
627 } 629 }
@@ -1562,11 +1564,11 @@ static void sctp_mark_missing(struct sctp_outq *q,
1562 /* 1564 /*
1563 * M4) If any DATA chunk is found to have a 1565 * M4) If any DATA chunk is found to have a
1564 * 'TSN.Missing.Report' 1566 * 'TSN.Missing.Report'
1565 * value larger than or equal to 4, mark that chunk for 1567 * value larger than or equal to 3, mark that chunk for
1566 * retransmission and start the fast retransmit procedure. 1568 * retransmission and start the fast retransmit procedure.
1567 */ 1569 */
1568 1570
1569 if (chunk->tsn_missing_report >= 4) { 1571 if (chunk->tsn_missing_report >= 3) {
1570 chunk->fast_retransmit = 1; 1572 chunk->fast_retransmit = 1;
1571 do_fast_retransmit = 1; 1573 do_fast_retransmit = 1;
1572 } 1574 }
diff --git a/net/socket.c b/net/socket.c
index b38a263853c3..a00851f981db 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
2078 int cpu; 2078 int cpu;
2079 int counter = 0; 2079 int counter = 0;
2080 2080
2081 for (cpu = 0; cpu < NR_CPUS; cpu++) 2081 for_each_cpu(cpu)
2082 counter += per_cpu(sockets_in_use, cpu); 2082 counter += per_cpu(sockets_in_use, cpu);
2083 2083
2084 /* It can be negative, by the way. 8) */ 2084 /* It can be negative, by the way. 8) */
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 9ac1b8c26c01..8d6f1a176b15 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -184,7 +184,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free)
184 */ 184 */
185struct rpc_cred * 185struct rpc_cred *
186rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, 186rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
187 int taskflags) 187 int flags)
188{ 188{
189 struct rpc_cred_cache *cache = auth->au_credcache; 189 struct rpc_cred_cache *cache = auth->au_credcache;
190 HLIST_HEAD(free); 190 HLIST_HEAD(free);
@@ -193,7 +193,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
193 *cred = NULL; 193 *cred = NULL;
194 int nr = 0; 194 int nr = 0;
195 195
196 if (!(taskflags & RPC_TASK_ROOTCREDS)) 196 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS))
197 nr = acred->uid & RPC_CREDCACHE_MASK; 197 nr = acred->uid & RPC_CREDCACHE_MASK;
198retry: 198retry:
199 spin_lock(&rpc_credcache_lock); 199 spin_lock(&rpc_credcache_lock);
@@ -202,7 +202,7 @@ retry:
202 hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { 202 hlist_for_each_safe(pos, next, &cache->hashtable[nr]) {
203 struct rpc_cred *entry; 203 struct rpc_cred *entry;
204 entry = hlist_entry(pos, struct rpc_cred, cr_hash); 204 entry = hlist_entry(pos, struct rpc_cred, cr_hash);
205 if (entry->cr_ops->crmatch(acred, entry, taskflags)) { 205 if (entry->cr_ops->crmatch(acred, entry, flags)) {
206 hlist_del(&entry->cr_hash); 206 hlist_del(&entry->cr_hash);
207 cred = entry; 207 cred = entry;
208 break; 208 break;
@@ -224,7 +224,7 @@ retry:
224 rpcauth_destroy_credlist(&free); 224 rpcauth_destroy_credlist(&free);
225 225
226 if (!cred) { 226 if (!cred) {
227 new = auth->au_ops->crcreate(auth, acred, taskflags); 227 new = auth->au_ops->crcreate(auth, acred, flags);
228 if (!IS_ERR(new)) { 228 if (!IS_ERR(new)) {
229#ifdef RPC_DEBUG 229#ifdef RPC_DEBUG
230 new->cr_magic = RPCAUTH_CRED_MAGIC; 230 new->cr_magic = RPCAUTH_CRED_MAGIC;
@@ -232,13 +232,21 @@ retry:
232 goto retry; 232 goto retry;
233 } else 233 } else
234 cred = new; 234 cred = new;
235 } else if ((cred->cr_flags & RPCAUTH_CRED_NEW)
236 && cred->cr_ops->cr_init != NULL
237 && !(flags & RPCAUTH_LOOKUP_NEW)) {
238 int res = cred->cr_ops->cr_init(auth, cred);
239 if (res < 0) {
240 put_rpccred(cred);
241 cred = ERR_PTR(res);
242 }
235 } 243 }
236 244
237 return (struct rpc_cred *) cred; 245 return (struct rpc_cred *) cred;
238} 246}
239 247
240struct rpc_cred * 248struct rpc_cred *
241rpcauth_lookupcred(struct rpc_auth *auth, int taskflags) 249rpcauth_lookupcred(struct rpc_auth *auth, int flags)
242{ 250{
243 struct auth_cred acred = { 251 struct auth_cred acred = {
244 .uid = current->fsuid, 252 .uid = current->fsuid,
@@ -250,7 +258,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int taskflags)
250 dprintk("RPC: looking up %s cred\n", 258 dprintk("RPC: looking up %s cred\n",
251 auth->au_ops->au_name); 259 auth->au_ops->au_name);
252 get_group_info(acred.group_info); 260 get_group_info(acred.group_info);
253 ret = auth->au_ops->lookup_cred(auth, &acred, taskflags); 261 ret = auth->au_ops->lookup_cred(auth, &acred, flags);
254 put_group_info(acred.group_info); 262 put_group_info(acred.group_info);
255 return ret; 263 return ret;
256} 264}
@@ -265,11 +273,14 @@ rpcauth_bindcred(struct rpc_task *task)
265 .group_info = current->group_info, 273 .group_info = current->group_info,
266 }; 274 };
267 struct rpc_cred *ret; 275 struct rpc_cred *ret;
276 int flags = 0;
268 277
269 dprintk("RPC: %4d looking up %s cred\n", 278 dprintk("RPC: %4d looking up %s cred\n",
270 task->tk_pid, task->tk_auth->au_ops->au_name); 279 task->tk_pid, task->tk_auth->au_ops->au_name);
271 get_group_info(acred.group_info); 280 get_group_info(acred.group_info);
272 ret = auth->au_ops->lookup_cred(auth, &acred, task->tk_flags); 281 if (task->tk_flags & RPC_TASK_ROOTCREDS)
282 flags |= RPCAUTH_LOOKUP_ROOTCREDS;
283 ret = auth->au_ops->lookup_cred(auth, &acred, flags);
273 if (!IS_ERR(ret)) 284 if (!IS_ERR(ret))
274 task->tk_msg.rpc_cred = ret; 285 task->tk_msg.rpc_cred = ret;
275 else 286 else
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8d782282ec19..bb46efd92e57 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -158,6 +158,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
158 old = gss_cred->gc_ctx; 158 old = gss_cred->gc_ctx;
159 gss_cred->gc_ctx = ctx; 159 gss_cred->gc_ctx = ctx;
160 cred->cr_flags |= RPCAUTH_CRED_UPTODATE; 160 cred->cr_flags |= RPCAUTH_CRED_UPTODATE;
161 cred->cr_flags &= ~RPCAUTH_CRED_NEW;
161 write_unlock(&gss_ctx_lock); 162 write_unlock(&gss_ctx_lock);
162 if (old) 163 if (old)
163 gss_put_ctx(old); 164 gss_put_ctx(old);
@@ -580,7 +581,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
580 } else { 581 } else {
581 struct auth_cred acred = { .uid = uid }; 582 struct auth_cred acred = { .uid = uid };
582 spin_unlock(&gss_auth->lock); 583 spin_unlock(&gss_auth->lock);
583 cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, 0); 584 cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW);
584 if (IS_ERR(cred)) { 585 if (IS_ERR(cred)) {
585 err = PTR_ERR(cred); 586 err = PTR_ERR(cred);
586 goto err_put_ctx; 587 goto err_put_ctx;
@@ -758,13 +759,13 @@ gss_destroy_cred(struct rpc_cred *rc)
758 * Lookup RPCSEC_GSS cred for the current process 759 * Lookup RPCSEC_GSS cred for the current process
759 */ 760 */
760static struct rpc_cred * 761static struct rpc_cred *
761gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) 762gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
762{ 763{
763 return rpcauth_lookup_credcache(auth, acred, taskflags); 764 return rpcauth_lookup_credcache(auth, acred, flags);
764} 765}
765 766
766static struct rpc_cred * 767static struct rpc_cred *
767gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) 768gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
768{ 769{
769 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); 770 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
770 struct gss_cred *cred = NULL; 771 struct gss_cred *cred = NULL;
@@ -785,13 +786,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags)
785 */ 786 */
786 cred->gc_flags = 0; 787 cred->gc_flags = 0;
787 cred->gc_base.cr_ops = &gss_credops; 788 cred->gc_base.cr_ops = &gss_credops;
789 cred->gc_base.cr_flags = RPCAUTH_CRED_NEW;
788 cred->gc_service = gss_auth->service; 790 cred->gc_service = gss_auth->service;
789 do {
790 err = gss_create_upcall(gss_auth, cred);
791 } while (err == -EAGAIN);
792 if (err < 0)
793 goto out_err;
794
795 return &cred->gc_base; 791 return &cred->gc_base;
796 792
797out_err: 793out_err:
@@ -801,13 +797,34 @@ out_err:
801} 797}
802 798
803static int 799static int
804gss_match(struct auth_cred *acred, struct rpc_cred *rc, int taskflags) 800gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
801{
802 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
803 struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
804 int err;
805
806 do {
807 err = gss_create_upcall(gss_auth, gss_cred);
808 } while (err == -EAGAIN);
809 return err;
810}
811
812static int
813gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
805{ 814{
806 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); 815 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
807 816
817 /*
818 * If the searchflags have set RPCAUTH_LOOKUP_NEW, then
819 * we don't really care if the credential has expired or not,
820 * since the caller should be prepared to reinitialise it.
821 */
822 if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW))
823 goto out;
808 /* Don't match with creds that have expired. */ 824 /* Don't match with creds that have expired. */
809 if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) 825 if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
810 return 0; 826 return 0;
827out:
811 return (rc->cr_uid == acred->uid); 828 return (rc->cr_uid == acred->uid);
812} 829}
813 830
@@ -1241,6 +1258,7 @@ static struct rpc_authops authgss_ops = {
1241static struct rpc_credops gss_credops = { 1258static struct rpc_credops gss_credops = {
1242 .cr_name = "AUTH_GSS", 1259 .cr_name = "AUTH_GSS",
1243 .crdestroy = gss_destroy_cred, 1260 .crdestroy = gss_destroy_cred,
1261 .cr_init = gss_cred_init,
1244 .crmatch = gss_match, 1262 .crmatch = gss_match,
1245 .crmarshal = gss_marshal, 1263 .crmarshal = gss_marshal,
1246 .crrefresh = gss_refresh, 1264 .crrefresh = gss_refresh,
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 1b3ed4fd1987..df14b6bfbf10 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -75,7 +75,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
75 75
76 atomic_set(&cred->uc_count, 1); 76 atomic_set(&cred->uc_count, 1);
77 cred->uc_flags = RPCAUTH_CRED_UPTODATE; 77 cred->uc_flags = RPCAUTH_CRED_UPTODATE;
78 if (flags & RPC_TASK_ROOTCREDS) { 78 if (flags & RPCAUTH_LOOKUP_ROOTCREDS) {
79 cred->uc_uid = 0; 79 cred->uc_uid = 0;
80 cred->uc_gid = 0; 80 cred->uc_gid = 0;
81 cred->uc_gids[0] = NOGROUP; 81 cred->uc_gids[0] = NOGROUP;
@@ -108,12 +108,12 @@ unx_destroy_cred(struct rpc_cred *cred)
108 * request root creds (e.g. for NFS swapping). 108 * request root creds (e.g. for NFS swapping).
109 */ 109 */
110static int 110static int
111unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int taskflags) 111unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
112{ 112{
113 struct unx_cred *cred = (struct unx_cred *) rcred; 113 struct unx_cred *cred = (struct unx_cred *) rcred;
114 int i; 114 int i;
115 115
116 if (!(taskflags & RPC_TASK_ROOTCREDS)) { 116 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) {
117 int groups; 117 int groups;
118 118
119 if (cred->uc_uid != acred->uid 119 if (cred->uc_uid != acred->uid
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9764c80ab0b2..a5c0c7b6e151 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -38,44 +38,42 @@ static kmem_cache_t *rpc_inode_cachep __read_mostly;
38 38
39#define RPC_UPCALL_TIMEOUT (30*HZ) 39#define RPC_UPCALL_TIMEOUT (30*HZ)
40 40
41static void 41static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
42__rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, int err) 42 void (*destroy_msg)(struct rpc_pipe_msg *), int err)
43{ 43{
44 struct rpc_pipe_msg *msg; 44 struct rpc_pipe_msg *msg;
45 void (*destroy_msg)(struct rpc_pipe_msg *);
46 45
47 destroy_msg = rpci->ops->destroy_msg; 46 if (list_empty(head))
48 while (!list_empty(head)) { 47 return;
48 do {
49 msg = list_entry(head->next, struct rpc_pipe_msg, list); 49 msg = list_entry(head->next, struct rpc_pipe_msg, list);
50 list_del_init(&msg->list); 50 list_del(&msg->list);
51 msg->errno = err; 51 msg->errno = err;
52 destroy_msg(msg); 52 destroy_msg(msg);
53 } 53 } while (!list_empty(head));
54}
55
56static void
57__rpc_purge_upcall(struct inode *inode, int err)
58{
59 struct rpc_inode *rpci = RPC_I(inode);
60
61 __rpc_purge_list(rpci, &rpci->pipe, err);
62 rpci->pipelen = 0;
63 wake_up(&rpci->waitq); 54 wake_up(&rpci->waitq);
64} 55}
65 56
66static void 57static void
67rpc_timeout_upcall_queue(void *data) 58rpc_timeout_upcall_queue(void *data)
68{ 59{
60 LIST_HEAD(free_list);
69 struct rpc_inode *rpci = (struct rpc_inode *)data; 61 struct rpc_inode *rpci = (struct rpc_inode *)data;
70 struct inode *inode = &rpci->vfs_inode; 62 struct inode *inode = &rpci->vfs_inode;
63 void (*destroy_msg)(struct rpc_pipe_msg *);
71 64
72 mutex_lock(&inode->i_mutex); 65 spin_lock(&inode->i_lock);
73 if (rpci->ops == NULL) 66 if (rpci->ops == NULL) {
74 goto out; 67 spin_unlock(&inode->i_lock);
75 if (rpci->nreaders == 0 && !list_empty(&rpci->pipe)) 68 return;
76 __rpc_purge_upcall(inode, -ETIMEDOUT); 69 }
77out: 70 destroy_msg = rpci->ops->destroy_msg;
78 mutex_unlock(&inode->i_mutex); 71 if (rpci->nreaders == 0) {
72 list_splice_init(&rpci->pipe, &free_list);
73 rpci->pipelen = 0;
74 }
75 spin_unlock(&inode->i_lock);
76 rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
79} 77}
80 78
81int 79int
@@ -84,7 +82,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
84 struct rpc_inode *rpci = RPC_I(inode); 82 struct rpc_inode *rpci = RPC_I(inode);
85 int res = -EPIPE; 83 int res = -EPIPE;
86 84
87 mutex_lock(&inode->i_mutex); 85 spin_lock(&inode->i_lock);
88 if (rpci->ops == NULL) 86 if (rpci->ops == NULL)
89 goto out; 87 goto out;
90 if (rpci->nreaders) { 88 if (rpci->nreaders) {
@@ -100,7 +98,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
100 res = 0; 98 res = 0;
101 } 99 }
102out: 100out:
103 mutex_unlock(&inode->i_mutex); 101 spin_unlock(&inode->i_lock);
104 wake_up(&rpci->waitq); 102 wake_up(&rpci->waitq);
105 return res; 103 return res;
106} 104}
@@ -115,21 +113,29 @@ static void
115rpc_close_pipes(struct inode *inode) 113rpc_close_pipes(struct inode *inode)
116{ 114{
117 struct rpc_inode *rpci = RPC_I(inode); 115 struct rpc_inode *rpci = RPC_I(inode);
116 struct rpc_pipe_ops *ops;
118 117
119 mutex_lock(&inode->i_mutex); 118 mutex_lock(&inode->i_mutex);
120 if (rpci->ops != NULL) { 119 ops = rpci->ops;
120 if (ops != NULL) {
121 LIST_HEAD(free_list);
122
123 spin_lock(&inode->i_lock);
121 rpci->nreaders = 0; 124 rpci->nreaders = 0;
122 __rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE); 125 list_splice_init(&rpci->in_upcall, &free_list);
123 __rpc_purge_upcall(inode, -EPIPE); 126 list_splice_init(&rpci->pipe, &free_list);
124 rpci->nwriters = 0; 127 rpci->pipelen = 0;
125 if (rpci->ops->release_pipe)
126 rpci->ops->release_pipe(inode);
127 rpci->ops = NULL; 128 rpci->ops = NULL;
129 spin_unlock(&inode->i_lock);
130 rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
131 rpci->nwriters = 0;
132 if (ops->release_pipe)
133 ops->release_pipe(inode);
134 cancel_delayed_work(&rpci->queue_timeout);
135 flush_scheduled_work();
128 } 136 }
129 rpc_inode_setowner(inode, NULL); 137 rpc_inode_setowner(inode, NULL);
130 mutex_unlock(&inode->i_mutex); 138 mutex_unlock(&inode->i_mutex);
131 cancel_delayed_work(&rpci->queue_timeout);
132 flush_scheduled_work();
133} 139}
134 140
135static struct inode * 141static struct inode *
@@ -177,16 +183,26 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
177 goto out; 183 goto out;
178 msg = (struct rpc_pipe_msg *)filp->private_data; 184 msg = (struct rpc_pipe_msg *)filp->private_data;
179 if (msg != NULL) { 185 if (msg != NULL) {
186 spin_lock(&inode->i_lock);
180 msg->errno = -EAGAIN; 187 msg->errno = -EAGAIN;
181 list_del_init(&msg->list); 188 list_del(&msg->list);
189 spin_unlock(&inode->i_lock);
182 rpci->ops->destroy_msg(msg); 190 rpci->ops->destroy_msg(msg);
183 } 191 }
184 if (filp->f_mode & FMODE_WRITE) 192 if (filp->f_mode & FMODE_WRITE)
185 rpci->nwriters --; 193 rpci->nwriters --;
186 if (filp->f_mode & FMODE_READ) 194 if (filp->f_mode & FMODE_READ) {
187 rpci->nreaders --; 195 rpci->nreaders --;
188 if (!rpci->nreaders) 196 if (rpci->nreaders == 0) {
189 __rpc_purge_upcall(inode, -EAGAIN); 197 LIST_HEAD(free_list);
198 spin_lock(&inode->i_lock);
199 list_splice_init(&rpci->pipe, &free_list);
200 rpci->pipelen = 0;
201 spin_unlock(&inode->i_lock);
202 rpc_purge_list(rpci, &free_list,
203 rpci->ops->destroy_msg, -EAGAIN);
204 }
205 }
190 if (rpci->ops->release_pipe) 206 if (rpci->ops->release_pipe)
191 rpci->ops->release_pipe(inode); 207 rpci->ops->release_pipe(inode);
192out: 208out:
@@ -209,6 +225,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
209 } 225 }
210 msg = filp->private_data; 226 msg = filp->private_data;
211 if (msg == NULL) { 227 if (msg == NULL) {
228 spin_lock(&inode->i_lock);
212 if (!list_empty(&rpci->pipe)) { 229 if (!list_empty(&rpci->pipe)) {
213 msg = list_entry(rpci->pipe.next, 230 msg = list_entry(rpci->pipe.next,
214 struct rpc_pipe_msg, 231 struct rpc_pipe_msg,
@@ -218,6 +235,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
218 filp->private_data = msg; 235 filp->private_data = msg;
219 msg->copied = 0; 236 msg->copied = 0;
220 } 237 }
238 spin_unlock(&inode->i_lock);
221 if (msg == NULL) 239 if (msg == NULL)
222 goto out_unlock; 240 goto out_unlock;
223 } 241 }
@@ -225,7 +243,9 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
225 res = rpci->ops->upcall(filp, msg, buf, len); 243 res = rpci->ops->upcall(filp, msg, buf, len);
226 if (res < 0 || msg->len == msg->copied) { 244 if (res < 0 || msg->len == msg->copied) {
227 filp->private_data = NULL; 245 filp->private_data = NULL;
228 list_del_init(&msg->list); 246 spin_lock(&inode->i_lock);
247 list_del(&msg->list);
248 spin_unlock(&inode->i_lock);
229 rpci->ops->destroy_msg(msg); 249 rpci->ops->destroy_msg(msg);
230 } 250 }
231out_unlock: 251out_unlock:
@@ -610,7 +630,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
610 return ERR_PTR(error); 630 return ERR_PTR(error);
611 dir = nd->dentry->d_inode; 631 dir = nd->dentry->d_inode;
612 mutex_lock(&dir->i_mutex); 632 mutex_lock(&dir->i_mutex);
613 dentry = lookup_hash(nd); 633 dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
614 if (IS_ERR(dentry)) 634 if (IS_ERR(dentry))
615 goto out_err; 635 goto out_err;
616 if (dentry->d_inode) { 636 if (dentry->d_inode) {
@@ -672,7 +692,7 @@ rpc_rmdir(char *path)
672 return error; 692 return error;
673 dir = nd.dentry->d_inode; 693 dir = nd.dentry->d_inode;
674 mutex_lock(&dir->i_mutex); 694 mutex_lock(&dir->i_mutex);
675 dentry = lookup_hash(&nd); 695 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
676 if (IS_ERR(dentry)) { 696 if (IS_ERR(dentry)) {
677 error = PTR_ERR(dentry); 697 error = PTR_ERR(dentry);
678 goto out_release; 698 goto out_release;
@@ -733,7 +753,7 @@ rpc_unlink(char *path)
733 return error; 753 return error;
734 dir = nd.dentry->d_inode; 754 dir = nd.dentry->d_inode;
735 mutex_lock(&dir->i_mutex); 755 mutex_lock(&dir->i_mutex);
736 dentry = lookup_hash(&nd); 756 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
737 if (IS_ERR(dentry)) { 757 if (IS_ERR(dentry)) {
738 error = PTR_ERR(dentry); 758 error = PTR_ERR(dentry);
739 goto out_release; 759 goto out_release;
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 5760e057ecba..d64aae85c378 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -123,7 +123,17 @@ KBUILD_HAVE_NLS := $(shell \
123 then echo yes ; \ 123 then echo yes ; \
124 else echo no ; fi) 124 else echo no ; fi)
125ifeq ($(KBUILD_HAVE_NLS),no) 125ifeq ($(KBUILD_HAVE_NLS),no)
126HOSTCFLAGS += -DKBUILD_NO_NLS 126 HOSTCFLAGS += -DKBUILD_NO_NLS
127else
128 KBUILD_NEED_LINTL := $(shell \
129 if echo -e "\#include <libintl.h>\nint main(int a, char** b) { gettext(\"\"); return 0; }\n" | \
130 $(HOSTCC) $(HOSTCFLAGS) -x c - -o /dev/null> /dev/null 2>&1 ; \
131 then echo no ; \
132 else echo yes ; fi)
133 ifeq ($(KBUILD_NEED_LINTL),yes)
134 HOSTLOADLIBES_conf += -lintl
135 HOSTLOADLIBES_mconf += -lintl
136 endif
127endif 137endif
128 138
129# generated files seem to need this to find local include files 139# generated files seem to need this to find local include files
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 90db5c76cf6e..0c62798ac7d8 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -67,9 +67,10 @@ asmlinkage long sys_add_key(const char __user *_type,
67 description = kmalloc(dlen + 1, GFP_KERNEL); 67 description = kmalloc(dlen + 1, GFP_KERNEL);
68 if (!description) 68 if (!description)
69 goto error; 69 goto error;
70 description[dlen] = '\0';
70 71
71 ret = -EFAULT; 72 ret = -EFAULT;
72 if (copy_from_user(description, _description, dlen + 1) != 0) 73 if (copy_from_user(description, _description, dlen) != 0)
73 goto error2; 74 goto error2;
74 75
75 /* pull the payload in if one was supplied */ 76 /* pull the payload in if one was supplied */
@@ -161,9 +162,10 @@ asmlinkage long sys_request_key(const char __user *_type,
161 description = kmalloc(dlen + 1, GFP_KERNEL); 162 description = kmalloc(dlen + 1, GFP_KERNEL);
162 if (!description) 163 if (!description)
163 goto error; 164 goto error;
165 description[dlen] = '\0';
164 166
165 ret = -EFAULT; 167 ret = -EFAULT;
166 if (copy_from_user(description, _description, dlen + 1) != 0) 168 if (copy_from_user(description, _description, dlen) != 0)
167 goto error2; 169 goto error2;
168 170
169 /* pull the callout info into kernel space */ 171 /* pull the callout info into kernel space */
@@ -182,9 +184,10 @@ asmlinkage long sys_request_key(const char __user *_type,
182 callout_info = kmalloc(dlen + 1, GFP_KERNEL); 184 callout_info = kmalloc(dlen + 1, GFP_KERNEL);
183 if (!callout_info) 185 if (!callout_info)
184 goto error2; 186 goto error2;
187 callout_info[dlen] = '\0';
185 188
186 ret = -EFAULT; 189 ret = -EFAULT;
187 if (copy_from_user(callout_info, _callout_info, dlen + 1) != 0) 190 if (copy_from_user(callout_info, _callout_info, dlen) != 0)
188 goto error3; 191 goto error3;
189 } 192 }
190 193
@@ -279,9 +282,10 @@ long keyctl_join_session_keyring(const char __user *_name)
279 name = kmalloc(nlen + 1, GFP_KERNEL); 282 name = kmalloc(nlen + 1, GFP_KERNEL);
280 if (!name) 283 if (!name)
281 goto error; 284 goto error;
285 name[nlen] = '\0';
282 286
283 ret = -EFAULT; 287 ret = -EFAULT;
284 if (copy_from_user(name, _name, nlen + 1) != 0) 288 if (copy_from_user(name, _name, nlen) != 0)
285 goto error2; 289 goto error2;
286 } 290 }
287 291
@@ -583,9 +587,10 @@ long keyctl_keyring_search(key_serial_t ringid,
583 description = kmalloc(dlen + 1, GFP_KERNEL); 587 description = kmalloc(dlen + 1, GFP_KERNEL);
584 if (!description) 588 if (!description)
585 goto error; 589 goto error;
590 description[dlen] = '\0';
586 591
587 ret = -EFAULT; 592 ret = -EFAULT;
588 if (copy_from_user(description, _description, dlen + 1) != 0) 593 if (copy_from_user(description, _description, dlen) != 0)
589 goto error2; 594 goto error2;
590 595
591 /* get the keyring at which to begin the search */ 596 /* get the keyring at which to begin the search */
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index b59582b92283..502f78f13f5f 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -1,6 +1,6 @@
1config SECURITY_SELINUX 1config SECURITY_SELINUX
2 bool "NSA SELinux Support" 2 bool "NSA SELinux Support"
3 depends on SECURITY && NET && INET 3 depends on SECURITY_NETWORK && NET && INET
4 default n 4 default n
5 help 5 help
6 This selects NSA Security-Enhanced Linux (SELinux). 6 This selects NSA Security-Enhanced Linux (SELinux).
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 06d54d9d20a5..688c0a267b62 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -4,9 +4,7 @@
4 4
5obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/ 5obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
6 6
7selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o 7selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o
8
9selinux-$(CONFIG_SECURITY_NETWORK) += netif.o
10 8
11selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o 9selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
12 10
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4ae834d89bce..b7773bf68efa 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -232,7 +232,6 @@ static void superblock_free_security(struct super_block *sb)
232 kfree(sbsec); 232 kfree(sbsec);
233} 233}
234 234
235#ifdef CONFIG_SECURITY_NETWORK
236static int sk_alloc_security(struct sock *sk, int family, gfp_t priority) 235static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
237{ 236{
238 struct sk_security_struct *ssec; 237 struct sk_security_struct *ssec;
@@ -261,7 +260,6 @@ static void sk_free_security(struct sock *sk)
261 sk->sk_security = NULL; 260 sk->sk_security = NULL;
262 kfree(ssec); 261 kfree(ssec);
263} 262}
264#endif /* CONFIG_SECURITY_NETWORK */
265 263
266/* The security server must be initialized before 264/* The security server must be initialized before
267 any labeling or access decisions can be provided. */ 265 any labeling or access decisions can be provided. */
@@ -2736,8 +2734,6 @@ static void selinux_task_to_inode(struct task_struct *p,
2736 return; 2734 return;
2737} 2735}
2738 2736
2739#ifdef CONFIG_SECURITY_NETWORK
2740
2741/* Returns error only if unable to parse addresses */ 2737/* Returns error only if unable to parse addresses */
2742static int selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad) 2738static int selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad)
2743{ 2739{
@@ -3556,15 +3552,6 @@ static unsigned int selinux_ipv6_postroute_last(unsigned int hooknum,
3556 3552
3557#endif /* CONFIG_NETFILTER */ 3553#endif /* CONFIG_NETFILTER */
3558 3554
3559#else
3560
3561static inline int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
3562{
3563 return 0;
3564}
3565
3566#endif /* CONFIG_SECURITY_NETWORK */
3567
3568static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) 3555static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
3569{ 3556{
3570 struct task_security_struct *tsec; 3557 struct task_security_struct *tsec;
@@ -4340,7 +4327,6 @@ static struct security_operations selinux_ops = {
4340 .getprocattr = selinux_getprocattr, 4327 .getprocattr = selinux_getprocattr,
4341 .setprocattr = selinux_setprocattr, 4328 .setprocattr = selinux_setprocattr,
4342 4329
4343#ifdef CONFIG_SECURITY_NETWORK
4344 .unix_stream_connect = selinux_socket_unix_stream_connect, 4330 .unix_stream_connect = selinux_socket_unix_stream_connect,
4345 .unix_may_send = selinux_socket_unix_may_send, 4331 .unix_may_send = selinux_socket_unix_may_send,
4346 4332
@@ -4362,7 +4348,6 @@ static struct security_operations selinux_ops = {
4362 .sk_alloc_security = selinux_sk_alloc_security, 4348 .sk_alloc_security = selinux_sk_alloc_security,
4363 .sk_free_security = selinux_sk_free_security, 4349 .sk_free_security = selinux_sk_free_security,
4364 .sk_getsid = selinux_sk_getsid_security, 4350 .sk_getsid = selinux_sk_getsid_security,
4365#endif
4366 4351
4367#ifdef CONFIG_SECURITY_NETWORK_XFRM 4352#ifdef CONFIG_SECURITY_NETWORK_XFRM
4368 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, 4353 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
@@ -4440,7 +4425,7 @@ next_sb:
4440 all processes and objects when they are created. */ 4425 all processes and objects when they are created. */
4441security_initcall(selinux_init); 4426security_initcall(selinux_init);
4442 4427
4443#if defined(CONFIG_SECURITY_NETWORK) && defined(CONFIG_NETFILTER) 4428#if defined(CONFIG_NETFILTER)
4444 4429
4445static struct nf_hook_ops selinux_ipv4_op = { 4430static struct nf_hook_ops selinux_ipv4_op = {
4446 .hook = selinux_ipv4_postroute_last, 4431 .hook = selinux_ipv4_postroute_last,
@@ -4501,13 +4486,13 @@ static void selinux_nf_ip_exit(void)
4501} 4486}
4502#endif 4487#endif
4503 4488
4504#else /* CONFIG_SECURITY_NETWORK && CONFIG_NETFILTER */ 4489#else /* CONFIG_NETFILTER */
4505 4490
4506#ifdef CONFIG_SECURITY_SELINUX_DISABLE 4491#ifdef CONFIG_SECURITY_SELINUX_DISABLE
4507#define selinux_nf_ip_exit() 4492#define selinux_nf_ip_exit()
4508#endif 4493#endif
4509 4494
4510#endif /* CONFIG_SECURITY_NETWORK && CONFIG_NETFILTER */ 4495#endif /* CONFIG_NETFILTER */
4511 4496
4512#ifdef CONFIG_SECURITY_SELINUX_DISABLE 4497#ifdef CONFIG_SECURITY_SELINUX_DISABLE
4513int selinux_disable(void) 4498int selinux_disable(void)
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 54147c1f6361..149feb410654 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -882,14 +882,20 @@ static int __devinit aaci_probe(struct amba_device *dev, void *id)
882 writel(0x1fff, aaci->base + AACI_INTCLR); 882 writel(0x1fff, aaci->base + AACI_INTCLR);
883 writel(aaci->maincr, aaci->base + AACI_MAINCR); 883 writel(aaci->maincr, aaci->base + AACI_MAINCR);
884 884
885 ret = aaci_probe_ac97(aaci);
886 if (ret)
887 goto out;
888
885 /* 889 /*
886 * Size the FIFOs. 890 * Size the FIFOs (must be multiple of 16).
887 */ 891 */
888 aaci->fifosize = aaci_size_fifo(aaci); 892 aaci->fifosize = aaci_size_fifo(aaci);
889 893 if (aaci->fifosize & 15) {
890 ret = aaci_probe_ac97(aaci); 894 printk(KERN_WARNING "AACI: fifosize = %d not supported\n",
891 if (ret) 895 aaci->fifosize);
896 ret = -ENODEV;
892 goto out; 897 goto out;
898 }
893 899
894 ret = aaci_init_pcm(aaci); 900 ret = aaci_init_pcm(aaci);
895 if (ret) 901 if (ret)