author		David S. Miller <davem@davemloft.net>	2009-12-05 18:22:26 -0500
committer	David S. Miller <davem@davemloft.net>	2009-12-05 18:22:26 -0500
commit		28b4d5cc17c20786848cdc07b7ea237a309776bb (patch)
tree		bae406a4b17229dcce7c11be5073f7a67665e477
parent		d29cecda036f251aee4947f47eea0fe9ed8cc931 (diff)
parent		96fa2b508d2d3fe040cf4ef2fffb955f0a537ea1 (diff)

Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

Conflicts:
	drivers/net/pcmcia/fmvj18x_cs.c
	drivers/net/pcmcia/nmclan_cs.c
	drivers/net/pcmcia/xirc2ps_cs.c
	drivers/net/wireless/ray_cs.c
-rw-r--r--Documentation/RCU/trace.txt254
-rw-r--r--Documentation/RCU/whatisRCU.txt2
-rw-r--r--Documentation/dontdiff3
-rw-r--r--Documentation/kernel-parameters.txt28
-rw-r--r--Documentation/pcmcia/driver-changes.txt12
-rw-r--r--Documentation/trace/ftrace-design.txt13
-rw-r--r--MAINTAINERS5
-rw-r--r--Makefile3
-rw-r--r--arch/avr32/include/asm/bug.h2
-rw-r--r--arch/ia64/include/asm/swiotlb.h2
-rw-r--r--arch/ia64/kernel/pci-swiotlb.c4
-rw-r--r--arch/mips/include/asm/bug.h4
-rw-r--r--arch/mips/kernel/syscall.c4
-rw-r--r--arch/mips/rb532/devices.c3
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/s390/Kconfig28
-rw-r--r--arch/s390/include/asm/bug.h2
-rw-r--r--arch/s390/include/asm/spinlock.h29
-rw-r--r--arch/s390/kernel/ftrace.c67
-rw-r--r--arch/x86/include/asm/amd_iommu.h16
-rw-r--r--arch/x86/include/asm/amd_iommu_proto.h38
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h54
-rw-r--r--arch/x86/include/asm/bug.h4
-rw-r--r--arch/x86/include/asm/calgary.h2
-rw-r--r--arch/x86/include/asm/device.h2
-rw-r--r--arch/x86/include/asm/dma-mapping.h5
-rw-r--r--arch/x86/include/asm/gart.h9
-rw-r--r--arch/x86/include/asm/iommu.h2
-rw-r--r--arch/x86/include/asm/swiotlb.h9
-rw-r--r--arch/x86/include/asm/x86_init.h10
-rw-r--r--arch/x86/kernel/amd_iommu.c1247
-rw-r--r--arch/x86/kernel/amd_iommu_init.c94
-rw-r--r--arch/x86/kernel/aperture_64.c4
-rw-r--r--arch/x86/kernel/crash.c5
-rw-r--r--arch/x86/kernel/entry_32.S7
-rw-r--r--arch/x86/kernel/entry_64.S6
-rw-r--r--arch/x86/kernel/ftrace.c84
-rw-r--r--arch/x86/kernel/pci-calgary_64.c94
-rw-r--r--arch/x86/kernel/pci-dma.c39
-rw-r--r--arch/x86/kernel/pci-gart_64.c156
-rw-r--r--arch/x86/kernel/pci-nommu.c11
-rw-r--r--arch/x86/kernel/pci-swiotlb.c18
-rw-r--r--arch/x86/kernel/reboot.c4
-rw-r--r--arch/x86/kernel/x86_init.c8
-rw-r--r--arch/x86/mm/testmmiotrace.c29
-rw-r--r--drivers/ata/pata_pcmcia.c17
-rw-r--r--drivers/block/aoe/aoecmd.c23
-rw-r--r--drivers/bluetooth/bluecard_cs.c16
-rw-r--r--drivers/bluetooth/bt3c_cs.c13
-rw-r--r--drivers/bluetooth/btuart_cs.c13
-rw-r--r--drivers/bluetooth/dtl1_cs.c12
-rw-r--r--drivers/char/agp/Kconfig3
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c73
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c52
-rw-r--r--drivers/char/pcmcia/ipwireless/hardware.c8
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c296
-rw-r--r--drivers/char/pcmcia/synclink_cs.c80
-rw-r--r--drivers/char/tpm/tpm.c2
-rw-r--r--drivers/char/tpm/tpm_tis.c11
-rw-r--r--drivers/gpio/langwell_gpio.c11
-rw-r--r--drivers/ide/ide-cs.c33
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h21
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c4
-rw-r--r--drivers/isdn/hisax/avma1_cs.c28
-rw-r--r--drivers/isdn/hisax/elsa_cs.c46
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c64
-rw-r--r--drivers/isdn/hisax/teles_cs.c38
-rw-r--r--drivers/md/raid1.c7
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c1
-rw-r--r--drivers/mfd/wm831x-core.c2
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c197
-rw-r--r--drivers/net/pcmcia/3c574_cs.c90
-rw-r--r--drivers/net/pcmcia/3c589_cs.c102
-rw-r--r--drivers/net/pcmcia/axnet_cs.c56
-rw-r--r--drivers/net/pcmcia/com20020_cs.c63
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c189
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c71
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c173
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c80
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c340
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c259
-rw-r--r--drivers/net/wireless/airo_cs.c55
-rw-r--r--drivers/net/wireless/atmel_cs.c51
-rw-r--r--drivers/net/wireless/b43/pcmcia.c26
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c51
-rw-r--r--drivers/net/wireless/libertas/if_cs.c72
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c33
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c60
-rw-r--r--drivers/net/wireless/ray_cs.c359
-rw-r--r--drivers/net/wireless/wl3501_cs.c74
-rw-r--r--drivers/parport/parport_cs.c38
-rw-r--r--drivers/pci/dmar.c7
-rw-r--r--drivers/pci/intel-iommu.c6
-rw-r--r--drivers/pcmcia/Kconfig42
-rw-r--r--drivers/pcmcia/Makefile10
-rw-r--r--drivers/pcmcia/cardbus.c4
-rw-r--r--drivers/pcmcia/cirrus.h10
-rw-r--r--drivers/pcmcia/cistpl.c71
-rw-r--r--drivers/pcmcia/cs.c67
-rw-r--r--drivers/pcmcia/cs_internal.h42
-rw-r--r--drivers/pcmcia/ds.c188
-rw-r--r--drivers/pcmcia/i82365.c37
-rw-r--r--drivers/pcmcia/m32r_cfc.c105
-rw-r--r--drivers/pcmcia/m32r_pcc.c51
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c40
-rw-r--r--drivers/pcmcia/o2micro.h22
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c92
-rw-r--r--drivers/pcmcia/pcmcia_resource.c482
-rw-r--r--drivers/pcmcia/pd6729.c70
-rw-r--r--drivers/pcmcia/pd6729.h7
-rw-r--r--drivers/pcmcia/pxa2xx_base.c94
-rw-r--r--drivers/pcmcia/pxa2xx_base.h3
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c2
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c2
-rw-r--r--drivers/pcmcia/pxa2xx_e740.c2
-rw-r--r--drivers/pcmcia/pxa2xx_lubbock.c14
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c2
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c2
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c2
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c2
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c4
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c2
-rw-r--r--drivers/pcmcia/rsrc_mgr.c1
-rw-r--r--drivers/pcmcia/sa1100_assabet.c2
-rw-r--r--drivers/pcmcia/sa1100_badge4.c11
-rw-r--r--drivers/pcmcia/sa1100_cerf.c2
-rw-r--r--drivers/pcmcia/sa1100_generic.c11
-rw-r--r--drivers/pcmcia/sa1100_h3600.c4
-rw-r--r--drivers/pcmcia/sa1100_jornada720.c42
-rw-r--r--drivers/pcmcia/sa1100_neponset.c13
-rw-r--r--drivers/pcmcia/sa1100_shannon.c2
-rw-r--r--drivers/pcmcia/sa1100_simpad.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c65
-rw-r--r--drivers/pcmcia/sa1111_generic.h17
-rw-r--r--drivers/pcmcia/sa11xx_base.c99
-rw-r--r--drivers/pcmcia/sa11xx_base.h2
-rw-r--r--drivers/pcmcia/soc_common.c225
-rw-r--r--drivers/pcmcia/soc_common.h10
-rw-r--r--drivers/pcmcia/tcic.c29
-rw-r--r--drivers/pcmcia/topic.h15
-rw-r--r--drivers/regulator/wm831x-isink.c2
-rw-r--r--drivers/rtc/rtc-pcf50633.c3
-rw-r--r--drivers/rtc/rtc-x1205.c6
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c42
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c44
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c8
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c45
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c42
-rw-r--r--drivers/serial/serial_cs.c143
-rw-r--r--drivers/spi/spi_txx9.c13
-rw-r--r--drivers/ssb/pcmcia.c232
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c221
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c203
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c237
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c236
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c225
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c127
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c219
-rw-r--r--drivers/staging/netwave/netwave_cs.c95
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c35
-rw-r--r--drivers/telephony/ixj_pcmcia.c36
-rw-r--r--drivers/usb/host/sl811_cs.c49
-rw-r--r--drivers/video/da8xx-fb.c11
-rw-r--r--drivers/video/gbefb.c2
-rw-r--r--drivers/watchdog/rc32434_wdt.c4
-rw-r--r--fs/exec.c4
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/gfs2/Kconfig2
-rw-r--r--fs/gfs2/acl.c357
-rw-r--r--fs/gfs2/acl.h24
-rw-r--r--fs/gfs2/aops.c20
-rw-r--r--fs/gfs2/dir.c34
-rw-r--r--fs/gfs2/glock.c31
-rw-r--r--fs/gfs2/glock.h9
-rw-r--r--fs/gfs2/glops.c5
-rw-r--r--fs/gfs2/incore.h5
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/gfs2/log.c2
-rw-r--r--fs/gfs2/lops.c4
-rw-r--r--fs/gfs2/ops_fstype.c154
-rw-r--r--fs/gfs2/quota.c393
-rw-r--r--fs/gfs2/quota.h5
-rw-r--r--fs/gfs2/recovery.c2
-rw-r--r--fs/gfs2/rgrp.c14
-rw-r--r--fs/gfs2/super.c110
-rw-r--r--fs/gfs2/super.h4
-rw-r--r--fs/gfs2/sys.c14
-rw-r--r--fs/gfs2/xattr.c74
-rw-r--r--fs/gfs2/xattr.h8
-rw-r--r--fs/inode.c10
-rw-r--r--fs/namespace.c20
-rw-r--r--fs/open.c27
-rw-r--r--fs/quota/Kconfig2
-rw-r--r--fs/quota/dquot.c93
-rw-r--r--fs/quota/quota.c93
-rw-r--r--fs/xattr_acl.c2
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/bootmem.h1
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/compiler-gcc.h1
-rw-r--r--include/linux/compiler-gcc4.h14
-rw-r--r--include/linux/compiler.h9
-rw-r--r--include/linux/dmar.h15
-rw-r--r--include/linux/gfs2_ondisk.h6
-rw-r--r--include/linux/hardirq.h24
-rw-r--r--include/linux/init_task.h4
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/irqflags.h2
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/lsm_audit.h18
-rw-r--r--include/linux/mfd/wm831x/regulator.h4
-rw-r--r--include/linux/net.h1
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/posix_acl.h14
-rw-r--r--include/linux/quota.h11
-rw-r--r--include/linux/ratelimit.h33
-rw-r--r--include/linux/rcupdate.h10
-rw-r--r--include/linux/rcutiny.h104
-rw-r--r--include/linux/rcutree.h7
-rw-r--r--include/linux/sched.h23
-rw-r--r--include/linux/securebits.h24
-rw-r--r--include/linux/security.h48
-rw-r--r--include/linux/smp.h11
-rw-r--r--include/linux/smp_lock.h21
-rw-r--r--include/linux/spinlock.h6
-rw-r--r--include/linux/spinlock_api_smp.h75
-rw-r--r--include/linux/srcu.h1
-rw-r--r--include/linux/swiotlb.h12
-rw-r--r--include/linux/tpm.h9
-rw-r--r--include/pcmcia/cs.h14
-rw-r--r--include/pcmcia/cs_types.h3
-rw-r--r--include/pcmcia/ds.h88
-rw-r--r--include/pcmcia/ss.h13
-rw-r--r--include/trace/events/bkl.h61
-rw-r--r--include/trace/events/syscalls.h3
-rw-r--r--include/trace/ftrace.h2
-rw-r--r--include/trace/power.h32
-rw-r--r--include/trace/syscall.h2
-rw-r--r--init/Kconfig12
-rw-r--r--init/main.c11
-rw-r--r--kernel/Kconfig.locks202
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/capability.c15
-rw-r--r--kernel/hung_task.c2
-rw-r--r--kernel/irq/chip.c6
-rw-r--r--kernel/irq/proc.c40
-rw-r--r--kernel/irq/spurious.c14
-rw-r--r--kernel/kmod.c8
-rw-r--r--kernel/kprobes.c4
-rw-r--r--kernel/module.c5
-rw-r--r--kernel/mutex.c4
-rw-r--r--kernel/printk.c7
-rw-r--r--kernel/rcupdate.c122
-rw-r--r--kernel/rcutiny.c282
-rw-r--r--kernel/rcutorture.c65
-rw-r--r--kernel/rcutree.c465
-rw-r--r--kernel/rcutree.h69
-rw-r--r--kernel/rcutree_plugin.h309
-rw-r--r--kernel/rcutree_trace.c12
-rw-r--r--kernel/sched.c3
-rw-r--r--kernel/signal.c46
-rw-r--r--kernel/smp.c56
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/spinlock.c310
-rw-r--r--kernel/srcu.c74
-rw-r--r--kernel/sysctl.c3
-rw-r--r--kernel/trace/ftrace.c375
-rw-r--r--kernel/trace/ring_buffer.c9
-rw-r--r--kernel/trace/ring_buffer_benchmark.c85
-rw-r--r--kernel/trace/trace.c45
-rw-r--r--kernel/trace/trace.h44
-rw-r--r--kernel/trace/trace_clock.c8
-rw-r--r--kernel/trace/trace_events.c23
-rw-r--r--kernel/trace/trace_events_filter.c155
-rw-r--r--kernel/trace/trace_export.c4
-rw-r--r--kernel/trace/trace_syscalls.c86
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/kernel_lock.c20
-rw-r--r--lib/ratelimit.c45
-rw-r--r--lib/swiotlb.c46
-rw-r--r--mm/bootmem.c24
-rw-r--r--mm/mmap.c4
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/core/utils.c2
-rwxr-xr-xscripts/recordmcount.pl219
-rw-r--r--scripts/selinux/Makefile4
-rw-r--r--scripts/selinux/genheaders/.gitignore1
-rw-r--r--scripts/selinux/genheaders/Makefile5
-rw-r--r--scripts/selinux/genheaders/genheaders.c118
-rw-r--r--scripts/selinux/mdp/mdp.c151
-rw-r--r--security/Kconfig54
-rw-r--r--security/Makefile1
-rw-r--r--security/capability.c21
-rw-r--r--security/commoncap.c74
-rw-r--r--security/integrity/ima/Kconfig1
-rw-r--r--security/lsm_audit.c4
-rw-r--r--security/min_addr.c3
-rw-r--r--security/root_plug.c90
-rw-r--r--security/security.c61
-rw-r--r--security/selinux/.gitignore2
-rw-r--r--security/selinux/Makefile10
-rw-r--r--security/selinux/avc.c78
-rw-r--r--security/selinux/hooks.c25
-rw-r--r--security/selinux/include/av_inherit.h34
-rw-r--r--security/selinux/include/av_perm_to_string.h183
-rw-r--r--security/selinux/include/av_permissions.h870
-rw-r--r--security/selinux/include/avc_ss.h21
-rw-r--r--security/selinux/include/class_to_string.h80
-rw-r--r--security/selinux/include/classmap.h150
-rw-r--r--security/selinux/include/common_perm_to_string.h58
-rw-r--r--security/selinux/include/flask.h91
-rw-r--r--security/selinux/include/security.h13
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/Makefile2
-rw-r--r--security/selinux/ss/mls.c2
-rw-r--r--security/selinux/ss/policydb.c47
-rw-r--r--security/selinux/ss/policydb.h7
-rw-r--r--security/selinux/ss/services.c562
-rw-r--r--security/tomoyo/common.c200
-rw-r--r--security/tomoyo/common.h4
-rw-r--r--security/tomoyo/realpath.c13
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c6
-rw-r--r--sound/pcmcia/vx/vxpocket.c6
325 files changed, 9045 insertions, 9687 deletions
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 187bbf10c923..8608fd85e921 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -1,185 +1,10 @@
1CONFIG_RCU_TRACE debugfs Files and Formats 1CONFIG_RCU_TRACE debugfs Files and Formats
2 2
3 3
4The rcupreempt and rcutree implementations of RCU provide debugfs trace 4The rcutree implementation of RCU provides debugfs trace output that
5output that summarizes counters and state. This information is useful for 5summarizes counters and state. This information is useful for debugging
6debugging RCU itself, and can sometimes also help to debug abuses of RCU. 6RCU itself, and can sometimes also help to debug abuses of RCU.
7Note that the rcuclassic implementation of RCU does not provide debugfs 7The following sections describe the debugfs files and formats.
8trace output.
9
10The following sections describe the debugfs files and formats for
11preemptable RCU (rcupreempt) and hierarchical RCU (rcutree).
12
13
14Preemptable RCU debugfs Files and Formats
15
16This implementation of RCU provides three debugfs files under the
17top-level directory RCU: rcu/rcuctrs (which displays the per-CPU
18counters used by preemptable RCU) rcu/rcugp (which displays grace-period
19counters), and rcu/rcustats (which internal counters for debugging RCU).
20
21The output of "cat rcu/rcuctrs" looks as follows:
22
23CPU last cur F M
24 0 5 -5 0 0
25 1 -1 0 0 0
26 2 0 1 0 0
27 3 0 1 0 0
28 4 0 1 0 0
29 5 0 1 0 0
30 6 0 2 0 0
31 7 0 -1 0 0
32 8 0 1 0 0
33ggp = 26226, state = waitzero
34
35The per-CPU fields are as follows:
36
37o "CPU" gives the CPU number. Offline CPUs are not displayed.
38
39o "last" gives the value of the counter that is being decremented
40 for the current grace period phase. In the example above,
41 the counters sum to 4, indicating that there are still four
42 RCU read-side critical sections still running that started
43 before the last counter flip.
44
45o "cur" gives the value of the counter that is currently being
46 both incremented (by rcu_read_lock()) and decremented (by
47 rcu_read_unlock()). In the example above, the counters sum to
48 1, indicating that there is only one RCU read-side critical section
49 still running that started after the last counter flip.
50
51o "F" indicates whether RCU is waiting for this CPU to acknowledge
52 a counter flip. In the above example, RCU is not waiting on any,
53 which is consistent with the state being "waitzero" rather than
54 "waitack".
55
56o "M" indicates whether RCU is waiting for this CPU to execute a
57 memory barrier. In the above example, RCU is not waiting on any,
58 which is consistent with the state being "waitzero" rather than
59 "waitmb".
60
61o "ggp" is the global grace-period counter.
62
63o "state" is the RCU state, which can be one of the following:
64
65 o "idle": there is no grace period in progress.
66
67 o "waitack": RCU just incremented the global grace-period
68 counter, which has the effect of reversing the roles of
69 the "last" and "cur" counters above, and is waiting for
70 all the CPUs to acknowledge the flip. Once the flip has
71 been acknowledged, CPUs will no longer be incrementing
72 what are now the "last" counters, so that their sum will
73 decrease monotonically down to zero.
74
75 o "waitzero": RCU is waiting for the sum of the "last" counters
76 to decrease to zero.
77
78 o "waitmb": RCU is waiting for each CPU to execute a memory
79 barrier, which ensures that instructions from a given CPU's
80 last RCU read-side critical section cannot be reordered
81 with instructions following the memory-barrier instruction.
82
83The output of "cat rcu/rcugp" looks as follows:
84
85oldggp=48870 newggp=48873
86
87Note that reading from this file provokes a synchronize_rcu(). The
88"oldggp" value is that of "ggp" from rcu/rcuctrs above, taken before
89executing the synchronize_rcu(), and the "newggp" value is also the
90"ggp" value, but taken after the synchronize_rcu() command returns.
91
92
93The output of "cat rcu/rcugp" looks as follows:
94
95na=1337955 nl=40 wa=1337915 wl=44 da=1337871 dl=0 dr=1337871 di=1337871
961=50989 e1=6138 i1=49722 ie1=82 g1=49640 a1=315203 ae1=265563 a2=49640
97z1=1401244 ze1=1351605 z2=49639 m1=5661253 me1=5611614 m2=49639
98
99These are counters tracking internal preemptable-RCU events, however,
100some of them may be useful for debugging algorithms using RCU. In
101particular, the "nl", "wl", and "dl" values track the number of RCU
102callbacks in various states. The fields are as follows:
103
104o "na" is the total number of RCU callbacks that have been enqueued
105 since boot.
106
107o "nl" is the number of RCU callbacks waiting for the previous
108 grace period to end so that they can start waiting on the next
109 grace period.
110
111o "wa" is the total number of RCU callbacks that have started waiting
112 for a grace period since boot. "na" should be roughly equal to
113 "nl" plus "wa".
114
115o "wl" is the number of RCU callbacks currently waiting for their
116 grace period to end.
117
118o "da" is the total number of RCU callbacks whose grace periods
119 have completed since boot. "wa" should be roughly equal to
120 "wl" plus "da".
121
122o "dr" is the total number of RCU callbacks that have been removed
123 from the list of callbacks ready to invoke. "dr" should be roughly
124 equal to "da".
125
126o "di" is the total number of RCU callbacks that have been invoked
127 since boot. "di" should be roughly equal to "da", though some
128 early versions of preemptable RCU had a bug so that only the
129 last CPU's count of invocations was displayed, rather than the
130 sum of all CPU's counts.
131
132o "1" is the number of calls to rcu_try_flip(). This should be
133 roughly equal to the sum of "e1", "i1", "a1", "z1", and "m1"
134 described below. In other words, the number of times that
135 the state machine is visited should be equal to the sum of the
136 number of times that each state is visited plus the number of
137 times that the state-machine lock acquisition failed.
138
139o "e1" is the number of times that rcu_try_flip() was unable to
140 acquire the fliplock.
141
142o "i1" is the number of calls to rcu_try_flip_idle().
143
144o "ie1" is the number of times rcu_try_flip_idle() exited early
145 due to the calling CPU having no work for RCU.
146
147o "g1" is the number of times that rcu_try_flip_idle() decided
148 to start a new grace period. "i1" should be roughly equal to
149 "ie1" plus "g1".
150
151o "a1" is the number of calls to rcu_try_flip_waitack().
152
153o "ae1" is the number of times that rcu_try_flip_waitack() found
154 that at least one CPU had not yet acknowledge the new grace period
155 (AKA "counter flip").
156
157o "a2" is the number of time rcu_try_flip_waitack() found that
158 all CPUs had acknowledged. "a1" should be roughly equal to
159 "ae1" plus "a2". (This particular output was collected on
160 a 128-CPU machine, hence the smaller-than-usual fraction of
161 calls to rcu_try_flip_waitack() finding all CPUs having already
162 acknowledged.)
163
164o "z1" is the number of calls to rcu_try_flip_waitzero().
165
166o "ze1" is the number of times that rcu_try_flip_waitzero() found
167 that not all of the old RCU read-side critical sections had
168 completed.
169
170o "z2" is the number of times that rcu_try_flip_waitzero() finds
171 the sum of the counters equal to zero, in other words, that
172 all of the old RCU read-side critical sections had completed.
173 The value of "z1" should be roughly equal to "ze1" plus
174 "z2".
175
176o "m1" is the number of calls to rcu_try_flip_waitmb().
177
178o "me1" is the number of times that rcu_try_flip_waitmb() finds
179 that at least one CPU has not yet executed a memory barrier.
180
181o "m2" is the number of times that rcu_try_flip_waitmb() finds that
182 all CPUs have executed a memory barrier.
183 8
184 9
185Hierarchical RCU debugfs Files and Formats 10Hierarchical RCU debugfs Files and Formats
@@ -210,9 +35,10 @@ rcu_bh:
210 6 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=859/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 35 6 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=859/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
211 7 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3761/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 36 7 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3761/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
212 37
213The first section lists the rcu_data structures for rcu, the second for 38The first section lists the rcu_data structures for rcu_sched, the second
214rcu_bh. Each section has one line per CPU, or eight for this 8-CPU system. 39for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an
215The fields are as follows: 40additional section for rcu_preempt. Each section has one line per CPU,
41or eight for this 8-CPU system. The fields are as follows:
216 42
217o The number at the beginning of each line is the CPU number. 43o The number at the beginning of each line is the CPU number.
218 CPUs numbers followed by an exclamation mark are offline, 44 CPUs numbers followed by an exclamation mark are offline,
@@ -223,9 +49,9 @@ o The number at the beginning of each line is the CPU number.
223 49
224o "c" is the count of grace periods that this CPU believes have 50o "c" is the count of grace periods that this CPU believes have
225 completed. CPUs in dynticks idle mode may lag quite a ways 51 completed. CPUs in dynticks idle mode may lag quite a ways
226 behind, for example, CPU 4 under "rcu" above, which has slept 52 behind, for example, CPU 4 under "rcu_sched" above, which has
227 through the past 25 RCU grace periods. It is not unusual to 53 slept through the past 25 RCU grace periods. It is not unusual
228 see CPUs lagging by thousands of grace periods. 54 to see CPUs lagging by thousands of grace periods.
229 55
230o "g" is the count of grace periods that this CPU believes have 56o "g" is the count of grace periods that this CPU believes have
231 started. Again, CPUs in dynticks idle mode may lag behind. 57 started. Again, CPUs in dynticks idle mode may lag behind.
@@ -308,8 +134,10 @@ The output of "cat rcu/rcugp" looks as follows:
308rcu_sched: completed=33062 gpnum=33063 134rcu_sched: completed=33062 gpnum=33063
309rcu_bh: completed=464 gpnum=464 135rcu_bh: completed=464 gpnum=464
310 136
311Again, this output is for both "rcu" and "rcu_bh". The fields are 137Again, this output is for both "rcu_sched" and "rcu_bh". Note that
312taken from the rcu_state structure, and are as follows: 138kernels built with CONFIG_TREE_PREEMPT_RCU will have an additional
139"rcu_preempt" line. The fields are taken from the rcu_state structure,
140and are as follows:
313 141
314o "completed" is the number of grace periods that have completed. 142o "completed" is the number of grace periods that have completed.
315 It is comparable to the "c" field from rcu/rcudata in that a 143 It is comparable to the "c" field from rcu/rcudata in that a
@@ -324,23 +152,24 @@ o "gpnum" is the number of grace periods that have started. It is
324 If these two fields are equal (as they are for "rcu_bh" above), 152 If these two fields are equal (as they are for "rcu_bh" above),
325 then there is no grace period in progress, in other words, RCU 153 then there is no grace period in progress, in other words, RCU
326 is idle. On the other hand, if the two fields differ (as they 154 is idle. On the other hand, if the two fields differ (as they
327 do for "rcu" above), then an RCU grace period is in progress. 155 do for "rcu_sched" above), then an RCU grace period is in progress.
328 156
329 157
330The output of "cat rcu/rcuhier" looks as follows, with very long lines: 158The output of "cat rcu/rcuhier" looks as follows, with very long lines:
331 159
332c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6 160c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6 oqlen=0
3331/1 0:127 ^0 1611/1 .>. 0:127 ^0
3343/3 0:35 ^0 0/0 36:71 ^1 0/0 72:107 ^2 0/0 108:127 ^3 1623/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3
3353/3f 0:5 ^0 2/3 6:11 ^1 0/0 12:17 ^2 0/0 18:23 ^3 0/0 24:29 ^4 0/0 30:35 ^5 0/0 36:41 ^0 0/0 42:47 ^1 0/0 48:53 ^2 0/0 54:59 ^3 0/0 60:65 ^4 0/0 66:71 ^5 0/0 72:77 ^0 0/0 78:83 ^1 0/0 84:89 ^2 0/0 90:95 ^3 0/0 96:101 ^4 0/0 102:107 ^5 0/0 108:113 ^0 0/0 114:119 ^1 0/0 120:125 ^2 0/0 126:127 ^3 1633/3f .>. 0:5 ^0 2/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3
336rcu_bh: 164rcu_bh:
337c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0 165c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0 oqlen=0
3380/1 0:127 ^0 1660/1 .>. 0:127 ^0
3390/3 0:35 ^0 0/0 36:71 ^1 0/0 72:107 ^2 0/0 108:127 ^3 1670/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3
3400/3f 0:5 ^0 0/3 6:11 ^1 0/0 12:17 ^2 0/0 18:23 ^3 0/0 24:29 ^4 0/0 30:35 ^5 0/0 36:41 ^0 0/0 42:47 ^1 0/0 48:53 ^2 0/0 54:59 ^3 0/0 60:65 ^4 0/0 66:71 ^5 0/0 72:77 ^0 0/0 78:83 ^1 0/0 84:89 ^2 0/0 90:95 ^3 0/0 96:101 ^4 0/0 102:107 ^5 0/0 108:113 ^0 0/0 114:119 ^1 0/0 120:125 ^2 0/0 126:127 ^3 1680/3f .>. 0:5 ^0 0/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3
341 169
342This is once again split into "rcu" and "rcu_bh" portions. The fields are 170This is once again split into "rcu_sched" and "rcu_bh" portions,
343as follows: 171and CONFIG_TREE_PREEMPT_RCU kernels will again have an additional
172"rcu_preempt" section. The fields are as follows:
344 173
345o "c" is exactly the same as "completed" under rcu/rcugp. 174o "c" is exactly the same as "completed" under rcu/rcugp.
346 175
@@ -372,6 +201,11 @@ o "fqlh" is the number of calls to force_quiescent_state() that
372 exited immediately (without even being counted in nfqs above) 201 exited immediately (without even being counted in nfqs above)
373 due to contention on ->fqslock. 202 due to contention on ->fqslock.
374 203
204o "oqlen" is the number of callbacks on the "orphan" callback
205 list. RCU callbacks are placed on this list by CPUs going
206 offline, and are "adopted" either by the CPU helping the outgoing
207 CPU or by the next rcu_barrier*() call, whichever comes first.
208
375o Each element of the form "1/1 0:127 ^0" represents one struct 209o Each element of the form "1/1 0:127 ^0" represents one struct
376 rcu_node. Each line represents one level of the hierarchy, from 210 rcu_node. Each line represents one level of the hierarchy, from
377 root to leaves. It is best to think of the rcu_data structures 211 root to leaves. It is best to think of the rcu_data structures
@@ -379,7 +213,7 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
379 might be either one, two, or three levels of rcu_node structures, 213 might be either one, two, or three levels of rcu_node structures,
380 depending on the relationship between CONFIG_RCU_FANOUT and 214 depending on the relationship between CONFIG_RCU_FANOUT and
381 CONFIG_NR_CPUS. 215 CONFIG_NR_CPUS.
382 216
383 o The numbers separated by the "/" are the qsmask followed 217 o The numbers separated by the "/" are the qsmask followed
384 by the qsmaskinit. The qsmask will have one bit 218 by the qsmaskinit. The qsmask will have one bit
385 set for each entity in the next lower level that 219 set for each entity in the next lower level that
@@ -389,10 +223,19 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
389 The value of qsmaskinit is assigned to that of qsmask 223 The value of qsmaskinit is assigned to that of qsmask
390 at the beginning of each grace period. 224 at the beginning of each grace period.
391 225
392 For example, for "rcu", the qsmask of the first entry 226 For example, for "rcu_sched", the qsmask of the first
393 of the lowest level is 0x14, meaning that we are still 227 entry of the lowest level is 0x14, meaning that we
394 waiting for CPUs 2 and 4 to check in for the current 228 are still waiting for CPUs 2 and 4 to check in for the
395 grace period. 229 current grace period.
230
231 o The characters separated by the ">" indicate the state
232 of the blocked-tasks lists. A "T" preceding the ">"
233 indicates that at least one task blocked in an RCU
234 read-side critical section blocks the current grace
235 period, while a "." preceding the ">" indicates otherwise.
236 The character following the ">" indicates similarly for
237 the next grace period. A "T" should appear in this
238 field only for rcu-preempt.
396 239
397 o The numbers separated by the ":" are the range of CPUs 240 o The numbers separated by the ":" are the range of CPUs
398 served by this struct rcu_node. This can be helpful 241 served by this struct rcu_node. This can be helpful
@@ -431,8 +274,9 @@ rcu_bh:
431 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 274 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
432 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 275 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
433 276
434As always, this is once again split into "rcu" and "rcu_bh" portions. 277As always, this is once again split into "rcu_sched" and "rcu_bh"
435The fields are as follows: 278portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional
279"rcu_preempt" section. The fields are as follows:
436 280
437o "np" is the number of times that __rcu_pending() has been invoked 281o "np" is the number of times that __rcu_pending() has been invoked
438 for the corresponding flavor of RCU. 282 for the corresponding flavor of RCU.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index e41a7fecf0d3..d542ca243b80 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -830,7 +830,7 @@ sched:	Critical sections	Grace period		Barrier
 SRCU:	Critical sections	Grace period		Barrier
 
 	srcu_read_lock		synchronize_srcu	N/A
-	srcu_read_unlock
+	srcu_read_unlock	synchronize_srcu_expedited
 
 SRCU:	Initialization/cleanup
 	init_srcu_struct
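
As a quick illustration of the SRCU primitives now paired in the table above, here is a minimal, hedged sketch. The names my_srcu, shared_ptr, update_lock, read_value() and update_value() are invented for illustration; only the SRCU/RCU calls themselves are existing kernel interfaces, and synchronize_srcu_expedited() could be substituted for synchronize_srcu() where lower grace-period latency is worth the extra CPU cost.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

static DEFINE_MUTEX(update_lock);	/* hypothetical update-side lock */
static struct srcu_struct my_srcu;	/* set up elsewhere with init_srcu_struct() */
static int *shared_ptr;			/* hypothetical SRCU-protected pointer, assumed non-NULL */

/* Read side: the index returned by srcu_read_lock() must be handed back
 * to the matching srcu_read_unlock(). */
static int read_value(void)
{
	int idx, val;

	idx = srcu_read_lock(&my_srcu);
	val = *rcu_dereference(shared_ptr);
	srcu_read_unlock(&my_srcu, idx);
	return val;
}

/* Update side: publish the new copy, wait for pre-existing SRCU readers,
 * then free the old copy. */
static void update_value(int *newp)
{
	int *oldp;

	mutex_lock(&update_lock);
	oldp = shared_ptr;
	rcu_assign_pointer(shared_ptr, newp);
	mutex_unlock(&update_lock);

	synchronize_srcu(&my_srcu);	/* or synchronize_srcu_expedited() */
	kfree(oldp);
}
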
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index e1efc400bed6..e151b2a36267 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -65,6 +65,7 @@ aicdb.h*
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
+av_permissions.h
 bbootsect
 bin2c
 binkernel.spec
@@ -95,12 +96,14 @@ docproc
 elf2ecoff
 elfconfig.h*
 fixdep
+flask.h
 fore200e_mkfirm
 fore200e_pca_fw.c*
 gconf
 gen-devlist
 gen_crc32table
 gen_init_cpio
+genheaders
 genksyms
 *_gray256.c
 ihex2fw
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 52c34b4f567e..d8ce217d1f72 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -85,7 +85,6 @@ parameter is applicable:
 	PPT	Parallel port support is enabled.
 	PS2	Appropriate PS/2 support is enabled.
 	RAM	RAM disk support is enabled.
-	ROOTPLUG The example Root Plug LSM is enabled.
 	S390	S390 architecture is enabled.
 	SCSI	Appropriate SCSI support is enabled.
 			A lot of drivers has their options described inside of
@@ -779,6 +778,13 @@ and is between 256 and 4096 characters. It is defined in the file
 			by the set_ftrace_notrace file in the debugfs
 			tracing directory.
 
+	ftrace_graph_filter=[function-list]
+			[FTRACE] Limit the top level callers functions traced
+			by the function graph tracer at boot up.
+			function-list is a comma separated list of functions
+			that can be changed at run time by the
+			set_graph_function file in the debugfs tracing directory.
+
 	gamecon.map[2|3]=
 			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
 			support via parallel port (up to 5 devices per port)
@@ -2032,8 +2038,15 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	print-fatal-signals=
 			[KNL] debug: print fatal signals
-			print-fatal-signals=1: print segfault info to
-			the kernel console.
+
+			If enabled, warn about various signal handling
+			related application anomalies: too many signals,
+			too many POSIX.1 timers, fatal signals causing a
+			coredump - etc.
+
+			If you hit the warning due to signal overflow,
+			you might want to try "ulimit -i unlimited".
+
 			default: off.
 
 	printk.time=	Show timing data prefixed to each printk message line
@@ -2164,15 +2177,6 @@ and is between 256 and 4096 characters. It is defined in the file
 			Useful for devices that are detected asynchronously
 			(e.g. USB and MMC devices).
 
-	root_plug.vendor_id=
-			[ROOTPLUG] Override the default vendor ID
-
-	root_plug.product_id=
-			[ROOTPLUG] Override the default product ID
-
-	root_plug.debug=
-			[ROOTPLUG] Enable debugging output
-
 	rw		[KNL] Mount root device read-write on boot
 
 	S		[KNL] Run init in single mode
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index 059934363caf..446f43b309df 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -1,5 +1,17 @@
 This file details changes in 2.6 which affect PCMCIA card driver authors:
 
+* no cs_error / CS_CHECK / CONFIG_PCMCIA_DEBUG (as of 2.6.33)
+   Instead of the cs_error() callback or the CS_CHECK() macro, please use
+   Linux-style checking of return values, and -- if necessary -- debug
+   messages using "dev_dbg()" or "pr_debug()".
+
+* New CIS tuple access (as of 2.6.33)
+   Instead of pcmcia_get_{first,next}_tuple(), pcmcia_get_tuple_data() and
+   pcmcia_parse_tuple(), a driver shall use "pcmcia_get_tuple()" if it is
+   only interested in one (raw) tuple, or "pcmcia_loop_tuple()" if it is
+   interested in all tuples of one type. To decode the MAC from CISTPL_FUNCE,
+   a new helper "pcmcia_get_mac_from_cis()" was added.
+
 * New configuration loop helper (as of 2.6.28)
   By calling pcmcia_loop_config(), a driver can iterate over all available
   configuration options. During a driver's probe() phase, one doesn't need
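
To make the first new entry above concrete, here is a minimal, hedged sketch of the error-handling style it asks for. The function my_pcmcia_config() and the choice of call being checked are hypothetical; dev_dbg() and pcmcia_request_configuration() exist at this point in the tree, but treat the details as an illustration rather than a template taken from this patch.

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

/* Hypothetical configuration step: check return values directly and emit
 * debug output with dev_dbg() instead of the removed CS_CHECK()/cs_error(). */
static int my_pcmcia_config(struct pcmcia_device *link)
{
	int ret;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret) {
		dev_dbg(&link->dev, "RequestConfiguration failed: %d\n", ret);
		return ret;
	}

	return 0;
}
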
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
index 7003e10f10f5..641a1ef2a7ff 100644
--- a/Documentation/trace/ftrace-design.txt
+++ b/Documentation/trace/ftrace-design.txt
@@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option.
 <details to be filled>
 
 
-HAVE_FTRACE_SYSCALLS
+HAVE_SYSCALL_TRACEPOINTS
 ---------------------
 
-<details to be filled>
+You need very few things to get the syscalls tracing in an arch.
+
+- Have a NR_syscalls variable in <asm/unistd.h> that provides the number
+  of syscalls supported by the arch.
+- Implement arch_syscall_addr() that resolves a syscall address from a
+  syscall number.
+- Support the TIF_SYSCALL_TRACEPOINT thread flags
+- Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace
+  in the ptrace syscalls tracing path.
+- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
 
 
 HAVE_FTRACE_MCOUNT_RECORD
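
One requirement from the list above, arch_syscall_addr(), is small enough to show in full. The sketch below mirrors the s390 implementation added further down in this same merge (arch/s390/kernel/ftrace.c); other architectures differ mainly in the element type of their sys_call_table.

extern unsigned int sys_call_table[];	/* element type is architecture-specific */

/* Map a syscall number to the address of its handler by indexing the
 * architecture's syscall table. */
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
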
diff --git a/MAINTAINERS b/MAINTAINERS
index 1186b1978b2e..89ceb4005c5d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3023,11 +3023,8 @@ S:	Maintained
 F:	fs/autofs4/
 
 KERNEL BUILD
-M:	Sam Ravnborg <sam@ravnborg.org>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-next.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-fixes.git
 L:	linux-kbuild@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	Documentation/kbuild/
 F:	Makefile
 F:	scripts/Makefile.*
diff --git a/Makefile b/Makefile
index ad8260102f64..33d4732a6c4a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 32
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
@@ -379,6 +379,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
 PHONY += scripts_basic
 scripts_basic:
 	$(Q)$(MAKE) $(build)=scripts/basic
+	$(Q)rm -f .tmp_quiet_recordmcount
 
 # To avoid any implicit rule to kick in, define an empty command.
 scripts/basic/%: scripts_basic ;
diff --git a/arch/avr32/include/asm/bug.h b/arch/avr32/include/asm/bug.h
index 331d45bab18f..2aa373cc61b5 100644
--- a/arch/avr32/include/asm/bug.h
+++ b/arch/avr32/include/asm/bug.h
@@ -52,7 +52,7 @@
 #define BUG()						\
 	do {						\
 		_BUG_OR_WARN(0);			\
-		for (;;);				\
+		unreachable();				\
 	} while (0)
 
 #define WARN_ON(condition)				\
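
This hunk, like the mips and s390 BUG() hunks later in the merge, swaps a hand-rolled endless loop for unreachable(). The helper itself comes from the include/linux/compiler*.h changes listed in the diffstat; the sketch below shows the usual definition pattern, and is hedged because the exact version checks and placement used by this merge may differ.

/* Hedged sketch of the usual unreachable() wiring, not necessarily the exact
 * hunk from include/linux/compiler-gcc4.h or compiler.h in this merge.
 * gcc >= 4.5 provides __builtin_unreachable(), which tells the optimizer
 * that control never reaches this point; older compilers fall back to a
 * do-nothing endless loop so that BUG() still never returns. */
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
# define unreachable()	__builtin_unreachable()
#else
# define unreachable()	do { } while (1)
#endif
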
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index dcbaea7ce128..f0acde68aaea 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -4,8 +4,6 @@
4#include <linux/dma-mapping.h> 4#include <linux/dma-mapping.h>
5#include <linux/swiotlb.h> 5#include <linux/swiotlb.h>
6 6
7extern int swiotlb_force;
8
9#ifdef CONFIG_SWIOTLB 7#ifdef CONFIG_SWIOTLB
10extern int swiotlb; 8extern int swiotlb;
11extern void pci_swiotlb_init(void); 9extern void pci_swiotlb_init(void);
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8431c6..53292abf846c 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = {
41void __init swiotlb_dma_init(void) 41void __init swiotlb_dma_init(void)
42{ 42{
43 dma_ops = &swiotlb_dma_ops; 43 dma_ops = &swiotlb_dma_ops;
44 swiotlb_init(); 44 swiotlb_init(1);
45} 45}
46 46
47void __init pci_swiotlb_init(void) 47void __init pci_swiotlb_init(void)
@@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void)
51 swiotlb = 1; 51 swiotlb = 1;
52 printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); 52 printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
53 machvec_init("dig"); 53 machvec_init("dig");
54 swiotlb_init(); 54 swiotlb_init(1);
55 dma_ops = &swiotlb_dma_ops; 55 dma_ops = &swiotlb_dma_ops;
56#else 56#else
57 panic("Unable to find Intel IOMMU"); 57 panic("Unable to find Intel IOMMU");
diff --git a/arch/mips/include/asm/bug.h b/arch/mips/include/asm/bug.h
index 6cf29c26e873..540c98a810d1 100644
--- a/arch/mips/include/asm/bug.h
+++ b/arch/mips/include/asm/bug.h
@@ -11,9 +11,7 @@
 static inline void __noreturn BUG(void)
 {
 	__asm__ __volatile__("break %0" : : "i" (BRK_BUG));
-	/* Fool GCC into thinking the function doesn't return. */
-	while (1)
-		;
+	unreachable();
 }
 
 #define HAVE_ARCH_BUG
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 3fe1fcfa2e73..fe0d79805603 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -306,6 +306,7 @@ static inline int mips_atomic_set(struct pt_regs *regs,
306 306
307 if (cpu_has_llsc && R10000_LLSC_WAR) { 307 if (cpu_has_llsc && R10000_LLSC_WAR) {
308 __asm__ __volatile__ ( 308 __asm__ __volatile__ (
309 " .set mips3 \n"
309 " li %[err], 0 \n" 310 " li %[err], 0 \n"
310 "1: ll %[old], (%[addr]) \n" 311 "1: ll %[old], (%[addr]) \n"
311 " move %[tmp], %[new] \n" 312 " move %[tmp], %[new] \n"
@@ -320,6 +321,7 @@ static inline int mips_atomic_set(struct pt_regs *regs,
320 " "STR(PTR)" 1b, 4b \n" 321 " "STR(PTR)" 1b, 4b \n"
321 " "STR(PTR)" 2b, 4b \n" 322 " "STR(PTR)" 2b, 4b \n"
322 " .previous \n" 323 " .previous \n"
324 " .set mips0 \n"
323 : [old] "=&r" (old), 325 : [old] "=&r" (old),
324 [err] "=&r" (err), 326 [err] "=&r" (err),
325 [tmp] "=&r" (tmp) 327 [tmp] "=&r" (tmp)
@@ -329,6 +331,7 @@ static inline int mips_atomic_set(struct pt_regs *regs,
329 : "memory"); 331 : "memory");
330 } else if (cpu_has_llsc) { 332 } else if (cpu_has_llsc) {
331 __asm__ __volatile__ ( 333 __asm__ __volatile__ (
334 " .set mips3 \n"
332 " li %[err], 0 \n" 335 " li %[err], 0 \n"
333 "1: ll %[old], (%[addr]) \n" 336 "1: ll %[old], (%[addr]) \n"
334 " move %[tmp], %[new] \n" 337 " move %[tmp], %[new] \n"
@@ -347,6 +350,7 @@ static inline int mips_atomic_set(struct pt_regs *regs,
347 " "STR(PTR)" 1b, 5b \n" 350 " "STR(PTR)" 1b, 5b \n"
348 " "STR(PTR)" 2b, 5b \n" 351 " "STR(PTR)" 2b, 5b \n"
349 " .previous \n" 352 " .previous \n"
353 " .set mips0 \n"
350 : [old] "=&r" (old), 354 : [old] "=&r" (old),
351 [err] "=&r" (err), 355 [err] "=&r" (err),
352 [tmp] "=&r" (tmp) 356 [tmp] "=&r" (tmp)
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 9f40e1ff9b4f..041fc1afc3f4 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -110,7 +110,6 @@ static struct korina_device korina_dev0_data = {
110static struct platform_device korina_dev0 = { 110static struct platform_device korina_dev0 = {
111 .id = -1, 111 .id = -1,
112 .name = "korina", 112 .name = "korina",
113 .dev.driver_data = &korina_dev0_data,
114 .resource = korina_dev0_res, 113 .resource = korina_dev0_res,
115 .num_resources = ARRAY_SIZE(korina_dev0_res), 114 .num_resources = ARRAY_SIZE(korina_dev0_res),
116}; 115};
@@ -332,6 +331,8 @@ static int __init plat_setup_devices(void)
332 /* set the uart clock to the current cpu frequency */ 331 /* set the uart clock to the current cpu frequency */
333 rb532_uart_res[0].uartclk = idt_cpu_freq; 332 rb532_uart_res[0].uartclk = idt_cpu_freq;
334 333
334 dev_set_drvdata(&korina_dev0.dev, &korina_dev0_data);
335
335 return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); 336 return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
336} 337}
337 338
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 53bcf3d792db..b152de3e64d4 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -345,7 +345,7 @@ void __init setup_arch(char **cmdline_p)
345 345
346#ifdef CONFIG_SWIOTLB 346#ifdef CONFIG_SWIOTLB
347 if (ppc_swiotlb_enable) 347 if (ppc_swiotlb_enable)
348 swiotlb_init(); 348 swiotlb_init(1);
349#endif 349#endif
350 350
351 paging_init(); 351 paging_init();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04f638d82fb3..df2c9e932b37 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -550,7 +550,7 @@ void __init setup_arch(char **cmdline_p)
550 550
551#ifdef CONFIG_SWIOTLB 551#ifdef CONFIG_SWIOTLB
552 if (ppc_swiotlb_enable) 552 if (ppc_swiotlb_enable)
553 swiotlb_init(); 553 swiotlb_init(1);
554#endif 554#endif
555 555
556 paging_init(); 556 paging_init();
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 43c0acad7160..16c673096a22 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,6 +95,34 @@ config S390
95 select HAVE_ARCH_TRACEHOOK 95 select HAVE_ARCH_TRACEHOOK
96 select INIT_ALL_POSSIBLE 96 select INIT_ALL_POSSIBLE
97 select HAVE_PERF_EVENTS 97 select HAVE_PERF_EVENTS
98 select ARCH_INLINE_SPIN_TRYLOCK
99 select ARCH_INLINE_SPIN_TRYLOCK_BH
100 select ARCH_INLINE_SPIN_LOCK
101 select ARCH_INLINE_SPIN_LOCK_BH
102 select ARCH_INLINE_SPIN_LOCK_IRQ
103 select ARCH_INLINE_SPIN_LOCK_IRQSAVE
104 select ARCH_INLINE_SPIN_UNLOCK
105 select ARCH_INLINE_SPIN_UNLOCK_BH
106 select ARCH_INLINE_SPIN_UNLOCK_IRQ
107 select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
108 select ARCH_INLINE_READ_TRYLOCK
109 select ARCH_INLINE_READ_LOCK
110 select ARCH_INLINE_READ_LOCK_BH
111 select ARCH_INLINE_READ_LOCK_IRQ
112 select ARCH_INLINE_READ_LOCK_IRQSAVE
113 select ARCH_INLINE_READ_UNLOCK
114 select ARCH_INLINE_READ_UNLOCK_BH
115 select ARCH_INLINE_READ_UNLOCK_IRQ
116 select ARCH_INLINE_READ_UNLOCK_IRQRESTORE
117 select ARCH_INLINE_WRITE_TRYLOCK
118 select ARCH_INLINE_WRITE_LOCK
119 select ARCH_INLINE_WRITE_LOCK_BH
120 select ARCH_INLINE_WRITE_LOCK_IRQ
121 select ARCH_INLINE_WRITE_LOCK_IRQSAVE
122 select ARCH_INLINE_WRITE_UNLOCK
123 select ARCH_INLINE_WRITE_UNLOCK_BH
124 select ARCH_INLINE_WRITE_UNLOCK_IRQ
125 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
98 126
99config SCHED_OMIT_FRAME_POINTER 127config SCHED_OMIT_FRAME_POINTER
100 bool 128 bool
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 7efd0abe8887..efb74fd5156e 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -49,7 +49,7 @@
 
 #define BUG() do {					\
 	__EMIT_BUG(0);					\
-	for (;;);					\
+	unreachable();					\
 } while (0)
 
 #define WARN_ON(x) ({					\
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 41ce6861174e..c9af0d19c7ab 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -191,33 +191,4 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
191#define _raw_read_relax(lock) cpu_relax() 191#define _raw_read_relax(lock) cpu_relax()
192#define _raw_write_relax(lock) cpu_relax() 192#define _raw_write_relax(lock) cpu_relax()
193 193
194#define __always_inline__spin_lock
195#define __always_inline__read_lock
196#define __always_inline__write_lock
197#define __always_inline__spin_lock_bh
198#define __always_inline__read_lock_bh
199#define __always_inline__write_lock_bh
200#define __always_inline__spin_lock_irq
201#define __always_inline__read_lock_irq
202#define __always_inline__write_lock_irq
203#define __always_inline__spin_lock_irqsave
204#define __always_inline__read_lock_irqsave
205#define __always_inline__write_lock_irqsave
206#define __always_inline__spin_trylock
207#define __always_inline__read_trylock
208#define __always_inline__write_trylock
209#define __always_inline__spin_trylock_bh
210#define __always_inline__spin_unlock
211#define __always_inline__read_unlock
212#define __always_inline__write_unlock
213#define __always_inline__spin_unlock_bh
214#define __always_inline__read_unlock_bh
215#define __always_inline__write_unlock_bh
216#define __always_inline__spin_unlock_irq
217#define __always_inline__read_unlock_irq
218#define __always_inline__write_unlock_irq
219#define __always_inline__spin_unlock_irqrestore
220#define __always_inline__read_unlock_irqrestore
221#define __always_inline__write_unlock_irqrestore
222
223#endif /* __ASM_SPINLOCK_H */ 194#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index f5fe34dd821b..5a82bc68193e 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -203,73 +203,10 @@ out:
203 203
204#ifdef CONFIG_FTRACE_SYSCALLS 204#ifdef CONFIG_FTRACE_SYSCALLS
205 205
206extern unsigned long __start_syscalls_metadata[];
207extern unsigned long __stop_syscalls_metadata[];
208extern unsigned int sys_call_table[]; 206extern unsigned int sys_call_table[];
209 207
210static struct syscall_metadata **syscalls_metadata; 208unsigned long __init arch_syscall_addr(int nr)
211
212struct syscall_metadata *syscall_nr_to_meta(int nr)
213{
214 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
215 return NULL;
216
217 return syscalls_metadata[nr];
218}
219
220int syscall_name_to_nr(char *name)
221{
222 int i;
223
224 if (!syscalls_metadata)
225 return -1;
226 for (i = 0; i < NR_syscalls; i++)
227 if (syscalls_metadata[i])
228 if (!strcmp(syscalls_metadata[i]->name, name))
229 return i;
230 return -1;
231}
232
233void set_syscall_enter_id(int num, int id)
234{
235 syscalls_metadata[num]->enter_id = id;
236}
237
238void set_syscall_exit_id(int num, int id)
239{ 209{
240 syscalls_metadata[num]->exit_id = id; 210 return (unsigned long)sys_call_table[nr];
241}
242
243static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
244{
245 struct syscall_metadata *start;
246 struct syscall_metadata *stop;
247 char str[KSYM_SYMBOL_LEN];
248
249 start = (struct syscall_metadata *)__start_syscalls_metadata;
250 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
251 kallsyms_lookup(syscall, NULL, NULL, NULL, str);
252
253 for ( ; start < stop; start++) {
254 if (start->name && !strcmp(start->name + 3, str + 3))
255 return start;
256 }
257 return NULL;
258}
259
260static int __init arch_init_ftrace_syscalls(void)
261{
262 struct syscall_metadata *meta;
263 int i;
264 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
265 GFP_KERNEL);
266 if (!syscalls_metadata)
267 return -ENOMEM;
268 for (i = 0; i < NR_syscalls; i++) {
269 meta = find_syscall_meta((unsigned long)sys_call_table[i]);
270 syscalls_metadata[i] = meta;
271 }
272 return 0;
273} 211}
274arch_initcall(arch_init_ftrace_syscalls);
275#endif 212#endif
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 4b180897e6b5..5af2982133b5 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -23,19 +23,13 @@
23#include <linux/irqreturn.h> 23#include <linux/irqreturn.h>
24 24
25#ifdef CONFIG_AMD_IOMMU 25#ifdef CONFIG_AMD_IOMMU
26extern int amd_iommu_init(void); 26
27extern int amd_iommu_init_dma_ops(void);
28extern int amd_iommu_init_passthrough(void);
29extern void amd_iommu_detect(void); 27extern void amd_iommu_detect(void);
30extern irqreturn_t amd_iommu_int_handler(int irq, void *data); 28
31extern void amd_iommu_flush_all_domains(void);
32extern void amd_iommu_flush_all_devices(void);
33extern void amd_iommu_shutdown(void);
34extern void amd_iommu_apply_erratum_63(u16 devid);
35#else 29#else
36static inline int amd_iommu_init(void) { return -ENODEV; } 30
37static inline void amd_iommu_detect(void) { } 31static inline void amd_iommu_detect(void) { }
38static inline void amd_iommu_shutdown(void) { } 32
39#endif 33#endif
40 34
41#endif /* _ASM_X86_AMD_IOMMU_H */ 35#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
new file mode 100644
index 000000000000..84786fb9a23b
--- /dev/null
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
20#define _ASM_X86_AMD_IOMMU_PROTO_H
21
22struct amd_iommu;
23
24extern int amd_iommu_init_dma_ops(void);
25extern int amd_iommu_init_passthrough(void);
26extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
27extern void amd_iommu_flush_all_domains(void);
28extern void amd_iommu_flush_all_devices(void);
29extern void amd_iommu_apply_erratum_63(u16 devid);
30extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
31
32#ifndef CONFIG_AMD_IOMMU_STATS
33
34static inline void amd_iommu_stats_init(void) { }
35
36#endif /* !CONFIG_AMD_IOMMU_STATS */
37
38#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 2a2cc7a78a81..ba19ad4c47d0 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -25,6 +25,11 @@
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26 26
27/* 27/*
28 * Maximum number of IOMMUs supported
29 */
30#define MAX_IOMMUS 32
31
32/*
28 * some size calculation constants 33 * some size calculation constants
29 */ 34 */
30#define DEV_TABLE_ENTRY_SIZE 32 35#define DEV_TABLE_ENTRY_SIZE 32
@@ -206,6 +211,9 @@ extern bool amd_iommu_dump;
206 printk(KERN_INFO "AMD-Vi: " format, ## arg); \ 211 printk(KERN_INFO "AMD-Vi: " format, ## arg); \
207 } while(0); 212 } while(0);
208 213
214/* global flag if IOMMUs cache non-present entries */
215extern bool amd_iommu_np_cache;
216
209/* 217/*
210 * Make iterating over all IOMMUs easier 218 * Make iterating over all IOMMUs easier
211 */ 219 */
@@ -226,6 +234,8 @@ extern bool amd_iommu_dump;
  * independent of their use.
  */
 struct protection_domain {
+	struct list_head list;  /* for list of all protection domains */
+	struct list_head dev_list; /* List of all devices in this domain */
 	spinlock_t lock;	/* mostly used to lock the page table*/
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
@@ -233,7 +243,20 @@ struct protection_domain {
 	unsigned long flags;	/* flags to find out type of domain */
 	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
+	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 	void *priv;		/* private data */
+
+};
+
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+	struct list_head list;		  /* For domain->dev_list */
+	struct device *dev;		  /* Device this data belongs to */
+	struct device *alias;		  /* The Alias Device */
+	struct protection_domain *domain; /* Domain the device is bound to */
+	atomic_t bind;			  /* Domain attach reference count */
 };
 
 /*
@@ -291,6 +314,9 @@ struct dma_ops_domain {
 struct amd_iommu {
 	struct list_head list;
 
+	/* Index within the IOMMU array */
+	int index;
+
 	/* locks the accesses to the hardware */
 	spinlock_t lock;
 
@@ -357,6 +383,21 @@ struct amd_iommu {
 extern struct list_head amd_iommu_list;
 
 /*
+ * Array with pointers to each IOMMU struct
+ * The indices are referenced in the protection domains
+ */
+extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/* Number of IOMMUs present in the system */
+extern int amd_iommus_present;
+
+/*
+ * Declarations for the global list of all protection domains
+ */
+extern spinlock_t amd_iommu_pd_lock;
+extern struct list_head amd_iommu_pd_list;
+
+/*
  * Structure defining one entry in the device table
  */
 struct dev_table_entry {
@@ -416,15 +457,9 @@ extern unsigned amd_iommu_aperture_order;
 /* largest PCI device id we expect translation requests for */
 extern u16 amd_iommu_last_bdf;
 
-/* data structures for protection domain handling */
-extern struct protection_domain **amd_iommu_pd_table;
-
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
-/* will be 1 if device isolation is enabled */
-extern bool amd_iommu_isolate;
-
 /*
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
@@ -462,11 +497,6 @@ struct __iommu_counter {
 #define ADD_STATS_COUNTER(name, x)
 #define SUB_STATS_COUNTER(name, x)
 
-static inline void amd_iommu_stats_init(void) { }
-
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
-/* some function prototypes */
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
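The new dev_iommu[MAX_IOMMUS] member gives each protection domain a per-IOMMU device count, so flush paths can skip IOMMUs that have no devices in the domain (see iommu_flush_complete() and __iommu_flush_pages() later in this patch). A standalone sketch of that skip logic, with *_sketch types that only stand in for the kernel structures:

#include <stdio.h>

#define MAX_IOMMUS 32

struct protection_domain_sketch {
	unsigned dev_iommu[MAX_IOMMUS];	/* per-IOMMU device count */
};

static void flush_domain(struct protection_domain_sketch *d, int iommus_present)
{
	for (int i = 0; i < iommus_present; ++i) {
		if (!d->dev_iommu[i])
			continue;	/* no device behind IOMMU i - skip it */
		printf("flush IOMMU %d\n", i);
	}
}

int main(void)
{
	struct protection_domain_sketch d = { .dev_iommu = { [1] = 2 } };

	flush_domain(&d, 4);		/* only IOMMU 1 gets flushed */
	return 0;
}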
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index d9cf1cd156d2..f654d1bb17fb 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -22,14 +22,14 @@ do { \
 	".popsection"						\
 	: : "i" (__FILE__), "i" (__LINE__),			\
 	"i" (sizeof(struct bug_entry)));			\
-	for (;;) ;						\
+	unreachable();						\
 } while (0)
 
 #else
 #define BUG()							\
 do {								\
 	asm volatile("ud2");					\
-	for (;;) ;						\
+	unreachable();						\
 } while (0)
 #endif
 
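BUG() now ends in unreachable() instead of an endless for(;;) loop. Assuming unreachable() expands to the compiler's __builtin_unreachable() on toolchains that support it, the annotation lets the optimizer drop the dead loop while still telling it that control never falls out of the macro. A small sketch of the same effect outside the kernel:

static int parity(int x)
{
	switch (x & 1) {
	case 0:
		return 0;
	case 1:
		return 1;
	}
	__builtin_unreachable();	/* tells the optimizer this point is never reached */
}

int main(void)
{
	return parity(3);
}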
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h
index b03bedb62aa7..0918654305af 100644
--- a/arch/x86/include/asm/calgary.h
+++ b/arch/x86/include/asm/calgary.h
@@ -62,10 +62,8 @@ struct cal_chipset_ops {
 extern int use_calgary;
 
 #ifdef CONFIG_CALGARY_IOMMU
-extern int calgary_iommu_init(void);
 extern void detect_calgary(void);
 #else
-static inline int calgary_iommu_init(void) { return 1; }
 static inline void detect_calgary(void) { return; }
 #endif
 
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index cee34e9ca45b..029f230ab637 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -8,7 +8,7 @@ struct dev_archdata {
 #ifdef CONFIG_X86_64
 	struct dma_map_ops *dma_ops;
 #endif
-#ifdef CONFIG_DMAR
+#if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
 };
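The archdata.iommu hook is now compiled in for AMD IOMMU builds as well; amd_iommu.c (later in this patch) stores its per-device struct iommu_dev_data there. A plain-C sketch of the pattern, with *_sketch types standing in for the real ones:

#include <stdlib.h>

struct dev_archdata_sketch { void *iommu; };
struct device_sketch { struct dev_archdata_sketch archdata; };
struct iommu_dev_data_sketch { int domain_id; };

static int init_device(struct device_sketch *dev)
{
	struct iommu_dev_data_sketch *data = calloc(1, sizeof(*data));

	if (!data)
		return -1;
	dev->archdata.iommu = data;	/* opaque to the core, owned by the IOMMU driver */
	return 0;
}

int main(void)
{
	struct device_sketch dev = { { NULL } };
	int ret = init_device(&dev);

	free(dev.archdata.iommu);	/* mirrors iommu_uninit_device() */
	return ret;
}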
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 6a25d5d42836..0f6c02f3b7d4 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -20,7 +20,8 @@
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
 #endif
 
-extern dma_addr_t bad_dma_address;
+#define DMA_ERROR_CODE	0
+
 extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
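With bad_dma_address gone, drivers and the mapping core compare against DMA_ERROR_CODE only through dma_mapping_error(). A driver-side sketch of the intended usage; "mydrv_map" and its arguments are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int mydrv_map(struct device *dev, void *buf, size_t len, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))	/* true when addr == DMA_ERROR_CODE */
		return -ENOMEM;

	*out = addr;
	return 0;
}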
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 6cfdafa409d8..4ac5b0f33fc1 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -35,8 +35,7 @@ extern int gart_iommu_aperture_allowed;
 extern int gart_iommu_aperture_disabled;
 
 extern void early_gart_iommu_check(void);
-extern void gart_iommu_init(void);
-extern void gart_iommu_shutdown(void);
+extern int gart_iommu_init(void);
 extern void __init gart_parse_options(char *);
 extern void gart_iommu_hole_init(void);
 
@@ -48,12 +47,6 @@ extern void gart_iommu_hole_init(void);
 static inline void early_gart_iommu_check(void)
 {
 }
-static inline void gart_iommu_init(void)
-{
-}
-static inline void gart_iommu_shutdown(void)
-{
-}
 static inline void gart_parse_options(char *options)
 {
 }
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index fd6d21bbee6c..345c99cef152 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -1,8 +1,6 @@
 #ifndef _ASM_X86_IOMMU_H
 #define _ASM_X86_IOMMU_H
 
-extern void pci_iommu_shutdown(void);
-extern void no_iommu_init(void);
 extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index b9e4e20174fb..87ffcb12a1b8 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -3,17 +3,14 @@
 
 #include <linux/swiotlb.h>
 
-/* SWIOTLB interface */
-
-extern int swiotlb_force;
-
 #ifdef CONFIG_SWIOTLB
 extern int swiotlb;
-extern void pci_swiotlb_init(void);
+extern int pci_swiotlb_init(void);
 #else
 #define swiotlb 0
-static inline void pci_swiotlb_init(void)
+static inline int pci_swiotlb_init(void)
 {
+	return 0;
 }
 #endif
 
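pci_swiotlb_init() now reports whether the software bounce-buffer path was actually activated, and the stub returns 0 when CONFIG_SWIOTLB is off. A sketch of how an init path could consume that return value; the function name below is made up, the real decision lives in the x86 pci-dma code elsewhere in this series:

#include <linux/init.h>
#include <asm/swiotlb.h>

static void __init dma_backend_select(void)
{
	if (pci_swiotlb_init())
		return;		/* swiotlb installed its dma_ops, nothing more to do */

	/* otherwise fall through to a hardware IOMMU's iommu_init() hook */
}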
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 2c756fd4ab0e..d8e71459f025 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -91,6 +91,14 @@ struct x86_init_timers {
 };
 
 /**
+ * struct x86_init_iommu - platform specific iommu setup
+ * @iommu_init: platform specific iommu setup
+ */
+struct x86_init_iommu {
+	int (*iommu_init)(void);
+};
+
+/**
  * struct x86_init_ops - functions for platform specific setup
  *
  */
@@ -101,6 +109,7 @@ struct x86_init_ops {
 	struct x86_init_oem		oem;
 	struct x86_init_paging		paging;
 	struct x86_init_timers		timers;
+	struct x86_init_iommu		iommu;
 };
 
 /**
@@ -121,6 +130,7 @@ struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
 	unsigned long (*get_wallclock)(void);
 	int (*set_wallclock)(unsigned long nowtime);
+	void (*iommu_shutdown)(void);
 };
 
 extern struct x86_init_ops x86_init;
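The two new hooks let platform IOMMU drivers register their setup and shutdown paths without direct calls from common code. A sketch of the expected wiring, assuming drivers assign the members at detection time; the example_* functions are placeholders, not anything from this series:

#include <linux/init.h>
#include <asm/x86_init.h>

static int __init example_iommu_init(void)
{
	return 0;			/* detect and enable the hardware, 0 on success */
}

static void example_iommu_shutdown(void)
{
	/* disable translation before reboot/kexec */
}

static void __init example_register_iommu(void)
{
	x86_init.iommu.iommu_init   = example_iommu_init;
	x86_platform.iommu_shutdown = example_iommu_shutdown;
}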
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0285521e0a99..32fb09102a13 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  * Leo Duran <leo.duran@amd.com>
  *
@@ -28,6 +28,7 @@
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/amd_iommu_proto.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
 
@@ -56,20 +57,115 @@ struct iommu_cmd {
56 u32 data[4]; 57 u32 data[4];
57}; 58};
58 59
59static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
60 struct unity_map_entry *e);
61static struct dma_ops_domain *find_protection_domain(u16 devid);
62static u64 *alloc_pte(struct protection_domain *domain,
63 unsigned long address, int end_lvl,
64 u64 **pte_page, gfp_t gfp);
65static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
66 unsigned long start_page,
67 unsigned int pages);
68static void reset_iommu_command_buffer(struct amd_iommu *iommu); 60static void reset_iommu_command_buffer(struct amd_iommu *iommu);
69static u64 *fetch_pte(struct protection_domain *domain,
70 unsigned long address, int map_size);
71static void update_domain(struct protection_domain *domain); 61static void update_domain(struct protection_domain *domain);
72 62
63/****************************************************************************
64 *
65 * Helper functions
66 *
67 ****************************************************************************/
68
69static inline u16 get_device_id(struct device *dev)
70{
71 struct pci_dev *pdev = to_pci_dev(dev);
72
73 return calc_devid(pdev->bus->number, pdev->devfn);
74}
75
76static struct iommu_dev_data *get_dev_data(struct device *dev)
77{
78 return dev->archdata.iommu;
79}
80
81/*
82 * In this function the list of preallocated protection domains is traversed to
83 * find the domain for a specific device
84 */
85static struct dma_ops_domain *find_protection_domain(u16 devid)
86{
87 struct dma_ops_domain *entry, *ret = NULL;
88 unsigned long flags;
89 u16 alias = amd_iommu_alias_table[devid];
90
91 if (list_empty(&iommu_pd_list))
92 return NULL;
93
94 spin_lock_irqsave(&iommu_pd_list_lock, flags);
95
96 list_for_each_entry(entry, &iommu_pd_list, list) {
97 if (entry->target_dev == devid ||
98 entry->target_dev == alias) {
99 ret = entry;
100 break;
101 }
102 }
103
104 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
105
106 return ret;
107}
108
109/*
110 * This function checks if the driver got a valid device from the caller to
111 * avoid dereferencing invalid pointers.
112 */
113static bool check_device(struct device *dev)
114{
115 u16 devid;
116
117 if (!dev || !dev->dma_mask)
118 return false;
119
120 /* No device or no PCI device */
121 if (!dev || dev->bus != &pci_bus_type)
122 return false;
123
124 devid = get_device_id(dev);
125
126 /* Out of our scope? */
127 if (devid > amd_iommu_last_bdf)
128 return false;
129
130 if (amd_iommu_rlookup_table[devid] == NULL)
131 return false;
132
133 return true;
134}
135
136static int iommu_init_device(struct device *dev)
137{
138 struct iommu_dev_data *dev_data;
139 struct pci_dev *pdev;
140 u16 devid, alias;
141
142 if (dev->archdata.iommu)
143 return 0;
144
145 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
146 if (!dev_data)
147 return -ENOMEM;
148
149 dev_data->dev = dev;
150
151 devid = get_device_id(dev);
152 alias = amd_iommu_alias_table[devid];
153 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
154 if (pdev)
155 dev_data->alias = &pdev->dev;
156
157 atomic_set(&dev_data->bind, 0);
158
159 dev->archdata.iommu = dev_data;
160
161
162 return 0;
163}
164
165static void iommu_uninit_device(struct device *dev)
166{
167 kfree(dev->archdata.iommu);
168}
73#ifdef CONFIG_AMD_IOMMU_STATS 169#ifdef CONFIG_AMD_IOMMU_STATS
74 170
75/* 171/*
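get_device_id() above reduces a PCI device to the 16-bit requestor ID used to index the device, alias and rlookup tables. Assuming calc_devid() packs the bus number into the high byte and devfn into the low byte, a standalone sketch of the arithmetic for a sample device 00:18.3:

#include <stdio.h>

/* Assumed layout of calc_devid(): bus in bits 15..8, devfn in bits 7..0. */
static unsigned short calc_devid_sketch(unsigned bus, unsigned devfn)
{
	return (unsigned short)((bus << 8) | devfn);
}

int main(void)
{
	unsigned bus = 0x00, dev = 0x18, fn = 0x3;
	unsigned devfn = (dev << 3) | fn;	/* PCI devfn packs slot[7:3] and function[2:0] */

	printf("requestor id = 0x%04x\n", calc_devid_sketch(bus, devfn));
	return 0;
}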
@@ -90,7 +186,6 @@ DECLARE_STATS_COUNTER(alloced_io_mem);
 DECLARE_STATS_COUNTER(total_map_requests);
 
 static struct dentry *stats_dir;
-static struct dentry *de_isolate;
 static struct dentry *de_fflush;
 
 static void amd_iommu_stats_add(struct __iommu_counter *cnt)
@@ -108,9 +203,6 @@ static void amd_iommu_stats_init(void)
 	if (stats_dir == NULL)
 		return;
 
-	de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
-					 (u32 *)&amd_iommu_isolate);
-
 	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
 					 (u32 *)&amd_iommu_unmap_flush);
 
@@ -130,12 +222,6 @@ static void amd_iommu_stats_init(void)
 
 #endif
 
-/* returns !0 if the IOMMU is caching non-present entries in its TLB */
-static int iommu_has_npcache(struct amd_iommu *iommu)
-{
-	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
-}
-
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -199,6 +285,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 		break;
 	case EVENT_TYPE_ILL_CMD:
 		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+		iommu->reset_in_progress = true;
 		reset_iommu_command_buffer(iommu);
 		dump_command(address);
 		break;
@@ -321,11 +408,8 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu)
 	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
 	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	if (unlikely(i == EXIT_LOOP_COUNT)) {
-		spin_unlock(&iommu->lock);
-		reset_iommu_command_buffer(iommu);
-		spin_lock(&iommu->lock);
-	}
+	if (unlikely(i == EXIT_LOOP_COUNT))
+		iommu->reset_in_progress = true;
 }
 
 /*
@@ -372,26 +456,46 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
372out: 456out:
373 spin_unlock_irqrestore(&iommu->lock, flags); 457 spin_unlock_irqrestore(&iommu->lock, flags);
374 458
459 if (iommu->reset_in_progress)
460 reset_iommu_command_buffer(iommu);
461
375 return 0; 462 return 0;
376} 463}
377 464
465static void iommu_flush_complete(struct protection_domain *domain)
466{
467 int i;
468
469 for (i = 0; i < amd_iommus_present; ++i) {
470 if (!domain->dev_iommu[i])
471 continue;
472
473 /*
474 * Devices of this domain are behind this IOMMU
475 * We need to wait for completion of all commands.
476 */
477 iommu_completion_wait(amd_iommus[i]);
478 }
479}
480
378/* 481/*
379 * Command send function for invalidating a device table entry 482 * Command send function for invalidating a device table entry
380 */ 483 */
381static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) 484static int iommu_flush_device(struct device *dev)
382{ 485{
486 struct amd_iommu *iommu;
383 struct iommu_cmd cmd; 487 struct iommu_cmd cmd;
384 int ret; 488 u16 devid;
385 489
386 BUG_ON(iommu == NULL); 490 devid = get_device_id(dev);
491 iommu = amd_iommu_rlookup_table[devid];
387 492
493 /* Build command */
388 memset(&cmd, 0, sizeof(cmd)); 494 memset(&cmd, 0, sizeof(cmd));
389 CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); 495 CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
390 cmd.data[0] = devid; 496 cmd.data[0] = devid;
391 497
392 ret = iommu_queue_command(iommu, &cmd); 498 return iommu_queue_command(iommu, &cmd);
393
394 return ret;
395} 499}
396 500
397static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, 501static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
@@ -430,11 +534,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
  * It invalidates a single PTE if the range to flush is within a single
  * page. Otherwise it flushes the whole TLB of the IOMMU.
  */
-static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
-		u64 address, size_t size)
+static void __iommu_flush_pages(struct protection_domain *domain,
+				u64 address, size_t size, int pde)
 {
-	int s = 0;
-	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
+	int s = 0, i;
+	unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);
 
 	address &= PAGE_MASK;
 
@@ -447,142 +551,212 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
447 s = 1; 551 s = 1;
448 } 552 }
449 553
450 iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
451 554
452 return 0; 555 for (i = 0; i < amd_iommus_present; ++i) {
556 if (!domain->dev_iommu[i])
557 continue;
558
559 /*
560 * Devices of this domain are behind this IOMMU
561 * We need a TLB flush
562 */
563 iommu_queue_inv_iommu_pages(amd_iommus[i], address,
564 domain->id, pde, s);
565 }
566
567 return;
453} 568}
454 569
455/* Flush the whole IO/TLB for a given protection domain */ 570static void iommu_flush_pages(struct protection_domain *domain,
456static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid) 571 u64 address, size_t size)
457{ 572{
458 u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; 573 __iommu_flush_pages(domain, address, size, 0);
459 574}
460 INC_STATS_COUNTER(domain_flush_single);
461 575
462 iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1); 576/* Flush the whole IO/TLB for a given protection domain */
577static void iommu_flush_tlb(struct protection_domain *domain)
578{
579 __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
463} 580}
464 581
465/* Flush the whole IO/TLB for a given protection domain - including PDE */ 582/* Flush the whole IO/TLB for a given protection domain - including PDE */
466static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) 583static void iommu_flush_tlb_pde(struct protection_domain *domain)
467{ 584{
468 u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; 585 __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
469
470 INC_STATS_COUNTER(domain_flush_single);
471
472 iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
473} 586}
474 587
588
475/* 589/*
476 * This function flushes one domain on one IOMMU 590 * This function flushes the DTEs for all devices in domain
477 */ 591 */
478static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid) 592static void iommu_flush_domain_devices(struct protection_domain *domain)
479{ 593{
480 struct iommu_cmd cmd; 594 struct iommu_dev_data *dev_data;
481 unsigned long flags; 595 unsigned long flags;
482 596
483 __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 597 spin_lock_irqsave(&domain->lock, flags);
484 domid, 1, 1);
485 598
486 spin_lock_irqsave(&iommu->lock, flags); 599 list_for_each_entry(dev_data, &domain->dev_list, list)
487 __iommu_queue_command(iommu, &cmd); 600 iommu_flush_device(dev_data->dev);
488 __iommu_completion_wait(iommu); 601
489 __iommu_wait_for_completion(iommu); 602 spin_unlock_irqrestore(&domain->lock, flags);
490 spin_unlock_irqrestore(&iommu->lock, flags);
491} 603}
492 604
493static void flush_all_domains_on_iommu(struct amd_iommu *iommu) 605static void iommu_flush_all_domain_devices(void)
494{ 606{
495 int i; 607 struct protection_domain *domain;
608 unsigned long flags;
496 609
497 for (i = 1; i < MAX_DOMAIN_ID; ++i) { 610 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
498 if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) 611
499 continue; 612 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
500 flush_domain_on_iommu(iommu, i); 613 iommu_flush_domain_devices(domain);
614 iommu_flush_complete(domain);
501 } 615 }
502 616
617 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
618}
619
620void amd_iommu_flush_all_devices(void)
621{
622 iommu_flush_all_domain_devices();
503} 623}
504 624
505/* 625/*
506 * This function is used to flush the IO/TLB for a given protection domain 626 * This function uses heavy locking and may disable irqs for some time. But
507 * on every IOMMU in the system 627 * this is no issue because it is only called during resume.
508 */ 628 */
509static void iommu_flush_domain(u16 domid) 629void amd_iommu_flush_all_domains(void)
510{ 630{
511 struct amd_iommu *iommu; 631 struct protection_domain *domain;
632 unsigned long flags;
512 633
513 INC_STATS_COUNTER(domain_flush_all); 634 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
514 635
515 for_each_iommu(iommu) 636 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
516 flush_domain_on_iommu(iommu, domid); 637 spin_lock(&domain->lock);
638 iommu_flush_tlb_pde(domain);
639 iommu_flush_complete(domain);
640 spin_unlock(&domain->lock);
641 }
642
643 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
517} 644}
518 645
519void amd_iommu_flush_all_domains(void) 646static void reset_iommu_command_buffer(struct amd_iommu *iommu)
520{ 647{
521 struct amd_iommu *iommu; 648 pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
522 649
523 for_each_iommu(iommu) 650 if (iommu->reset_in_progress)
524 flush_all_domains_on_iommu(iommu); 651 panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
652
653 amd_iommu_reset_cmd_buffer(iommu);
654 amd_iommu_flush_all_devices();
655 amd_iommu_flush_all_domains();
656
657 iommu->reset_in_progress = false;
525} 658}
526 659
527static void flush_all_devices_for_iommu(struct amd_iommu *iommu) 660/****************************************************************************
661 *
+ * The functions below are used to create the page table mappings for
663 * unity mapped regions.
664 *
665 ****************************************************************************/
666
667/*
668 * This function is used to add another level to an IO page table. Adding
669 * another level increases the size of the address space by 9 bits to a size up
670 * to 64 bits.
671 */
672static bool increase_address_space(struct protection_domain *domain,
673 gfp_t gfp)
528{ 674{
529 int i; 675 u64 *pte;
530 676
531 for (i = 0; i <= amd_iommu_last_bdf; ++i) { 677 if (domain->mode == PAGE_MODE_6_LEVEL)
532 if (iommu != amd_iommu_rlookup_table[i]) 678 /* address space already 64 bit large */
533 continue; 679 return false;
534 680
535 iommu_queue_inv_dev_entry(iommu, i); 681 pte = (void *)get_zeroed_page(gfp);
536 iommu_completion_wait(iommu); 682 if (!pte)
537 } 683 return false;
684
685 *pte = PM_LEVEL_PDE(domain->mode,
686 virt_to_phys(domain->pt_root));
687 domain->pt_root = pte;
688 domain->mode += 1;
689 domain->updated = true;
690
691 return true;
538} 692}
539 693
540static void flush_devices_by_domain(struct protection_domain *domain) 694static u64 *alloc_pte(struct protection_domain *domain,
695 unsigned long address,
696 int end_lvl,
697 u64 **pte_page,
698 gfp_t gfp)
541{ 699{
542 struct amd_iommu *iommu; 700 u64 *pte, *page;
543 int i; 701 int level;
544 702
545 for (i = 0; i <= amd_iommu_last_bdf; ++i) { 703 while (address > PM_LEVEL_SIZE(domain->mode))
546 if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || 704 increase_address_space(domain, gfp);
547 (amd_iommu_pd_table[i] != domain))
548 continue;
549 705
550 iommu = amd_iommu_rlookup_table[i]; 706 level = domain->mode - 1;
551 if (!iommu) 707 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
552 continue;
553 708
554 iommu_queue_inv_dev_entry(iommu, i); 709 while (level > end_lvl) {
555 iommu_completion_wait(iommu); 710 if (!IOMMU_PTE_PRESENT(*pte)) {
711 page = (u64 *)get_zeroed_page(gfp);
712 if (!page)
713 return NULL;
714 *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
715 }
716
717 level -= 1;
718
719 pte = IOMMU_PTE_PAGE(*pte);
720
721 if (pte_page && level == end_lvl)
722 *pte_page = pte;
723
724 pte = &pte[PM_LEVEL_INDEX(level, address)];
556 } 725 }
726
727 return pte;
557} 728}
558 729
559static void reset_iommu_command_buffer(struct amd_iommu *iommu) 730/*
731 * This function checks if there is a PTE for a given dma address. If
732 * there is one, it returns the pointer to it.
733 */
734static u64 *fetch_pte(struct protection_domain *domain,
735 unsigned long address, int map_size)
560{ 736{
561 pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); 737 int level;
738 u64 *pte;
562 739
563 if (iommu->reset_in_progress) 740 level = domain->mode - 1;
564 panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); 741 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
565 742
566 iommu->reset_in_progress = true; 743 while (level > map_size) {
744 if (!IOMMU_PTE_PRESENT(*pte))
745 return NULL;
567 746
568 amd_iommu_reset_cmd_buffer(iommu); 747 level -= 1;
569 flush_all_devices_for_iommu(iommu);
570 flush_all_domains_on_iommu(iommu);
571 748
572 iommu->reset_in_progress = false; 749 pte = IOMMU_PTE_PAGE(*pte);
573} 750 pte = &pte[PM_LEVEL_INDEX(level, address)];
574 751
575void amd_iommu_flush_all_devices(void) 752 if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
576{ 753 pte = NULL;
577 flush_devices_by_domain(NULL); 754 break;
578} 755 }
756 }
579 757
580/**************************************************************************** 758 return pte;
581 * 759}
582 * The functions below are used the create the page table mappings for
583 * unity mapped regions.
584 *
585 ****************************************************************************/
586 760
587/* 761/*
588 * Generic mapping functions. It maps a physical address into a DMA 762 * Generic mapping functions. It maps a physical address into a DMA
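alloc_pte() and fetch_pte() above walk a page table in which every level resolves 9 address bits on top of the 4 KiB page offset, and increase_address_space() simply stacks one more level on top. A standalone sketch of the index arithmetic, assuming PM_LEVEL_INDEX() amounts to (addr >> (12 + 9*level)) & 0x1ff:

#include <stdio.h>

/* Assumed to mirror PM_LEVEL_INDEX(): 9 bits per level above the page offset. */
static unsigned long level_index(unsigned long addr, int level)
{
	return (addr >> (12 + 9 * level)) & 0x1ffUL;
}

int main(void)
{
	unsigned long addr = 0x12345678UL;

	for (int level = 2; level >= 0; --level)	/* a 3-level walk, top down */
		printf("level %d -> index %lu\n", level, level_index(addr, level));
	return 0;
}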
@@ -654,28 +828,6 @@ static int iommu_for_unity_map(struct amd_iommu *iommu,
654} 828}
655 829
656/* 830/*
657 * Init the unity mappings for a specific IOMMU in the system
658 *
659 * Basically iterates over all unity mapping entries and applies them to
660 * the default domain DMA of that IOMMU if necessary.
661 */
662static int iommu_init_unity_mappings(struct amd_iommu *iommu)
663{
664 struct unity_map_entry *entry;
665 int ret;
666
667 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
668 if (!iommu_for_unity_map(iommu, entry))
669 continue;
670 ret = dma_ops_unity_map(iommu->default_dom, entry);
671 if (ret)
672 return ret;
673 }
674
675 return 0;
676}
677
678/*
679 * This function actually applies the mapping to the page table of the 831 * This function actually applies the mapping to the page table of the
680 * dma_ops domain. 832 * dma_ops domain.
681 */ 833 */
@@ -704,6 +856,28 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
704} 856}
705 857
706/* 858/*
859 * Init the unity mappings for a specific IOMMU in the system
860 *
861 * Basically iterates over all unity mapping entries and applies them to
862 * the default domain DMA of that IOMMU if necessary.
863 */
864static int iommu_init_unity_mappings(struct amd_iommu *iommu)
865{
866 struct unity_map_entry *entry;
867 int ret;
868
869 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
870 if (!iommu_for_unity_map(iommu, entry))
871 continue;
872 ret = dma_ops_unity_map(iommu->default_dom, entry);
873 if (ret)
874 return ret;
875 }
876
877 return 0;
878}
879
880/*
707 * Inits the unity mappings required for a specific device 881 * Inits the unity mappings required for a specific device
708 */ 882 */
709static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, 883static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
@@ -740,34 +914,23 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
740 */ 914 */
741 915
 /*
- * This function checks if there is a PTE for a given dma address. If
- * there is one, it returns the pointer to it.
+ * Used to reserve address ranges in the aperture (e.g. for exclusion
+ * ranges).
  */
746static u64 *fetch_pte(struct protection_domain *domain, 920static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
747 unsigned long address, int map_size) 921 unsigned long start_page,
922 unsigned int pages)
748{ 923{
749 int level; 924 unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
750 u64 *pte;
751
752 level = domain->mode - 1;
753 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
754
755 while (level > map_size) {
756 if (!IOMMU_PTE_PRESENT(*pte))
757 return NULL;
758
759 level -= 1;
760 925
761 pte = IOMMU_PTE_PAGE(*pte); 926 if (start_page + pages > last_page)
762 pte = &pte[PM_LEVEL_INDEX(level, address)]; 927 pages = last_page - start_page;
763 928
764 if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { 929 for (i = start_page; i < start_page + pages; ++i) {
765 pte = NULL; 930 int index = i / APERTURE_RANGE_PAGES;
766 break; 931 int page = i % APERTURE_RANGE_PAGES;
767 } 932 __set_bit(page, dom->aperture[index]->bitmap);
768 } 933 }
769
770 return pte;
771} 934}
772 935
773/* 936/*
@@ -775,11 +938,11 @@ static u64 *fetch_pte(struct protection_domain *domain,
775 * aperture in case of dma_ops domain allocation or address allocation 938 * aperture in case of dma_ops domain allocation or address allocation
776 * failure. 939 * failure.
777 */ 940 */
778static int alloc_new_range(struct amd_iommu *iommu, 941static int alloc_new_range(struct dma_ops_domain *dma_dom,
779 struct dma_ops_domain *dma_dom,
780 bool populate, gfp_t gfp) 942 bool populate, gfp_t gfp)
781{ 943{
782 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; 944 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
945 struct amd_iommu *iommu;
783 int i; 946 int i;
784 947
785#ifdef CONFIG_IOMMU_STRESS 948#ifdef CONFIG_IOMMU_STRESS
@@ -819,14 +982,17 @@ static int alloc_new_range(struct amd_iommu *iommu,
 	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
 	/* Initialize the exclusion range if necessary */
-	if (iommu->exclusion_start &&
-	    iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
-	    iommu->exclusion_start < dma_dom->aperture_size) {
-		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-		int pages = iommu_num_pages(iommu->exclusion_start,
-					    iommu->exclusion_length,
-					    PAGE_SIZE);
-		dma_ops_reserve_addresses(dma_dom, startpage, pages);
+	for_each_iommu(iommu) {
+		if (iommu->exclusion_start &&
+		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
+		    && iommu->exclusion_start < dma_dom->aperture_size) {
+			unsigned long startpage;
+			int pages = iommu_num_pages(iommu->exclusion_start,
+						    iommu->exclusion_length,
+						    PAGE_SIZE);
+			startpage = iommu->exclusion_start >> PAGE_SHIFT;
+			dma_ops_reserve_addresses(dma_dom, startpage, pages);
+		}
 	}
 
 	/*
@@ -928,7 +1094,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
928 } 1094 }
929 1095
930 if (unlikely(address == -1)) 1096 if (unlikely(address == -1))
931 address = bad_dma_address; 1097 address = DMA_ERROR_CODE;
932 1098
933 WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size); 1099 WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
934 1100
@@ -973,6 +1139,31 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
973 * 1139 *
974 ****************************************************************************/ 1140 ****************************************************************************/
975 1141
1142/*
1143 * This function adds a protection domain to the global protection domain list
1144 */
1145static void add_domain_to_list(struct protection_domain *domain)
1146{
1147 unsigned long flags;
1148
1149 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1150 list_add(&domain->list, &amd_iommu_pd_list);
1151 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1152}
1153
1154/*
+ * This function removes a protection domain from the global
1156 * protection domain list
1157 */
1158static void del_domain_from_list(struct protection_domain *domain)
1159{
1160 unsigned long flags;
1161
1162 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1163 list_del(&domain->list);
1164 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1165}
1166
976static u16 domain_id_alloc(void) 1167static u16 domain_id_alloc(void)
977{ 1168{
978 unsigned long flags; 1169 unsigned long flags;
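add_domain_to_list()/del_domain_from_list() keep every protection domain on a global list so that resume-time code such as amd_iommu_flush_all_domains() can reach them. A user-space sketch of the same register-then-iterate pattern, with a pthread mutex standing in for amd_iommu_pd_lock and a hand-rolled list for list_head:

#include <pthread.h>
#include <stdio.h>

struct domain_sketch {
	int id;
	struct domain_sketch *next;
};

static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER;
static struct domain_sketch *pd_list;

static void add_domain(struct domain_sketch *d)
{
	pthread_mutex_lock(&pd_lock);
	d->next = pd_list;		/* register at allocation time */
	pd_list = d;
	pthread_mutex_unlock(&pd_lock);
}

static void flush_all(void)
{
	pthread_mutex_lock(&pd_lock);
	for (struct domain_sketch *d = pd_list; d; d = d->next)
		printf("flush domain %d\n", d->id);
	pthread_mutex_unlock(&pd_lock);
}

int main(void)
{
	struct domain_sketch a = { 1, NULL }, b = { 2, NULL };

	add_domain(&a);
	add_domain(&b);
	flush_all();
	return 0;
}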
@@ -1000,26 +1191,6 @@ static void domain_id_free(int id)
1000 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1191 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1001} 1192}
1002 1193
1003/*
1004 * Used to reserve address ranges in the aperture (e.g. for exclusion
1005 * ranges.
1006 */
1007static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1008 unsigned long start_page,
1009 unsigned int pages)
1010{
1011 unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1012
1013 if (start_page + pages > last_page)
1014 pages = last_page - start_page;
1015
1016 for (i = start_page; i < start_page + pages; ++i) {
1017 int index = i / APERTURE_RANGE_PAGES;
1018 int page = i % APERTURE_RANGE_PAGES;
1019 __set_bit(page, dom->aperture[index]->bitmap);
1020 }
1021}
1022
1023static void free_pagetable(struct protection_domain *domain) 1194static void free_pagetable(struct protection_domain *domain)
1024{ 1195{
1025 int i, j; 1196 int i, j;
@@ -1061,6 +1232,8 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
1061 if (!dom) 1232 if (!dom)
1062 return; 1233 return;
1063 1234
1235 del_domain_from_list(&dom->domain);
1236
1064 free_pagetable(&dom->domain); 1237 free_pagetable(&dom->domain);
1065 1238
1066 for (i = 0; i < APERTURE_MAX_RANGES; ++i) { 1239 for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
@@ -1078,7 +1251,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
  * It also initializes the page table and the address allocator data
  * structures required for the dma_ops interface
  */
-static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
+static struct dma_ops_domain *dma_ops_domain_alloc(void)
 {
 	struct dma_ops_domain *dma_dom;
 
@@ -1091,6 +1264,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
1091 dma_dom->domain.id = domain_id_alloc(); 1264 dma_dom->domain.id = domain_id_alloc();
1092 if (dma_dom->domain.id == 0) 1265 if (dma_dom->domain.id == 0)
1093 goto free_dma_dom; 1266 goto free_dma_dom;
1267 INIT_LIST_HEAD(&dma_dom->domain.dev_list);
1094 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; 1268 dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
1095 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); 1269 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1096 dma_dom->domain.flags = PD_DMA_OPS_MASK; 1270 dma_dom->domain.flags = PD_DMA_OPS_MASK;
@@ -1101,7 +1275,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
1101 dma_dom->need_flush = false; 1275 dma_dom->need_flush = false;
1102 dma_dom->target_dev = 0xffff; 1276 dma_dom->target_dev = 0xffff;
1103 1277
1104 if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL)) 1278 add_domain_to_list(&dma_dom->domain);
1279
1280 if (alloc_new_range(dma_dom, true, GFP_KERNEL))
1105 goto free_dma_dom; 1281 goto free_dma_dom;
1106 1282
1107 /* 1283 /*
@@ -1129,22 +1305,6 @@ static bool dma_ops_domain(struct protection_domain *domain)
1129 return domain->flags & PD_DMA_OPS_MASK; 1305 return domain->flags & PD_DMA_OPS_MASK;
1130} 1306}
1131 1307
1132/*
1133 * Find out the protection domain structure for a given PCI device. This
1134 * will give us the pointer to the page table root for example.
1135 */
1136static struct protection_domain *domain_for_device(u16 devid)
1137{
1138 struct protection_domain *dom;
1139 unsigned long flags;
1140
1141 read_lock_irqsave(&amd_iommu_devtable_lock, flags);
1142 dom = amd_iommu_pd_table[devid];
1143 read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1144
1145 return dom;
1146}
1147
1148static void set_dte_entry(u16 devid, struct protection_domain *domain) 1308static void set_dte_entry(u16 devid, struct protection_domain *domain)
1149{ 1309{
1150 u64 pte_root = virt_to_phys(domain->pt_root); 1310 u64 pte_root = virt_to_phys(domain->pt_root);
@@ -1156,42 +1316,123 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain)
1156 amd_iommu_dev_table[devid].data[2] = domain->id; 1316 amd_iommu_dev_table[devid].data[2] = domain->id;
1157 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); 1317 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
1158 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); 1318 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
1319}
1320
1321static void clear_dte_entry(u16 devid)
1322{
1323 /* remove entry from the device table seen by the hardware */
1324 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1325 amd_iommu_dev_table[devid].data[1] = 0;
1326 amd_iommu_dev_table[devid].data[2] = 0;
1159 1327
1160 amd_iommu_pd_table[devid] = domain; 1328 amd_iommu_apply_erratum_63(devid);
1329}
1330
1331static void do_attach(struct device *dev, struct protection_domain *domain)
1332{
1333 struct iommu_dev_data *dev_data;
1334 struct amd_iommu *iommu;
1335 u16 devid;
1336
1337 devid = get_device_id(dev);
1338 iommu = amd_iommu_rlookup_table[devid];
1339 dev_data = get_dev_data(dev);
1340
1341 /* Update data structures */
1342 dev_data->domain = domain;
1343 list_add(&dev_data->list, &domain->dev_list);
1344 set_dte_entry(devid, domain);
1345
1346 /* Do reference counting */
1347 domain->dev_iommu[iommu->index] += 1;
1348 domain->dev_cnt += 1;
1349
1350 /* Flush the DTE entry */
1351 iommu_flush_device(dev);
1352}
1353
1354static void do_detach(struct device *dev)
1355{
1356 struct iommu_dev_data *dev_data;
1357 struct amd_iommu *iommu;
1358 u16 devid;
1359
1360 devid = get_device_id(dev);
1361 iommu = amd_iommu_rlookup_table[devid];
1362 dev_data = get_dev_data(dev);
1363
1364 /* decrease reference counters */
1365 dev_data->domain->dev_iommu[iommu->index] -= 1;
1366 dev_data->domain->dev_cnt -= 1;
1367
1368 /* Update data structures */
1369 dev_data->domain = NULL;
1370 list_del(&dev_data->list);
1371 clear_dte_entry(devid);
1372
1373 /* Flush the DTE entry */
1374 iommu_flush_device(dev);
1161} 1375}
1162 1376
1163/* 1377/*
1164 * If a device is not yet associated with a domain, this function does 1378 * If a device is not yet associated with a domain, this function does
1165 * assigns it visible for the hardware 1379 * assigns it visible for the hardware
1166 */ 1380 */
1167static void __attach_device(struct amd_iommu *iommu, 1381static int __attach_device(struct device *dev,
1168 struct protection_domain *domain, 1382 struct protection_domain *domain)
1169 u16 devid)
1170{ 1383{
1384 struct iommu_dev_data *dev_data, *alias_data;
1385
1386 dev_data = get_dev_data(dev);
1387 alias_data = get_dev_data(dev_data->alias);
1388
1389 if (!alias_data)
1390 return -EINVAL;
1391
1171 /* lock domain */ 1392 /* lock domain */
1172 spin_lock(&domain->lock); 1393 spin_lock(&domain->lock);
1173 1394
1174 /* update DTE entry */ 1395 /* Some sanity checks */
1175 set_dte_entry(devid, domain); 1396 if (alias_data->domain != NULL &&
1397 alias_data->domain != domain)
1398 return -EBUSY;
1176 1399
1177 domain->dev_cnt += 1; 1400 if (dev_data->domain != NULL &&
1401 dev_data->domain != domain)
1402 return -EBUSY;
1403
1404 /* Do real assignment */
1405 if (dev_data->alias != dev) {
1406 alias_data = get_dev_data(dev_data->alias);
1407 if (alias_data->domain == NULL)
1408 do_attach(dev_data->alias, domain);
1409
1410 atomic_inc(&alias_data->bind);
1411 }
1412
1413 if (dev_data->domain == NULL)
1414 do_attach(dev, domain);
1415
1416 atomic_inc(&dev_data->bind);
1178 1417
1179 /* ready */ 1418 /* ready */
1180 spin_unlock(&domain->lock); 1419 spin_unlock(&domain->lock);
1420
1421 return 0;
1181} 1422}
1182 1423
1183/* 1424/*
  * If a device is not yet associated with a domain, this function
  * associates it and makes it visible to the hardware
1186 */ 1427 */
1187static void attach_device(struct amd_iommu *iommu, 1428static int attach_device(struct device *dev,
1188 struct protection_domain *domain, 1429 struct protection_domain *domain)
1189 u16 devid)
1190{ 1430{
1191 unsigned long flags; 1431 unsigned long flags;
1432 int ret;
1192 1433
1193 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 1434 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1194 __attach_device(iommu, domain, devid); 1435 ret = __attach_device(dev, domain);
1195 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1436 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1196 1437
1197 /* 1438 /*
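__attach_device() above counts attachments per device: the PCI alias is attached first, both the alias and the device take a reference on bind, and a later detach only tears the DTE down once the last user is gone. A plain-C sketch of that reference counting, simplified from the real code, which uses atomic_t and also checks dev_data->domain:

#include <stdio.h>

struct dev_data_sketch {
	int bind;		/* stands in for atomic_t bind */
	int attached;		/* stands in for "DTE programmed" */
};

static void attach(struct dev_data_sketch *d)
{
	if (d->bind++ == 0)
		d->attached = 1;	/* first user: program the DTE */
}

static void detach(struct dev_data_sketch *d)
{
	if (--d->bind == 0)
		d->attached = 0;	/* last user gone: clear the DTE */
}

int main(void)
{
	struct dev_data_sketch alias = { 0, 0 };

	attach(&alias);		/* the device itself */
	attach(&alias);		/* a second function sharing the alias */
	detach(&alias);
	printf("still attached: %d\n", alias.attached);	/* prints 1 */
	detach(&alias);
	return 0;
}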
@@ -1199,98 +1440,125 @@ static void attach_device(struct amd_iommu *iommu,
1199 * left the caches in the IOMMU dirty. So we have to flush 1440 * left the caches in the IOMMU dirty. So we have to flush
1200 * here to evict all dirty stuff. 1441 * here to evict all dirty stuff.
1201 */ 1442 */
1202 iommu_queue_inv_dev_entry(iommu, devid); 1443 iommu_flush_tlb_pde(domain);
1203 iommu_flush_tlb_pde(iommu, domain->id); 1444
1445 return ret;
1204} 1446}
1205 1447
1206/* 1448/*
1207 * Removes a device from a protection domain (unlocked) 1449 * Removes a device from a protection domain (unlocked)
1208 */ 1450 */
1209static void __detach_device(struct protection_domain *domain, u16 devid) 1451static void __detach_device(struct device *dev)
1210{ 1452{
1453 struct iommu_dev_data *dev_data = get_dev_data(dev);
1454 struct iommu_dev_data *alias_data;
1455 unsigned long flags;
1211 1456
1212 /* lock domain */ 1457 BUG_ON(!dev_data->domain);
1213 spin_lock(&domain->lock);
1214
1215 /* remove domain from the lookup table */
1216 amd_iommu_pd_table[devid] = NULL;
1217 1458
1218 /* remove entry from the device table seen by the hardware */ 1459 spin_lock_irqsave(&dev_data->domain->lock, flags);
1219 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1220 amd_iommu_dev_table[devid].data[1] = 0;
1221 amd_iommu_dev_table[devid].data[2] = 0;
1222 1460
1223 amd_iommu_apply_erratum_63(devid); 1461 if (dev_data->alias != dev) {
1462 alias_data = get_dev_data(dev_data->alias);
1463 if (atomic_dec_and_test(&alias_data->bind))
1464 do_detach(dev_data->alias);
1465 }
1224 1466
1225 /* decrease reference counter */ 1467 if (atomic_dec_and_test(&dev_data->bind))
1226 domain->dev_cnt -= 1; 1468 do_detach(dev);
1227 1469
1228 /* ready */ 1470 spin_unlock_irqrestore(&dev_data->domain->lock, flags);
1229 spin_unlock(&domain->lock);
1230 1471
1231 /* 1472 /*
1232 * If we run in passthrough mode the device must be assigned to the 1473 * If we run in passthrough mode the device must be assigned to the
1233 * passthrough domain if it is detached from any other domain 1474 * passthrough domain if it is detached from any other domain
1234 */ 1475 */
1235 if (iommu_pass_through) { 1476 if (iommu_pass_through && dev_data->domain == NULL)
1236 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; 1477 __attach_device(dev, pt_domain);
1237 __attach_device(iommu, pt_domain, devid);
1238 }
1239} 1478}
1240 1479
1241/* 1480/*
1242 * Removes a device from a protection domain (with devtable_lock held) 1481 * Removes a device from a protection domain (with devtable_lock held)
1243 */ 1482 */
1244static void detach_device(struct protection_domain *domain, u16 devid) 1483static void detach_device(struct device *dev)
1245{ 1484{
1246 unsigned long flags; 1485 unsigned long flags;
1247 1486
1248 /* lock device table */ 1487 /* lock device table */
1249 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 1488 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1250 __detach_device(domain, devid); 1489 __detach_device(dev);
1251 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1490 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1252} 1491}
1253 1492
1493/*
1494 * Find out the protection domain structure for a given PCI device. This
1495 * will give us the pointer to the page table root for example.
1496 */
1497static struct protection_domain *domain_for_device(struct device *dev)
1498{
1499 struct protection_domain *dom;
1500 struct iommu_dev_data *dev_data, *alias_data;
1501 unsigned long flags;
1502 u16 devid, alias;
1503
1504 devid = get_device_id(dev);
1505 alias = amd_iommu_alias_table[devid];
1506 dev_data = get_dev_data(dev);
1507 alias_data = get_dev_data(dev_data->alias);
1508 if (!alias_data)
1509 return NULL;
1510
1511 read_lock_irqsave(&amd_iommu_devtable_lock, flags);
1512 dom = dev_data->domain;
1513 if (dom == NULL &&
1514 alias_data->domain != NULL) {
1515 __attach_device(dev, alias_data->domain);
1516 dom = alias_data->domain;
1517 }
1518
1519 read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1520
1521 return dom;
1522}
1523
1254static int device_change_notifier(struct notifier_block *nb, 1524static int device_change_notifier(struct notifier_block *nb,
1255 unsigned long action, void *data) 1525 unsigned long action, void *data)
1256{ 1526{
1257 struct device *dev = data; 1527 struct device *dev = data;
1258 struct pci_dev *pdev = to_pci_dev(dev); 1528 u16 devid;
1259 u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
1260 struct protection_domain *domain; 1529 struct protection_domain *domain;
1261 struct dma_ops_domain *dma_domain; 1530 struct dma_ops_domain *dma_domain;
1262 struct amd_iommu *iommu; 1531 struct amd_iommu *iommu;
1263 unsigned long flags; 1532 unsigned long flags;
1264 1533
1265 if (devid > amd_iommu_last_bdf) 1534 if (!check_device(dev))
1266 goto out; 1535 return 0;
1267
1268 devid = amd_iommu_alias_table[devid];
1269
1270 iommu = amd_iommu_rlookup_table[devid];
1271 if (iommu == NULL)
1272 goto out;
1273
1274 domain = domain_for_device(devid);
1275 1536
1276 if (domain && !dma_ops_domain(domain)) 1537 devid = get_device_id(dev);
1277 WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound " 1538 iommu = amd_iommu_rlookup_table[devid];
1278 "to a non-dma-ops domain\n", dev_name(dev));
1279 1539
1280 switch (action) { 1540 switch (action) {
1281 case BUS_NOTIFY_UNBOUND_DRIVER: 1541 case BUS_NOTIFY_UNBOUND_DRIVER:
1542
1543 domain = domain_for_device(dev);
1544
1282 if (!domain) 1545 if (!domain)
1283 goto out; 1546 goto out;
1284 if (iommu_pass_through) 1547 if (iommu_pass_through)
1285 break; 1548 break;
1286 detach_device(domain, devid); 1549 detach_device(dev);
1287 break; 1550 break;
1288 case BUS_NOTIFY_ADD_DEVICE: 1551 case BUS_NOTIFY_ADD_DEVICE:
1552
1553 iommu_init_device(dev);
1554
1555 domain = domain_for_device(dev);
1556
1289 /* allocate a protection domain if a device is added */ 1557 /* allocate a protection domain if a device is added */
1290 dma_domain = find_protection_domain(devid); 1558 dma_domain = find_protection_domain(devid);
1291 if (dma_domain) 1559 if (dma_domain)
1292 goto out; 1560 goto out;
1293 dma_domain = dma_ops_domain_alloc(iommu); 1561 dma_domain = dma_ops_domain_alloc();
1294 if (!dma_domain) 1562 if (!dma_domain)
1295 goto out; 1563 goto out;
1296 dma_domain->target_dev = devid; 1564 dma_domain->target_dev = devid;
@@ -1300,11 +1568,15 @@ static int device_change_notifier(struct notifier_block *nb,
1300 spin_unlock_irqrestore(&iommu_pd_list_lock, flags); 1568 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
1301 1569
1302 break; 1570 break;
1571 case BUS_NOTIFY_DEL_DEVICE:
1572
1573 iommu_uninit_device(dev);
1574
1303 default: 1575 default:
1304 goto out; 1576 goto out;
1305 } 1577 }
1306 1578
1307 iommu_queue_inv_dev_entry(iommu, devid); 1579 iommu_flush_device(dev);
1308 iommu_completion_wait(iommu); 1580 iommu_completion_wait(iommu);
1309 1581
1310out: 1582out:
@@ -1322,106 +1594,46 @@ static struct notifier_block device_nb = {
1322 *****************************************************************************/ 1594 *****************************************************************************/
1323 1595
1324/* 1596/*
1325 * This function checks if the driver got a valid device from the caller to
1326 * avoid dereferencing invalid pointers.
1327 */
1328static bool check_device(struct device *dev)
1329{
1330 if (!dev || !dev->dma_mask)
1331 return false;
1332
1333 return true;
1334}
1335
1336/*
1337 * In this function the list of preallocated protection domains is traversed to
1338 * find the domain for a specific device
1339 */
1340static struct dma_ops_domain *find_protection_domain(u16 devid)
1341{
1342 struct dma_ops_domain *entry, *ret = NULL;
1343 unsigned long flags;
1344
1345 if (list_empty(&iommu_pd_list))
1346 return NULL;
1347
1348 spin_lock_irqsave(&iommu_pd_list_lock, flags);
1349
1350 list_for_each_entry(entry, &iommu_pd_list, list) {
1351 if (entry->target_dev == devid) {
1352 ret = entry;
1353 break;
1354 }
1355 }
1356
1357 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
1358
1359 return ret;
1360}
1361
1362/*
1363 * In the dma_ops path we only have the struct device. This function 1597 * In the dma_ops path we only have the struct device. This function
1364 * finds the corresponding IOMMU, the protection domain and the 1598 * finds the corresponding IOMMU, the protection domain and the
1365 * requestor id for a given device. 1599 * requestor id for a given device.
1366 * If the device is not yet associated with a domain this is also done 1600 * If the device is not yet associated with a domain this is also done
1367 * in this function. 1601 * in this function.
1368 */ 1602 */
1369static int get_device_resources(struct device *dev, 1603static struct protection_domain *get_domain(struct device *dev)
1370 struct amd_iommu **iommu,
1371 struct protection_domain **domain,
1372 u16 *bdf)
1373{ 1604{
1605 struct protection_domain *domain;
1374 struct dma_ops_domain *dma_dom; 1606 struct dma_ops_domain *dma_dom;
1375 struct pci_dev *pcidev; 1607 u16 devid = get_device_id(dev);
1376 u16 _bdf;
1377
1378 *iommu = NULL;
1379 *domain = NULL;
1380 *bdf = 0xffff;
1381
1382 if (dev->bus != &pci_bus_type)
1383 return 0;
1384 1608
1385 pcidev = to_pci_dev(dev); 1609 if (!check_device(dev))
1386 _bdf = calc_devid(pcidev->bus->number, pcidev->devfn); 1610 return ERR_PTR(-EINVAL);
1387 1611
1388 /* device not translated by any IOMMU in the system? */ 1612 domain = domain_for_device(dev);
1389 if (_bdf > amd_iommu_last_bdf) 1613 if (domain != NULL && !dma_ops_domain(domain))
1390 return 0; 1614 return ERR_PTR(-EBUSY);
1391 1615
1392 *bdf = amd_iommu_alias_table[_bdf]; 1616 if (domain != NULL)
1617 return domain;
1393 1618
-	*iommu = amd_iommu_rlookup_table[*bdf];
+	/* Device not bound yet - bind it */
1395 if (*iommu == NULL) 1620 dma_dom = find_protection_domain(devid);
1396 return 0; 1621 if (!dma_dom)
1397 *domain = domain_for_device(*bdf); 1622 dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
1398 if (*domain == NULL) { 1623 attach_device(dev, &dma_dom->domain);
1399 dma_dom = find_protection_domain(*bdf); 1624 DUMP_printk("Using protection domain %d for device %s\n",
1400 if (!dma_dom) 1625 dma_dom->domain.id, dev_name(dev));
1401 dma_dom = (*iommu)->default_dom;
1402 *domain = &dma_dom->domain;
1403 attach_device(*iommu, *domain, *bdf);
1404 DUMP_printk("Using protection domain %d for device %s\n",
1405 (*domain)->id, dev_name(dev));
1406 }
1407
1408 if (domain_for_device(_bdf) == NULL)
1409 attach_device(*iommu, *domain, _bdf);
1410 1626
1411 return 1; 1627 return &dma_dom->domain;
1412} 1628}
1413 1629
1414static void update_device_table(struct protection_domain *domain) 1630static void update_device_table(struct protection_domain *domain)
1415{ 1631{
1416 unsigned long flags; 1632 struct iommu_dev_data *dev_data;
1417 int i;
1418 1633
1419 for (i = 0; i <= amd_iommu_last_bdf; ++i) { 1634 list_for_each_entry(dev_data, &domain->dev_list, list) {
1420 if (amd_iommu_pd_table[i] != domain) 1635 u16 devid = get_device_id(dev_data->dev);
1421 continue; 1636 set_dte_entry(devid, domain);
1422 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1423 set_dte_entry(i, domain);
1424 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1425 } 1637 }
1426} 1638}
1427 1639
@@ -1431,76 +1643,13 @@ static void update_domain(struct protection_domain *domain)
1431 return; 1643 return;
1432 1644
1433 update_device_table(domain); 1645 update_device_table(domain);
1434 flush_devices_by_domain(domain); 1646 iommu_flush_domain_devices(domain);
1435 iommu_flush_domain(domain->id); 1647 iommu_flush_tlb_pde(domain);
1436 1648
1437 domain->updated = false; 1649 domain->updated = false;
1438} 1650}
1439 1651
1440/* 1652/*
1441 * This function is used to add another level to an IO page table. Adding
1442 * another level increases the size of the address space by 9 bits to a size up
1443 * to 64 bits.
1444 */
1445static bool increase_address_space(struct protection_domain *domain,
1446 gfp_t gfp)
1447{
1448 u64 *pte;
1449
1450 if (domain->mode == PAGE_MODE_6_LEVEL)
1451 /* address space already 64 bit large */
1452 return false;
1453
1454 pte = (void *)get_zeroed_page(gfp);
1455 if (!pte)
1456 return false;
1457
1458 *pte = PM_LEVEL_PDE(domain->mode,
1459 virt_to_phys(domain->pt_root));
1460 domain->pt_root = pte;
1461 domain->mode += 1;
1462 domain->updated = true;
1463
1464 return true;
1465}
1466
1467static u64 *alloc_pte(struct protection_domain *domain,
1468 unsigned long address,
1469 int end_lvl,
1470 u64 **pte_page,
1471 gfp_t gfp)
1472{
1473 u64 *pte, *page;
1474 int level;
1475
1476 while (address > PM_LEVEL_SIZE(domain->mode))
1477 increase_address_space(domain, gfp);
1478
1479 level = domain->mode - 1;
1480 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1481
1482 while (level > end_lvl) {
1483 if (!IOMMU_PTE_PRESENT(*pte)) {
1484 page = (u64 *)get_zeroed_page(gfp);
1485 if (!page)
1486 return NULL;
1487 *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
1488 }
1489
1490 level -= 1;
1491
1492 pte = IOMMU_PTE_PAGE(*pte);
1493
1494 if (pte_page && level == end_lvl)
1495 *pte_page = pte;
1496
1497 pte = &pte[PM_LEVEL_INDEX(level, address)];
1498 }
1499
1500 return pte;
1501}
1502
1503/*
1504 * This function fetches the PTE for a given address in the aperture 1653 * This function fetches the PTE for a given address in the aperture
1505 */ 1654 */
1506static u64* dma_ops_get_pte(struct dma_ops_domain *dom, 1655static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
@@ -1530,8 +1679,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
1530 * This is the generic map function. It maps one 4kb page at paddr to 1679 * This is the generic map function. It maps one 4kb page at paddr to
1531 * the given address in the DMA address space for the domain. 1680 * the given address in the DMA address space for the domain.
1532 */ 1681 */
1533static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu, 1682static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
1534 struct dma_ops_domain *dom,
1535 unsigned long address, 1683 unsigned long address,
1536 phys_addr_t paddr, 1684 phys_addr_t paddr,
1537 int direction) 1685 int direction)
@@ -1544,7 +1692,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
1544 1692
1545 pte = dma_ops_get_pte(dom, address); 1693 pte = dma_ops_get_pte(dom, address);
1546 if (!pte) 1694 if (!pte)
1547 return bad_dma_address; 1695 return DMA_ERROR_CODE;
1548 1696
1549 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; 1697 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
1550 1698
@@ -1565,8 +1713,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
1565/* 1713/*
1566 * The generic unmapping function for one page in the DMA address space. 1714
1567 */ 1715 */
1568static void dma_ops_domain_unmap(struct amd_iommu *iommu, 1716static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
1569 struct dma_ops_domain *dom,
1570 unsigned long address) 1717 unsigned long address)
1571{ 1718{
1572 struct aperture_range *aperture; 1719 struct aperture_range *aperture;
@@ -1597,7 +1744,6 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
1597 * Must be called with the domain lock held. 1744 * Must be called with the domain lock held.
1598 */ 1745 */
1599static dma_addr_t __map_single(struct device *dev, 1746static dma_addr_t __map_single(struct device *dev,
1600 struct amd_iommu *iommu,
1601 struct dma_ops_domain *dma_dom, 1747 struct dma_ops_domain *dma_dom,
1602 phys_addr_t paddr, 1748 phys_addr_t paddr,
1603 size_t size, 1749 size_t size,
@@ -1625,7 +1771,7 @@ static dma_addr_t __map_single(struct device *dev,
1625retry: 1771retry:
1626 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, 1772 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
1627 dma_mask); 1773 dma_mask);
1628 if (unlikely(address == bad_dma_address)) { 1774 if (unlikely(address == DMA_ERROR_CODE)) {
1629 /* 1775 /*
1630 * setting next_address here will let the address 1776 * setting next_address here will let the address
1631 * allocator only scan the new allocated range in the 1777 * allocator only scan the new allocated range in the
@@ -1633,7 +1779,7 @@ retry:
1633 */ 1779 */
1634 dma_dom->next_address = dma_dom->aperture_size; 1780 dma_dom->next_address = dma_dom->aperture_size;
1635 1781
1636 if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC)) 1782 if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
1637 goto out; 1783 goto out;
1638 1784
1639 /* 1785 /*
@@ -1645,8 +1791,8 @@ retry:
1645 1791
1646 start = address; 1792 start = address;
1647 for (i = 0; i < pages; ++i) { 1793 for (i = 0; i < pages; ++i) {
1648 ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir); 1794 ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
1649 if (ret == bad_dma_address) 1795 if (ret == DMA_ERROR_CODE)
1650 goto out_unmap; 1796 goto out_unmap;
1651 1797
1652 paddr += PAGE_SIZE; 1798 paddr += PAGE_SIZE;
@@ -1657,10 +1803,10 @@ retry:
1657 ADD_STATS_COUNTER(alloced_io_mem, size); 1803 ADD_STATS_COUNTER(alloced_io_mem, size);
1658 1804
1659 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { 1805 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
1660 iommu_flush_tlb(iommu, dma_dom->domain.id); 1806 iommu_flush_tlb(&dma_dom->domain);
1661 dma_dom->need_flush = false; 1807 dma_dom->need_flush = false;
1662 } else if (unlikely(iommu_has_npcache(iommu))) 1808 } else if (unlikely(amd_iommu_np_cache))
1663 iommu_flush_pages(iommu, dma_dom->domain.id, address, size); 1809 iommu_flush_pages(&dma_dom->domain, address, size);
1664 1810
1665out: 1811out:
1666 return address; 1812 return address;
@@ -1669,20 +1815,19 @@ out_unmap:
1669 1815
1670 for (--i; i >= 0; --i) { 1816 for (--i; i >= 0; --i) {
1671 start -= PAGE_SIZE; 1817 start -= PAGE_SIZE;
1672 dma_ops_domain_unmap(iommu, dma_dom, start); 1818 dma_ops_domain_unmap(dma_dom, start);
1673 } 1819 }
1674 1820
1675 dma_ops_free_addresses(dma_dom, address, pages); 1821 dma_ops_free_addresses(dma_dom, address, pages);
1676 1822
1677 return bad_dma_address; 1823 return DMA_ERROR_CODE;
1678} 1824}
1679 1825
1680/* 1826/*
1681 * Does the reverse of the __map_single function. Must be called with 1827 * Does the reverse of the __map_single function. Must be called with
1682 * the domain lock held too 1828 * the domain lock held too
1683 */ 1829 */
1684static void __unmap_single(struct amd_iommu *iommu, 1830static void __unmap_single(struct dma_ops_domain *dma_dom,
1685 struct dma_ops_domain *dma_dom,
1686 dma_addr_t dma_addr, 1831 dma_addr_t dma_addr,
1687 size_t size, 1832 size_t size,
1688 int dir) 1833 int dir)
@@ -1690,7 +1835,7 @@ static void __unmap_single(struct amd_iommu *iommu,
1690 dma_addr_t i, start; 1835 dma_addr_t i, start;
1691 unsigned int pages; 1836 unsigned int pages;
1692 1837
1693 if ((dma_addr == bad_dma_address) || 1838 if ((dma_addr == DMA_ERROR_CODE) ||
1694 (dma_addr + size > dma_dom->aperture_size)) 1839 (dma_addr + size > dma_dom->aperture_size))
1695 return; 1840 return;
1696 1841
@@ -1699,7 +1844,7 @@ static void __unmap_single(struct amd_iommu *iommu,
1699 start = dma_addr; 1844 start = dma_addr;
1700 1845
1701 for (i = 0; i < pages; ++i) { 1846 for (i = 0; i < pages; ++i) {
1702 dma_ops_domain_unmap(iommu, dma_dom, start); 1847 dma_ops_domain_unmap(dma_dom, start);
1703 start += PAGE_SIZE; 1848 start += PAGE_SIZE;
1704 } 1849 }
1705 1850
@@ -1708,7 +1853,7 @@ static void __unmap_single(struct amd_iommu *iommu,
1708 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1853 dma_ops_free_addresses(dma_dom, dma_addr, pages);
1709 1854
1710 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1855 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
1711 iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size); 1856 iommu_flush_pages(&dma_dom->domain, dma_addr, size);
1712 dma_dom->need_flush = false; 1857 dma_dom->need_flush = false;
1713 } 1858 }
1714} 1859}
@@ -1722,36 +1867,29 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
1722 struct dma_attrs *attrs) 1867 struct dma_attrs *attrs)
1723{ 1868{
1724 unsigned long flags; 1869 unsigned long flags;
1725 struct amd_iommu *iommu;
1726 struct protection_domain *domain; 1870 struct protection_domain *domain;
1727 u16 devid;
1728 dma_addr_t addr; 1871 dma_addr_t addr;
1729 u64 dma_mask; 1872 u64 dma_mask;
1730 phys_addr_t paddr = page_to_phys(page) + offset; 1873 phys_addr_t paddr = page_to_phys(page) + offset;
1731 1874
1732 INC_STATS_COUNTER(cnt_map_single); 1875 INC_STATS_COUNTER(cnt_map_single);
1733 1876
1734 if (!check_device(dev)) 1877 domain = get_domain(dev);
1735 return bad_dma_address; 1878 if (PTR_ERR(domain) == -EINVAL)
1736
1737 dma_mask = *dev->dma_mask;
1738
1739 get_device_resources(dev, &iommu, &domain, &devid);
1740
1741 if (iommu == NULL || domain == NULL)
1742 /* device not handled by any AMD IOMMU */
1743 return (dma_addr_t)paddr; 1879 return (dma_addr_t)paddr;
1880 else if (IS_ERR(domain))
1881 return DMA_ERROR_CODE;
1744 1882
1745 if (!dma_ops_domain(domain)) 1883 dma_mask = *dev->dma_mask;
1746 return bad_dma_address;
1747 1884
1748 spin_lock_irqsave(&domain->lock, flags); 1885 spin_lock_irqsave(&domain->lock, flags);
1749 addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false, 1886
1887 addr = __map_single(dev, domain->priv, paddr, size, dir, false,
1750 dma_mask); 1888 dma_mask);
1751 if (addr == bad_dma_address) 1889 if (addr == DMA_ERROR_CODE)
1752 goto out; 1890 goto out;
1753 1891
1754 iommu_completion_wait(iommu); 1892 iommu_flush_complete(domain);
1755 1893
1756out: 1894out:
1757 spin_unlock_irqrestore(&domain->lock, flags); 1895 spin_unlock_irqrestore(&domain->lock, flags);
@@ -1766,25 +1904,19 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
1766 enum dma_data_direction dir, struct dma_attrs *attrs) 1904 enum dma_data_direction dir, struct dma_attrs *attrs)
1767{ 1905{
1768 unsigned long flags; 1906 unsigned long flags;
1769 struct amd_iommu *iommu;
1770 struct protection_domain *domain; 1907 struct protection_domain *domain;
1771 u16 devid;
1772 1908
1773 INC_STATS_COUNTER(cnt_unmap_single); 1909 INC_STATS_COUNTER(cnt_unmap_single);
1774 1910
1775 if (!check_device(dev) || 1911 domain = get_domain(dev);
1776 !get_device_resources(dev, &iommu, &domain, &devid)) 1912 if (IS_ERR(domain))
1777 /* device not handled by any AMD IOMMU */
1778 return;
1779
1780 if (!dma_ops_domain(domain))
1781 return; 1913 return;
1782 1914
1783 spin_lock_irqsave(&domain->lock, flags); 1915 spin_lock_irqsave(&domain->lock, flags);
1784 1916
1785 __unmap_single(iommu, domain->priv, dma_addr, size, dir); 1917 __unmap_single(domain->priv, dma_addr, size, dir);
1786 1918
1787 iommu_completion_wait(iommu); 1919 iommu_flush_complete(domain);
1788 1920
1789 spin_unlock_irqrestore(&domain->lock, flags); 1921 spin_unlock_irqrestore(&domain->lock, flags);
1790} 1922}
@@ -1816,9 +1948,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1816 struct dma_attrs *attrs) 1948 struct dma_attrs *attrs)
1817{ 1949{
1818 unsigned long flags; 1950 unsigned long flags;
1819 struct amd_iommu *iommu;
1820 struct protection_domain *domain; 1951 struct protection_domain *domain;
1821 u16 devid;
1822 int i; 1952 int i;
1823 struct scatterlist *s; 1953 struct scatterlist *s;
1824 phys_addr_t paddr; 1954 phys_addr_t paddr;
@@ -1827,25 +1957,20 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1827 1957
1828 INC_STATS_COUNTER(cnt_map_sg); 1958 INC_STATS_COUNTER(cnt_map_sg);
1829 1959
1830 if (!check_device(dev)) 1960 domain = get_domain(dev);
1961 if (PTR_ERR(domain) == -EINVAL)
1962 return map_sg_no_iommu(dev, sglist, nelems, dir);
1963 else if (IS_ERR(domain))
1831 return 0; 1964 return 0;
1832 1965
1833 dma_mask = *dev->dma_mask; 1966 dma_mask = *dev->dma_mask;
1834 1967
1835 get_device_resources(dev, &iommu, &domain, &devid);
1836
1837 if (!iommu || !domain)
1838 return map_sg_no_iommu(dev, sglist, nelems, dir);
1839
1840 if (!dma_ops_domain(domain))
1841 return 0;
1842
1843 spin_lock_irqsave(&domain->lock, flags); 1968 spin_lock_irqsave(&domain->lock, flags);
1844 1969
1845 for_each_sg(sglist, s, nelems, i) { 1970 for_each_sg(sglist, s, nelems, i) {
1846 paddr = sg_phys(s); 1971 paddr = sg_phys(s);
1847 1972
1848 s->dma_address = __map_single(dev, iommu, domain->priv, 1973 s->dma_address = __map_single(dev, domain->priv,
1849 paddr, s->length, dir, false, 1974 paddr, s->length, dir, false,
1850 dma_mask); 1975 dma_mask);
1851 1976
@@ -1856,7 +1981,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1856 goto unmap; 1981 goto unmap;
1857 } 1982 }
1858 1983
1859 iommu_completion_wait(iommu); 1984 iommu_flush_complete(domain);
1860 1985
1861out: 1986out:
1862 spin_unlock_irqrestore(&domain->lock, flags); 1987 spin_unlock_irqrestore(&domain->lock, flags);
@@ -1865,7 +1990,7 @@ out:
1865unmap: 1990unmap:
1866 for_each_sg(sglist, s, mapped_elems, i) { 1991 for_each_sg(sglist, s, mapped_elems, i) {
1867 if (s->dma_address) 1992 if (s->dma_address)
1868 __unmap_single(iommu, domain->priv, s->dma_address, 1993 __unmap_single(domain->priv, s->dma_address,
1869 s->dma_length, dir); 1994 s->dma_length, dir);
1870 s->dma_address = s->dma_length = 0; 1995 s->dma_address = s->dma_length = 0;
1871 } 1996 }
@@ -1884,30 +2009,25 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
1884 struct dma_attrs *attrs) 2009 struct dma_attrs *attrs)
1885{ 2010{
1886 unsigned long flags; 2011 unsigned long flags;
1887 struct amd_iommu *iommu;
1888 struct protection_domain *domain; 2012 struct protection_domain *domain;
1889 struct scatterlist *s; 2013 struct scatterlist *s;
1890 u16 devid;
1891 int i; 2014 int i;
1892 2015
1893 INC_STATS_COUNTER(cnt_unmap_sg); 2016 INC_STATS_COUNTER(cnt_unmap_sg);
1894 2017
1895 if (!check_device(dev) || 2018 domain = get_domain(dev);
1896 !get_device_resources(dev, &iommu, &domain, &devid)) 2019 if (IS_ERR(domain))
1897 return;
1898
1899 if (!dma_ops_domain(domain))
1900 return; 2020 return;
1901 2021
1902 spin_lock_irqsave(&domain->lock, flags); 2022 spin_lock_irqsave(&domain->lock, flags);
1903 2023
1904 for_each_sg(sglist, s, nelems, i) { 2024 for_each_sg(sglist, s, nelems, i) {
1905 __unmap_single(iommu, domain->priv, s->dma_address, 2025 __unmap_single(domain->priv, s->dma_address,
1906 s->dma_length, dir); 2026 s->dma_length, dir);
1907 s->dma_address = s->dma_length = 0; 2027 s->dma_address = s->dma_length = 0;
1908 } 2028 }
1909 2029
1910 iommu_completion_wait(iommu); 2030 iommu_flush_complete(domain);
1911 2031
1912 spin_unlock_irqrestore(&domain->lock, flags); 2032 spin_unlock_irqrestore(&domain->lock, flags);
1913} 2033}
@@ -1920,49 +2040,44 @@ static void *alloc_coherent(struct device *dev, size_t size,
1920{ 2040{
1921 unsigned long flags; 2041 unsigned long flags;
1922 void *virt_addr; 2042 void *virt_addr;
1923 struct amd_iommu *iommu;
1924 struct protection_domain *domain; 2043 struct protection_domain *domain;
1925 u16 devid;
1926 phys_addr_t paddr; 2044 phys_addr_t paddr;
1927 u64 dma_mask = dev->coherent_dma_mask; 2045 u64 dma_mask = dev->coherent_dma_mask;
1928 2046
1929 INC_STATS_COUNTER(cnt_alloc_coherent); 2047 INC_STATS_COUNTER(cnt_alloc_coherent);
1930 2048
1931 if (!check_device(dev)) 2049 domain = get_domain(dev);
2050 if (PTR_ERR(domain) == -EINVAL) {
2051 virt_addr = (void *)__get_free_pages(flag, get_order(size));
2052 *dma_addr = __pa(virt_addr);
2053 return virt_addr;
2054 } else if (IS_ERR(domain))
1932 return NULL; 2055 return NULL;
1933 2056
1934 if (!get_device_resources(dev, &iommu, &domain, &devid)) 2057 dma_mask = dev->coherent_dma_mask;
1935 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 2058 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2059 flag |= __GFP_ZERO;
1936 2060
1937 flag |= __GFP_ZERO;
1938 virt_addr = (void *)__get_free_pages(flag, get_order(size)); 2061 virt_addr = (void *)__get_free_pages(flag, get_order(size));
1939 if (!virt_addr) 2062 if (!virt_addr)
1940 return NULL; 2063 return NULL;
1941 2064
1942 paddr = virt_to_phys(virt_addr); 2065 paddr = virt_to_phys(virt_addr);
1943 2066
1944 if (!iommu || !domain) {
1945 *dma_addr = (dma_addr_t)paddr;
1946 return virt_addr;
1947 }
1948
1949 if (!dma_ops_domain(domain))
1950 goto out_free;
1951
1952 if (!dma_mask) 2067 if (!dma_mask)
1953 dma_mask = *dev->dma_mask; 2068 dma_mask = *dev->dma_mask;
1954 2069
1955 spin_lock_irqsave(&domain->lock, flags); 2070 spin_lock_irqsave(&domain->lock, flags);
1956 2071
1957 *dma_addr = __map_single(dev, iommu, domain->priv, paddr, 2072 *dma_addr = __map_single(dev, domain->priv, paddr,
1958 size, DMA_BIDIRECTIONAL, true, dma_mask); 2073 size, DMA_BIDIRECTIONAL, true, dma_mask);
1959 2074
1960 if (*dma_addr == bad_dma_address) { 2075 if (*dma_addr == DMA_ERROR_CODE) {
1961 spin_unlock_irqrestore(&domain->lock, flags); 2076 spin_unlock_irqrestore(&domain->lock, flags);
1962 goto out_free; 2077 goto out_free;
1963 } 2078 }
1964 2079
1965 iommu_completion_wait(iommu); 2080 iommu_flush_complete(domain);
1966 2081
1967 spin_unlock_irqrestore(&domain->lock, flags); 2082 spin_unlock_irqrestore(&domain->lock, flags);
1968 2083
@@ -1982,28 +2097,19 @@ static void free_coherent(struct device *dev, size_t size,
1982 void *virt_addr, dma_addr_t dma_addr) 2097 void *virt_addr, dma_addr_t dma_addr)
1983{ 2098{
1984 unsigned long flags; 2099 unsigned long flags;
1985 struct amd_iommu *iommu;
1986 struct protection_domain *domain; 2100 struct protection_domain *domain;
1987 u16 devid;
1988 2101
1989 INC_STATS_COUNTER(cnt_free_coherent); 2102 INC_STATS_COUNTER(cnt_free_coherent);
1990 2103
1991 if (!check_device(dev)) 2104 domain = get_domain(dev);
1992 return; 2105 if (IS_ERR(domain))
1993
1994 get_device_resources(dev, &iommu, &domain, &devid);
1995
1996 if (!iommu || !domain)
1997 goto free_mem;
1998
1999 if (!dma_ops_domain(domain))
2000 goto free_mem; 2106 goto free_mem;
2001 2107
2002 spin_lock_irqsave(&domain->lock, flags); 2108 spin_lock_irqsave(&domain->lock, flags);
2003 2109
2004 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); 2110 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
2005 2111
2006 iommu_completion_wait(iommu); 2112 iommu_flush_complete(domain);
2007 2113
2008 spin_unlock_irqrestore(&domain->lock, flags); 2114 spin_unlock_irqrestore(&domain->lock, flags);
2009 2115
@@ -2017,22 +2123,7 @@ free_mem:
2017 */ 2123 */
2018static int amd_iommu_dma_supported(struct device *dev, u64 mask) 2124static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2019{ 2125{
2020 u16 bdf; 2126 return check_device(dev);
2021 struct pci_dev *pcidev;
2022
2023 /* No device or no PCI device */
2024 if (!dev || dev->bus != &pci_bus_type)
2025 return 0;
2026
2027 pcidev = to_pci_dev(dev);
2028
2029 bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
2030
2031 /* Out of our scope? */
2032 if (bdf > amd_iommu_last_bdf)
2033 return 0;
2034
2035 return 1;
2036} 2127}
2037 2128
2038/* 2129/*
@@ -2046,25 +2137,30 @@ static void prealloc_protection_domains(void)
2046{ 2137{
2047 struct pci_dev *dev = NULL; 2138 struct pci_dev *dev = NULL;
2048 struct dma_ops_domain *dma_dom; 2139 struct dma_ops_domain *dma_dom;
2049 struct amd_iommu *iommu;
2050 u16 devid; 2140 u16 devid;
2051 2141
2052 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2142 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2053 devid = calc_devid(dev->bus->number, dev->devfn); 2143
2054 if (devid > amd_iommu_last_bdf) 2144 /* Do we handle this device? */
2055 continue; 2145 if (!check_device(&dev->dev))
2056 devid = amd_iommu_alias_table[devid];
2057 if (domain_for_device(devid))
2058 continue; 2146 continue;
2059 iommu = amd_iommu_rlookup_table[devid]; 2147
2060 if (!iommu) 2148 iommu_init_device(&dev->dev);
2149
2150 /* Is there already any domain for it? */
2151 if (domain_for_device(&dev->dev))
2061 continue; 2152 continue;
2062 dma_dom = dma_ops_domain_alloc(iommu); 2153
2154 devid = get_device_id(&dev->dev);
2155
2156 dma_dom = dma_ops_domain_alloc();
2063 if (!dma_dom) 2157 if (!dma_dom)
2064 continue; 2158 continue;
2065 init_unity_mappings_for_device(dma_dom, devid); 2159 init_unity_mappings_for_device(dma_dom, devid);
2066 dma_dom->target_dev = devid; 2160 dma_dom->target_dev = devid;
2067 2161
2162 attach_device(&dev->dev, &dma_dom->domain);
2163
2068 list_add_tail(&dma_dom->list, &iommu_pd_list); 2164 list_add_tail(&dma_dom->list, &iommu_pd_list);
2069 } 2165 }
2070} 2166}
@@ -2093,7 +2189,7 @@ int __init amd_iommu_init_dma_ops(void)
2093 * protection domain will be assigned to the default one. 2189 * protection domain will be assigned to the default one.
2094 */ 2190 */
2095 for_each_iommu(iommu) { 2191 for_each_iommu(iommu) {
2096 iommu->default_dom = dma_ops_domain_alloc(iommu); 2192 iommu->default_dom = dma_ops_domain_alloc();
2097 if (iommu->default_dom == NULL) 2193 if (iommu->default_dom == NULL)
2098 return -ENOMEM; 2194 return -ENOMEM;
2099 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; 2195 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
@@ -2103,15 +2199,12 @@ int __init amd_iommu_init_dma_ops(void)
2103 } 2199 }
2104 2200
2105 /* 2201 /*
2106 * If device isolation is enabled, pre-allocate the protection 2202 * Pre-allocate the protection domains for each device.
2107 * domains for each device.
2108 */ 2203 */
2109 if (amd_iommu_isolate) 2204 prealloc_protection_domains();
2110 prealloc_protection_domains();
2111 2205
2112 iommu_detected = 1; 2206 iommu_detected = 1;
2113 force_iommu = 1; 2207 swiotlb = 0;
2114 bad_dma_address = 0;
2115#ifdef CONFIG_GART_IOMMU 2208#ifdef CONFIG_GART_IOMMU
2116 gart_iommu_aperture_disabled = 1; 2209 gart_iommu_aperture_disabled = 1;
2117 gart_iommu_aperture = 0; 2210 gart_iommu_aperture = 0;
@@ -2150,14 +2243,17 @@ free_domains:
2150 2243
2151static void cleanup_domain(struct protection_domain *domain) 2244static void cleanup_domain(struct protection_domain *domain)
2152{ 2245{
2246 struct iommu_dev_data *dev_data, *next;
2153 unsigned long flags; 2247 unsigned long flags;
2154 u16 devid;
2155 2248
2156 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 2249 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2157 2250
2158 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) 2251 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
2159 if (amd_iommu_pd_table[devid] == domain) 2252 struct device *dev = dev_data->dev;
2160 __detach_device(domain, devid); 2253
2254 do_detach(dev);
2255 atomic_set(&dev_data->bind, 0);
2256 }
2161 2257
2162 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2258 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2163} 2259}
@@ -2167,6 +2263,8 @@ static void protection_domain_free(struct protection_domain *domain)
2167 if (!domain) 2263 if (!domain)
2168 return; 2264 return;
2169 2265
2266 del_domain_from_list(domain);
2267
2170 if (domain->id) 2268 if (domain->id)
2171 domain_id_free(domain->id); 2269 domain_id_free(domain->id);
2172 2270
@@ -2185,6 +2283,9 @@ static struct protection_domain *protection_domain_alloc(void)
2185 domain->id = domain_id_alloc(); 2283 domain->id = domain_id_alloc();
2186 if (!domain->id) 2284 if (!domain->id)
2187 goto out_err; 2285 goto out_err;
2286 INIT_LIST_HEAD(&domain->dev_list);
2287
2288 add_domain_to_list(domain);
2188 2289
2189 return domain; 2290 return domain;
2190 2291
@@ -2241,26 +2342,23 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
2241static void amd_iommu_detach_device(struct iommu_domain *dom, 2342static void amd_iommu_detach_device(struct iommu_domain *dom,
2242 struct device *dev) 2343 struct device *dev)
2243{ 2344{
2244 struct protection_domain *domain = dom->priv; 2345 struct iommu_dev_data *dev_data = dev->archdata.iommu;
2245 struct amd_iommu *iommu; 2346 struct amd_iommu *iommu;
2246 struct pci_dev *pdev;
2247 u16 devid; 2347 u16 devid;
2248 2348
2249 if (dev->bus != &pci_bus_type) 2349 if (!check_device(dev))
2250 return; 2350 return;
2251 2351
2252 pdev = to_pci_dev(dev); 2352 devid = get_device_id(dev);
2253
2254 devid = calc_devid(pdev->bus->number, pdev->devfn);
2255 2353
2256 if (devid > 0) 2354 if (dev_data->domain != NULL)
2257 detach_device(domain, devid); 2355 detach_device(dev);
2258 2356
2259 iommu = amd_iommu_rlookup_table[devid]; 2357 iommu = amd_iommu_rlookup_table[devid];
2260 if (!iommu) 2358 if (!iommu)
2261 return; 2359 return;
2262 2360
2263 iommu_queue_inv_dev_entry(iommu, devid); 2361 iommu_flush_device(dev);
2264 iommu_completion_wait(iommu); 2362 iommu_completion_wait(iommu);
2265} 2363}
2266 2364
@@ -2268,35 +2366,30 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
2268 struct device *dev) 2366 struct device *dev)
2269{ 2367{
2270 struct protection_domain *domain = dom->priv; 2368 struct protection_domain *domain = dom->priv;
2271 struct protection_domain *old_domain; 2369 struct iommu_dev_data *dev_data;
2272 struct amd_iommu *iommu; 2370 struct amd_iommu *iommu;
2273 struct pci_dev *pdev; 2371 int ret;
2274 u16 devid; 2372 u16 devid;
2275 2373
2276 if (dev->bus != &pci_bus_type) 2374 if (!check_device(dev))
2277 return -EINVAL; 2375 return -EINVAL;
2278 2376
2279 pdev = to_pci_dev(dev); 2377 dev_data = dev->archdata.iommu;
2280 2378
2281 devid = calc_devid(pdev->bus->number, pdev->devfn); 2379 devid = get_device_id(dev);
2282
2283 if (devid >= amd_iommu_last_bdf ||
2284 devid != amd_iommu_alias_table[devid])
2285 return -EINVAL;
2286 2380
2287 iommu = amd_iommu_rlookup_table[devid]; 2381 iommu = amd_iommu_rlookup_table[devid];
2288 if (!iommu) 2382 if (!iommu)
2289 return -EINVAL; 2383 return -EINVAL;
2290 2384
2291 old_domain = domain_for_device(devid); 2385 if (dev_data->domain)
2292 if (old_domain) 2386 detach_device(dev);
2293 detach_device(old_domain, devid);
2294 2387
2295 attach_device(iommu, domain, devid); 2388 ret = attach_device(dev, domain);
2296 2389
2297 iommu_completion_wait(iommu); 2390 iommu_completion_wait(iommu);
2298 2391
2299 return 0; 2392 return ret;
2300} 2393}
2301 2394
2302static int amd_iommu_map_range(struct iommu_domain *dom, 2395static int amd_iommu_map_range(struct iommu_domain *dom,
@@ -2342,7 +2435,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
2342 iova += PAGE_SIZE; 2435 iova += PAGE_SIZE;
2343 } 2436 }
2344 2437
2345 iommu_flush_domain(domain->id); 2438 iommu_flush_tlb_pde(domain);
2346} 2439}
2347 2440
2348static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, 2441static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2393,8 +2486,9 @@ static struct iommu_ops amd_iommu_ops = {
2393 2486
2394int __init amd_iommu_init_passthrough(void) 2487int __init amd_iommu_init_passthrough(void)
2395{ 2488{
2489 struct amd_iommu *iommu;
2396 struct pci_dev *dev = NULL; 2490 struct pci_dev *dev = NULL;
2397 u16 devid, devid2; 2491 u16 devid;
2398 2492
2399 /* allocate passthrough domain */ 2493
2400 pt_domain = protection_domain_alloc(); 2494 pt_domain = protection_domain_alloc();
@@ -2404,20 +2498,17 @@ int __init amd_iommu_init_passthrough(void)
2404 pt_domain->mode |= PAGE_MODE_NONE; 2498 pt_domain->mode |= PAGE_MODE_NONE;
2405 2499
2406 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2500 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2407 struct amd_iommu *iommu;
2408 2501
2409 devid = calc_devid(dev->bus->number, dev->devfn); 2502 if (!check_device(&dev->dev))
2410 if (devid > amd_iommu_last_bdf)
2411 continue; 2503 continue;
2412 2504
2413 devid2 = amd_iommu_alias_table[devid]; 2505 devid = get_device_id(&dev->dev);
2414 2506
2415 iommu = amd_iommu_rlookup_table[devid2]; 2507 iommu = amd_iommu_rlookup_table[devid];
2416 if (!iommu) 2508 if (!iommu)
2417 continue; 2509 continue;
2418 2510
2419 __attach_device(iommu, pt_domain, devid); 2511 attach_device(&dev->dev, pt_domain);
2420 __attach_device(iommu, pt_domain, devid2);
2421 } 2512 }
2422 2513
2423 pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); 2514 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
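
The reworked get_domain() above distinguishes "no IOMMU handles this device" (-EINVAL, fall back to the physical address) from real failures by returning error pointers. Below is a minimal sketch of that convention using the standard err.h helpers; struct thing and find_thing() are hypothetical stand-ins for illustration, not part of the patch.

#include <linux/err.h>
#include <linux/errno.h>

struct thing;

/* Hypothetical lookup that reports errors through the pointer itself. */
static struct thing *find_thing(struct thing *known, int usable)
{
	if (!known)
		return ERR_PTR(-EINVAL);	/* not ours at all */
	if (!usable)
		return ERR_PTR(-EBUSY);		/* ours, but cannot be used here */
	return known;				/* success: a normal pointer */
}

/* Caller pattern mirroring map_page()/map_sg() in the diff above. */
static int use_thing(struct thing *known, int usable)
{
	struct thing *t = find_thing(known, usable);

	if (PTR_ERR(t) == -EINVAL)
		return 0;			/* no translation: fall back */
	else if (IS_ERR(t))
		return PTR_ERR(t);		/* hard error */

	return 1;				/* t is valid and can be used */
}
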
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index c20001e4f556..7ffc39965233 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -25,10 +25,12 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/msi.h> 26#include <linux/msi.h>
27#include <asm/pci-direct.h> 27#include <asm/pci-direct.h>
28#include <asm/amd_iommu_proto.h>
28#include <asm/amd_iommu_types.h> 29#include <asm/amd_iommu_types.h>
29#include <asm/amd_iommu.h> 30#include <asm/amd_iommu.h>
30#include <asm/iommu.h> 31#include <asm/iommu.h>
31#include <asm/gart.h> 32#include <asm/gart.h>
33#include <asm/x86_init.h>
32 34
33/* 35/*
34 * definitions for the ACPI scanning code 36 * definitions for the ACPI scanning code
@@ -123,18 +125,24 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
123 to handle */ 125 to handle */
124LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 126LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
125 we find in ACPI */ 127 we find in ACPI */
126#ifdef CONFIG_IOMMU_STRESS
127bool amd_iommu_isolate = false;
128#else
129bool amd_iommu_isolate = true; /* if true, device isolation is
130 enabled */
131#endif
132
133bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 128bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
134 129
135LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 130LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
136 system */ 131 system */
137 132
133/* Array to assign indices to IOMMUs */
134struct amd_iommu *amd_iommus[MAX_IOMMUS];
135int amd_iommus_present;
136
137/* IOMMUs have a non-present cache? */
138bool amd_iommu_np_cache __read_mostly;
139
140/*
141 * List of protection domains - used during resume
142 */
143LIST_HEAD(amd_iommu_pd_list);
144spinlock_t amd_iommu_pd_lock;
145
138/* 146/*
139 * Pointer to the device table which is shared by all AMD IOMMUs 147 * Pointer to the device table which is shared by all AMD IOMMUs
140 * it is indexed by the PCI device id or the HT unit id and contains 148 * it is indexed by the PCI device id or the HT unit id and contains
@@ -157,12 +165,6 @@ u16 *amd_iommu_alias_table;
157struct amd_iommu **amd_iommu_rlookup_table; 165struct amd_iommu **amd_iommu_rlookup_table;
158 166
159/* 167/*
160 * The pd table (protection domain table) is used to find the protection domain
161 * data structure a device belongs to. Indexed with the PCI device id too.
162 */
163struct protection_domain **amd_iommu_pd_table;
164
165/*
166 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap 168
167 * to know which ones are already in use. 169 * to know which ones are already in use.
168 */ 170 */
@@ -838,7 +840,18 @@ static void __init free_iommu_all(void)
838static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) 840static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
839{ 841{
840 spin_lock_init(&iommu->lock); 842 spin_lock_init(&iommu->lock);
843
844 /* Add IOMMU to internal data structures */
841 list_add_tail(&iommu->list, &amd_iommu_list); 845 list_add_tail(&iommu->list, &amd_iommu_list);
846 iommu->index = amd_iommus_present++;
847
848 if (unlikely(iommu->index >= MAX_IOMMUS)) {
849 WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
850 return -ENOSYS;
851 }
852
853 /* Index is fine - add IOMMU to the array */
854 amd_iommus[iommu->index] = iommu;
842 855
843 /* 856 /*
844 * Copy data from ACPI table entry to the iommu struct 857 * Copy data from ACPI table entry to the iommu struct
@@ -868,6 +881,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
868 init_iommu_from_acpi(iommu, h); 881 init_iommu_from_acpi(iommu, h);
869 init_iommu_devices(iommu); 882 init_iommu_devices(iommu);
870 883
884 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
885 amd_iommu_np_cache = true;
886
871 return pci_enable_device(iommu->dev); 887 return pci_enable_device(iommu->dev);
872} 888}
873 889
@@ -925,7 +941,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
925 * 941 *
926 ****************************************************************************/ 942 ****************************************************************************/
927 943
928static int __init iommu_setup_msi(struct amd_iommu *iommu) 944static int iommu_setup_msi(struct amd_iommu *iommu)
929{ 945{
930 int r; 946 int r;
931 947
@@ -1176,19 +1192,10 @@ static struct sys_device device_amd_iommu = {
1176 * functions. Finally it prints some information about AMD IOMMUs and 1192 * functions. Finally it prints some information about AMD IOMMUs and
1177 * the driver state and enables the hardware. 1193 * the driver state and enables the hardware.
1178 */ 1194 */
1179int __init amd_iommu_init(void) 1195static int __init amd_iommu_init(void)
1180{ 1196{
1181 int i, ret = 0; 1197 int i, ret = 0;
1182 1198
1183
1184 if (no_iommu) {
1185 printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
1186 return 0;
1187 }
1188
1189 if (!amd_iommu_detected)
1190 return -ENODEV;
1191
1192 /* 1199 /*
1193 * First parse ACPI tables to find the largest Bus/Dev/Func 1200 * First parse ACPI tables to find the largest Bus/Dev/Func
1194 * we need to handle. Upon this information the shared data 1201 * we need to handle. Upon this information the shared data
@@ -1225,15 +1232,6 @@ int __init amd_iommu_init(void)
1225 if (amd_iommu_rlookup_table == NULL) 1232 if (amd_iommu_rlookup_table == NULL)
1226 goto free; 1233 goto free;
1227 1234
1228 /*
1229 * Protection Domain table - maps devices to protection domains
1230 * This table has the same size as the rlookup_table
1231 */
1232 amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1233 get_order(rlookup_table_size));
1234 if (amd_iommu_pd_table == NULL)
1235 goto free;
1236
1237 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( 1235 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
1238 GFP_KERNEL | __GFP_ZERO, 1236 GFP_KERNEL | __GFP_ZERO,
1239 get_order(MAX_DOMAIN_ID/8)); 1237 get_order(MAX_DOMAIN_ID/8));
@@ -1255,6 +1253,8 @@ int __init amd_iommu_init(void)
1255 */ 1253 */
1256 amd_iommu_pd_alloc_bitmap[0] = 1; 1254 amd_iommu_pd_alloc_bitmap[0] = 1;
1257 1255
1256 spin_lock_init(&amd_iommu_pd_lock);
1257
1258 /* 1258 /*
1259 * now the data structures are allocated and basically initialized 1259 * now the data structures are allocated and basically initialized
1260 * start the real acpi table scan 1260 * start the real acpi table scan
@@ -1286,17 +1286,12 @@ int __init amd_iommu_init(void)
1286 if (iommu_pass_through) 1286 if (iommu_pass_through)
1287 goto out; 1287 goto out;
1288 1288
1289 printk(KERN_INFO "AMD-Vi: device isolation ");
1290 if (amd_iommu_isolate)
1291 printk("enabled\n");
1292 else
1293 printk("disabled\n");
1294
1295 if (amd_iommu_unmap_flush) 1289 if (amd_iommu_unmap_flush)
1296 printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); 1290 printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
1297 else 1291 else
1298 printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); 1292 printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
1299 1293
1294 x86_platform.iommu_shutdown = disable_iommus;
1300out: 1295out:
1301 return ret; 1296 return ret;
1302 1297
@@ -1304,9 +1299,6 @@ free:
1304 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1299 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
1305 get_order(MAX_DOMAIN_ID/8)); 1300 get_order(MAX_DOMAIN_ID/8));
1306 1301
1307 free_pages((unsigned long)amd_iommu_pd_table,
1308 get_order(rlookup_table_size));
1309
1310 free_pages((unsigned long)amd_iommu_rlookup_table, 1302 free_pages((unsigned long)amd_iommu_rlookup_table,
1311 get_order(rlookup_table_size)); 1303 get_order(rlookup_table_size));
1312 1304
@@ -1323,11 +1315,6 @@ free:
1323 goto out; 1315 goto out;
1324} 1316}
1325 1317
1326void amd_iommu_shutdown(void)
1327{
1328 disable_iommus();
1329}
1330
1331/**************************************************************************** 1318/****************************************************************************
1332 * 1319 *
1333 * Early detect code. This code runs at IOMMU detection time in the DMA 1320 * Early detect code. This code runs at IOMMU detection time in the DMA
@@ -1342,16 +1329,13 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table)
1342 1329
1343void __init amd_iommu_detect(void) 1330void __init amd_iommu_detect(void)
1344{ 1331{
1345 if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture)) 1332 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
1346 return; 1333 return;
1347 1334
1348 if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { 1335 if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
1349 iommu_detected = 1; 1336 iommu_detected = 1;
1350 amd_iommu_detected = 1; 1337 amd_iommu_detected = 1;
1351#ifdef CONFIG_GART_IOMMU 1338 x86_init.iommu.iommu_init = amd_iommu_init;
1352 gart_iommu_aperture_disabled = 1;
1353 gart_iommu_aperture = 0;
1354#endif
1355 } 1339 }
1356} 1340}
1357 1341
@@ -1372,10 +1356,6 @@ static int __init parse_amd_iommu_dump(char *str)
1372static int __init parse_amd_iommu_options(char *str) 1356static int __init parse_amd_iommu_options(char *str)
1373{ 1357{
1374 for (; *str; ++str) { 1358 for (; *str; ++str) {
1375 if (strncmp(str, "isolate", 7) == 0)
1376 amd_iommu_isolate = true;
1377 if (strncmp(str, "share", 5) == 0)
1378 amd_iommu_isolate = false;
1379 if (strncmp(str, "fullflush", 9) == 0) 1359 if (strncmp(str, "fullflush", 9) == 0)
1380 amd_iommu_unmap_flush = true; 1360 amd_iommu_unmap_flush = true;
1381 } 1361 }
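
The init-path changes in this file (amd_iommu_detect() setting x86_init.iommu.iommu_init instead of amd_iommu_init() being called directly) follow a simple hook pattern: each detector registers a callback and one late call site runs whichever backend was found. The sketch below is a simplified, self-contained illustration of that pattern; the structure and names are stand-ins, not the real x86_init definition.

/* One function-pointer slot, preloaded with a harmless default. */
struct iommu_init_ops {
	int (*iommu_init)(void);
};

static int no_iommu_init_stub(void)
{
	return 0;			/* nothing detected: keep nommu/swiotlb */
}

static int amd_iommu_init_stub(void)
{
	return 0;			/* stand-in for the real initializer */
}

static struct iommu_init_ops init_ops = {
	.iommu_init = no_iommu_init_stub,
};

/* Detection time: the winning detector installs its initializer. */
static void detect_example(int amd_iommu_found)
{
	if (amd_iommu_found)
		init_ops.iommu_init = amd_iommu_init_stub;
}

/* Late init: exactly one backend initializer runs. */
static int pci_iommu_init_example(void)
{
	return init_ops.iommu_init();
}
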
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 128111d8ffe0..e0dfb6856aa2 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -28,6 +28,7 @@
28#include <asm/pci-direct.h> 28#include <asm/pci-direct.h>
29#include <asm/dma.h> 29#include <asm/dma.h>
30#include <asm/k8.h> 30#include <asm/k8.h>
31#include <asm/x86_init.h>
31 32
32int gart_iommu_aperture; 33int gart_iommu_aperture;
33int gart_iommu_aperture_disabled __initdata; 34int gart_iommu_aperture_disabled __initdata;
@@ -400,6 +401,7 @@ void __init gart_iommu_hole_init(void)
400 401
401 iommu_detected = 1; 402 iommu_detected = 1;
402 gart_iommu_aperture = 1; 403 gart_iommu_aperture = 1;
404 x86_init.iommu.iommu_init = gart_iommu_init;
403 405
404 aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; 406 aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
405 aper_size = (32 * 1024 * 1024) << aper_order; 407 aper_size = (32 * 1024 * 1024) << aper_order;
@@ -456,7 +458,7 @@ out:
456 458
457 if (aper_alloc) { 459 if (aper_alloc) {
458 /* Got the aperture from the AGP bridge */ 460 /* Got the aperture from the AGP bridge */
459 } else if (swiotlb && !valid_agp) { 461 } else if (!valid_agp) {
460 /* Do nothing */ 462 /* Do nothing */
461 } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) || 463 } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
462 force_iommu || 464 force_iommu ||
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 5e409dc298a4..a4849c10a77e 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,8 +27,7 @@
27#include <asm/cpu.h> 27#include <asm/cpu.h>
28#include <asm/reboot.h> 28#include <asm/reboot.h>
29#include <asm/virtext.h> 29#include <asm/virtext.h>
30#include <asm/iommu.h> 30#include <asm/x86_init.h>
31
32 31
33#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 32#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
34 33
@@ -106,7 +105,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
106#endif 105#endif
107 106
108#ifdef CONFIG_X86_64 107#ifdef CONFIG_X86_64
109 pci_iommu_shutdown(); 108 x86_platform.iommu_shutdown();
110#endif 109#endif
111 110
112 crash_save_cpu(regs, safe_smp_processor_id()); 111 crash_save_cpu(regs, safe_smp_processor_id());
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c097e7d607c6..7d52e9da5e0c 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1185,17 +1185,14 @@ END(ftrace_graph_caller)
1185 1185
1186.globl return_to_handler 1186.globl return_to_handler
1187return_to_handler: 1187return_to_handler:
1188 pushl $0
1189 pushl %eax 1188 pushl %eax
1190 pushl %ecx
1191 pushl %edx 1189 pushl %edx
1192 movl %ebp, %eax 1190 movl %ebp, %eax
1193 call ftrace_return_to_handler 1191 call ftrace_return_to_handler
1194 movl %eax, 0xc(%esp) 1192 movl %eax, %ecx
1195 popl %edx 1193 popl %edx
1196 popl %ecx
1197 popl %eax 1194 popl %eax
1198 ret 1195 jmp *%ecx
1199#endif 1196#endif
1200 1197
1201.section .rodata,"a" 1198.section .rodata,"a"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b5c061f8f358..bd5bbddddf91 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -155,11 +155,11 @@ GLOBAL(return_to_handler)
155 155
156 call ftrace_return_to_handler 156 call ftrace_return_to_handler
157 157
158 movq %rax, 16(%rsp) 158 movq %rax, %rdi
159 movq 8(%rsp), %rdx 159 movq 8(%rsp), %rdx
160 movq (%rsp), %rax 160 movq (%rsp), %rax
161 addq $16, %rsp 161 addq $24, %rsp
162 retq 162 jmp *%rdi
163#endif 163#endif
164 164
165 165
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 9dbb527e1652..5a1b9758fd62 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -9,6 +9,8 @@
9 * the dangers of modifying code on the run. 9 * the dangers of modifying code on the run.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/spinlock.h> 14#include <linux/spinlock.h>
13#include <linux/hardirq.h> 15#include <linux/hardirq.h>
14#include <linux/uaccess.h> 16#include <linux/uaccess.h>
@@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data)
336 338
337 switch (faulted) { 339 switch (faulted) {
338 case 0: 340 case 0:
339 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); 341 pr_info("converting mcount calls to 0f 1f 44 00 00\n");
340 memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); 342 memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
341 break; 343 break;
342 case 1: 344 case 1:
343 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); 345 pr_info("converting mcount calls to 66 66 66 66 90\n");
344 memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); 346 memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
345 break; 347 break;
346 case 2: 348 case 2:
347 pr_info("ftrace: converting mcount calls to jmp . + 5\n"); 349 pr_info("converting mcount calls to jmp . + 5\n");
348 memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); 350 memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
349 break; 351 break;
350 } 352 }
@@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
468 470
469#ifdef CONFIG_FTRACE_SYSCALLS 471#ifdef CONFIG_FTRACE_SYSCALLS
470 472
471extern unsigned long __start_syscalls_metadata[];
472extern unsigned long __stop_syscalls_metadata[];
473extern unsigned long *sys_call_table; 473extern unsigned long *sys_call_table;
474 474
475static struct syscall_metadata **syscalls_metadata; 475unsigned long __init arch_syscall_addr(int nr)
476
477static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
478{
479 struct syscall_metadata *start;
480 struct syscall_metadata *stop;
481 char str[KSYM_SYMBOL_LEN];
482
483
484 start = (struct syscall_metadata *)__start_syscalls_metadata;
485 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
486 kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
487
488 for ( ; start < stop; start++) {
489 if (start->name && !strcmp(start->name, str))
490 return start;
491 }
492 return NULL;
493}
494
495struct syscall_metadata *syscall_nr_to_meta(int nr)
496{
497 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
498 return NULL;
499
500 return syscalls_metadata[nr];
501}
502
503int syscall_name_to_nr(char *name)
504{ 476{
505 int i; 477 return (unsigned long)(&sys_call_table)[nr];
506
507 if (!syscalls_metadata)
508 return -1;
509
510 for (i = 0; i < NR_syscalls; i++) {
511 if (syscalls_metadata[i]) {
512 if (!strcmp(syscalls_metadata[i]->name, name))
513 return i;
514 }
515 }
516 return -1;
517}
518
519void set_syscall_enter_id(int num, int id)
520{
521 syscalls_metadata[num]->enter_id = id;
522}
523
524void set_syscall_exit_id(int num, int id)
525{
526 syscalls_metadata[num]->exit_id = id;
527}
528
529static int __init arch_init_ftrace_syscalls(void)
530{
531 int i;
532 struct syscall_metadata *meta;
533 unsigned long **psys_syscall_table = &sys_call_table;
534
535 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
536 NR_syscalls, GFP_KERNEL);
537 if (!syscalls_metadata) {
538 WARN_ON(1);
539 return -ENOMEM;
540 }
541
542 for (i = 0; i < NR_syscalls; i++) {
543 meta = find_syscall_meta(psys_syscall_table[i]);
544 syscalls_metadata[i] = meta;
545 }
546 return 0;
547} 478}
548arch_initcall(arch_init_ftrace_syscalls);
549#endif 479#endif
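
With the x86-specific metadata table removed, arch_syscall_addr() is all the architecture provides; a generic layer can resolve each syscall number to a symbol name and look up metadata by that name, much as the deleted code did. A rough sketch under that assumption follows; find_syscall_meta_by_name() and the metadata table are hypothetical.

#include <linux/kallsyms.h>

struct syscall_metadata;

extern unsigned long arch_syscall_addr(int nr);
/* Hypothetical by-name lookup over the compiled-in metadata entries. */
extern struct syscall_metadata *find_syscall_meta_by_name(const char *name);

static void fill_syscall_metadata(struct syscall_metadata **table,
				  int nr_syscalls)
{
	char sym[KSYM_SYMBOL_LEN];
	int nr;

	for (nr = 0; nr < nr_syscalls; nr++) {
		/* Address of the handler, then its symbol name. */
		kallsyms_lookup(arch_syscall_addr(nr), NULL, NULL, NULL, sym);
		table[nr] = find_syscall_meta_by_name(sym);
	}
}
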
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 971a3bec47a8..c563e4c8ff39 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -46,6 +46,7 @@
46#include <asm/dma.h> 46#include <asm/dma.h>
47#include <asm/rio.h> 47#include <asm/rio.h>
48#include <asm/bios_ebda.h> 48#include <asm/bios_ebda.h>
49#include <asm/x86_init.h>
49 50
50#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT 51#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
51int use_calgary __read_mostly = 1; 52int use_calgary __read_mostly = 1;
@@ -244,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
244 if (panic_on_overflow) 245 if (panic_on_overflow)
245 panic("Calgary: fix the allocator.\n"); 246 panic("Calgary: fix the allocator.\n");
246 else 247 else
247 return bad_dma_address; 248 return DMA_ERROR_CODE;
248 } 249 }
249 } 250 }
250 251
@@ -260,12 +261,15 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
260 void *vaddr, unsigned int npages, int direction) 261 void *vaddr, unsigned int npages, int direction)
261{ 262{
262 unsigned long entry; 263 unsigned long entry;
263 dma_addr_t ret = bad_dma_address; 264 dma_addr_t ret;
264 265
265 entry = iommu_range_alloc(dev, tbl, npages); 266 entry = iommu_range_alloc(dev, tbl, npages);
266 267
267 if (unlikely(entry == bad_dma_address)) 268 if (unlikely(entry == DMA_ERROR_CODE)) {
268 goto error; 269 printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
270 "iommu %p\n", npages, tbl);
271 return DMA_ERROR_CODE;
272 }
269 273
270 /* set the return dma address */ 274 /* set the return dma address */
271 ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK); 275 ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
@@ -273,13 +277,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
273 /* put the TCEs in the HW table */ 277 /* put the TCEs in the HW table */
274 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK, 278 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
275 direction); 279 direction);
276
277 return ret; 280 return ret;
278
279error:
280 printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
281 "iommu %p\n", npages, tbl);
282 return bad_dma_address;
283} 281}
284 282
285static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 283static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -290,8 +288,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
290 unsigned long flags; 288 unsigned long flags;
291 289
292 /* were we called with bad_dma_address? */ 290 /* were we called with bad_dma_address? */
293 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); 291 badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
294 if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) { 292 if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
295 WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " 293 WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
296 "address 0x%Lx\n", dma_addr); 294 "address 0x%Lx\n", dma_addr);
297 return; 295 return;
@@ -318,13 +316,15 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
318 316
319 pdev = to_pci_dev(dev); 317 pdev = to_pci_dev(dev);
320 318
319 /* search up the device tree for an iommu */
321 pbus = pdev->bus; 320 pbus = pdev->bus;
322 321 do {
323 /* is the device behind a bridge? Look for the root bus */ 322 tbl = pci_iommu(pbus);
324 while (pbus->parent) 323 if (tbl && tbl->it_busno == pbus->number)
324 break;
325 tbl = NULL;
325 pbus = pbus->parent; 326 pbus = pbus->parent;
326 327 } while (pbus);
327 tbl = pci_iommu(pbus);
328 328
329 BUG_ON(tbl && (tbl->it_busno != pbus->number)); 329 BUG_ON(tbl && (tbl->it_busno != pbus->number));
330 330
@@ -373,7 +373,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
373 npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE); 373 npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
374 374
375 entry = iommu_range_alloc(dev, tbl, npages); 375 entry = iommu_range_alloc(dev, tbl, npages);
376 if (entry == bad_dma_address) { 376 if (entry == DMA_ERROR_CODE) {
377 /* makes sure unmap knows to stop */ 377 /* makes sure unmap knows to stop */
378 s->dma_length = 0; 378 s->dma_length = 0;
379 goto error; 379 goto error;
@@ -391,7 +391,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
391error: 391error:
392 calgary_unmap_sg(dev, sg, nelems, dir, NULL); 392 calgary_unmap_sg(dev, sg, nelems, dir, NULL);
393 for_each_sg(sg, s, nelems, i) { 393 for_each_sg(sg, s, nelems, i) {
394 sg->dma_address = bad_dma_address; 394 sg->dma_address = DMA_ERROR_CODE;
395 sg->dma_length = 0; 395 sg->dma_length = 0;
396 } 396 }
397 return 0; 397 return 0;
@@ -446,7 +446,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
446 446
447 /* set up tces to cover the allocated range */ 447 /* set up tces to cover the allocated range */
448 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); 448 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
449 if (mapping == bad_dma_address) 449 if (mapping == DMA_ERROR_CODE)
450 goto free; 450 goto free;
451 *dma_handle = mapping; 451 *dma_handle = mapping;
452 return ret; 452 return ret;
@@ -727,7 +727,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
727 struct iommu_table *tbl = pci_iommu(dev->bus); 727 struct iommu_table *tbl = pci_iommu(dev->bus);
728 728
729 /* reserve EMERGENCY_PAGES from bad_dma_address and up */ 729 /* reserve EMERGENCY_PAGES from bad_dma_address and up */
730 iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES); 730 iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
731 731
732 /* avoid the BIOS/VGA first 640KB-1MB region */ 732 /* avoid the BIOS/VGA first 640KB-1MB region */
733 /* for CalIOC2 - avoid the entire first MB */ 733 /* for CalIOC2 - avoid the entire first MB */
@@ -1344,6 +1344,23 @@ static void __init get_tce_space_from_tar(void)
1344 return; 1344 return;
1345} 1345}
1346 1346
1347static int __init calgary_iommu_init(void)
1348{
1349 int ret;
1350
1351 /* ok, we're trying to use Calgary - let's roll */
1352 printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
1353
1354 ret = calgary_init();
1355 if (ret) {
1356 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
1357 "falling back to no_iommu\n", ret);
1358 return ret;
1359 }
1360
1361 return 0;
1362}
1363
1347void __init detect_calgary(void) 1364void __init detect_calgary(void)
1348{ 1365{
1349 int bus; 1366 int bus;
@@ -1357,7 +1374,7 @@ void __init detect_calgary(void)
1357 * if the user specified iommu=off or iommu=soft or we found 1374 * if the user specified iommu=off or iommu=soft or we found
1358 * another HW IOMMU already, bail out. 1375 * another HW IOMMU already, bail out.
1359 */ 1376 */
1360 if (swiotlb || no_iommu || iommu_detected) 1377 if (no_iommu || iommu_detected)
1361 return; 1378 return;
1362 1379
1363 if (!use_calgary) 1380 if (!use_calgary)
@@ -1442,9 +1459,7 @@ void __init detect_calgary(void)
1442 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n", 1459 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n",
1443 specified_table_size); 1460 specified_table_size);
1444 1461
1445 /* swiotlb for devices that aren't behind the Calgary. */ 1462 x86_init.iommu.iommu_init = calgary_iommu_init;
1446 if (max_pfn > MAX_DMA32_PFN)
1447 swiotlb = 1;
1448 } 1463 }
1449 return; 1464 return;
1450 1465
@@ -1457,35 +1472,6 @@ cleanup:
1457 } 1472 }
1458} 1473}
1459 1474
1460int __init calgary_iommu_init(void)
1461{
1462 int ret;
1463
1464 if (no_iommu || (swiotlb && !calgary_detected))
1465 return -ENODEV;
1466
1467 if (!calgary_detected)
1468 return -ENODEV;
1469
1470 /* ok, we're trying to use Calgary - let's roll */
1471 printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
1472
1473 ret = calgary_init();
1474 if (ret) {
1475 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
1476 "falling back to no_iommu\n", ret);
1477 return ret;
1478 }
1479
1480 force_iommu = 1;
1481 bad_dma_address = 0x0;
1482 /* dma_ops is set to swiotlb or nommu */
1483 if (!dma_ops)
1484 dma_ops = &nommu_dma_ops;
1485
1486 return 0;
1487}
1488
1489static int __init calgary_parse_options(char *p) 1475static int __init calgary_parse_options(char *p)
1490{ 1476{
1491 unsigned int bridge; 1477 unsigned int bridge;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index a6e804d16c35..afcc58b69c7c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,10 +11,11 @@
11#include <asm/gart.h> 11#include <asm/gart.h>
12#include <asm/calgary.h> 12#include <asm/calgary.h>
13#include <asm/amd_iommu.h> 13#include <asm/amd_iommu.h>
14#include <asm/x86_init.h>
14 15
15static int forbid_dac __read_mostly; 16static int forbid_dac __read_mostly;
16 17
17struct dma_map_ops *dma_ops; 18struct dma_map_ops *dma_ops = &nommu_dma_ops;
18EXPORT_SYMBOL(dma_ops); 19EXPORT_SYMBOL(dma_ops);
19 20
20static int iommu_sac_force __read_mostly; 21static int iommu_sac_force __read_mostly;
@@ -42,9 +43,6 @@ int iommu_detected __read_mostly = 0;
42 */ 43 */
43int iommu_pass_through __read_mostly; 44int iommu_pass_through __read_mostly;
44 45
45dma_addr_t bad_dma_address __read_mostly = 0;
46EXPORT_SYMBOL(bad_dma_address);
47
48/* Dummy device used for NULL arguments (normally ISA). */ 46/* Dummy device used for NULL arguments (normally ISA). */
49struct device x86_dma_fallback_dev = { 47struct device x86_dma_fallback_dev = {
50 .init_name = "fallback device", 48 .init_name = "fallback device",
@@ -126,20 +124,17 @@ void __init pci_iommu_alloc(void)
126 /* free the range so iommu could get some range less than 4G */ 124 /* free the range so iommu could get some range less than 4G */
127 dma32_free_bootmem(); 125 dma32_free_bootmem();
128#endif 126#endif
127 if (pci_swiotlb_init())
128 return;
129 129
130 /*
131 * The order of these functions is important for
132 * fall-back/fail-over reasons
133 */
134 gart_iommu_hole_init(); 130 gart_iommu_hole_init();
135 131
136 detect_calgary(); 132 detect_calgary();
137 133
138 detect_intel_iommu(); 134 detect_intel_iommu();
139 135
136 /* needs to be called after gart_iommu_hole_init */
140 amd_iommu_detect(); 137 amd_iommu_detect();
141
142 pci_swiotlb_init();
143} 138}
144 139
145void *dma_generic_alloc_coherent(struct device *dev, size_t size, 140void *dma_generic_alloc_coherent(struct device *dev, size_t size,
@@ -214,7 +209,7 @@ static __init int iommu_setup(char *p)
214 if (!strncmp(p, "allowdac", 8)) 209 if (!strncmp(p, "allowdac", 8))
215 forbid_dac = 0; 210 forbid_dac = 0;
216 if (!strncmp(p, "nodac", 5)) 211 if (!strncmp(p, "nodac", 5))
217 forbid_dac = -1; 212 forbid_dac = 1;
218 if (!strncmp(p, "usedac", 6)) { 213 if (!strncmp(p, "usedac", 6)) {
219 forbid_dac = -1; 214 forbid_dac = -1;
220 return 1; 215 return 1;
@@ -289,25 +284,17 @@ static int __init pci_iommu_init(void)
289#ifdef CONFIG_PCI 284#ifdef CONFIG_PCI
290 dma_debug_add_bus(&pci_bus_type); 285 dma_debug_add_bus(&pci_bus_type);
291#endif 286#endif
287 x86_init.iommu.iommu_init();
292 288
293 calgary_iommu_init(); 289 if (swiotlb) {
294 290 printk(KERN_INFO "PCI-DMA: "
295 intel_iommu_init(); 291 "Using software bounce buffering for IO (SWIOTLB)\n");
292 swiotlb_print_info();
293 } else
294 swiotlb_free();
296 295
297 amd_iommu_init();
298
299 gart_iommu_init();
300
301 no_iommu_init();
302 return 0; 296 return 0;
303} 297}
304
305void pci_iommu_shutdown(void)
306{
307 gart_iommu_shutdown();
308
309 amd_iommu_shutdown();
310}
311/* Must execute after PCI subsystem */ 298/* Must execute after PCI subsystem */
312rootfs_initcall(pci_iommu_init); 299rootfs_initcall(pci_iommu_init);
313 300
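
The pci-dma.c hunks above replace the hard-wired chain of per-IOMMU init calls with a single x86_init.iommu.iommu_init() dispatch, invoked from pci_iommu_init() at rootfs_initcall time. A minimal sketch of how a detection routine plugs into that hook follows; the my_* names and the probe are hypothetical placeholders, not code from this merge, and the real backends (GART, Calgary, AMD, VT-d) each do their own variant of this.

#include <linux/types.h>
#include <linux/init.h>
#include <asm/x86_init.h>

static bool my_iommu_present(void)
{
	return false;			/* stand-in for a real hardware probe */
}

static int __init my_iommu_init(void)
{
	/* program the hardware, then install this IOMMU's dma_ops */
	return 0;
}

void __init my_iommu_detect(void)
{
	if (!my_iommu_present())
		return;

	/* pci_iommu_init() above calls whatever ends up in this hook */
	x86_init.iommu.iommu_init = my_iommu_init;
}
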
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a7f1b64f86e0..e6a0d402f171 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -39,6 +39,7 @@
39#include <asm/swiotlb.h> 39#include <asm/swiotlb.h>
40#include <asm/dma.h> 40#include <asm/dma.h>
41#include <asm/k8.h> 41#include <asm/k8.h>
42#include <asm/x86_init.h>
42 43
43static unsigned long iommu_bus_base; /* GART remapping area (physical) */ 44static unsigned long iommu_bus_base; /* GART remapping area (physical) */
44static unsigned long iommu_size; /* size of remapping area bytes */ 45static unsigned long iommu_size; /* size of remapping area bytes */
@@ -46,6 +47,8 @@ static unsigned long iommu_pages; /* .. and in pages */
46 47
47static u32 *iommu_gatt_base; /* Remapping table */ 48static u32 *iommu_gatt_base; /* Remapping table */
48 49
50static dma_addr_t bad_dma_addr;
51
49/* 52/*
50 * If this is disabled the IOMMU will use an optimized flushing strategy 53 * If this is disabled the IOMMU will use an optimized flushing strategy
51 * of only flushing when an mapping is reused. With it true the GART is 54 * of only flushing when an mapping is reused. With it true the GART is
@@ -92,7 +95,7 @@ static unsigned long alloc_iommu(struct device *dev, int size,
92 95
93 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), 96 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
94 PAGE_SIZE) >> PAGE_SHIFT; 97 PAGE_SIZE) >> PAGE_SHIFT;
95 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, 98 boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
96 PAGE_SIZE) >> PAGE_SHIFT; 99 PAGE_SIZE) >> PAGE_SHIFT;
97 100
98 spin_lock_irqsave(&iommu_bitmap_lock, flags); 101 spin_lock_irqsave(&iommu_bitmap_lock, flags);
@@ -216,7 +219,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
216 if (panic_on_overflow) 219 if (panic_on_overflow)
217 panic("dma_map_area overflow %lu bytes\n", size); 220 panic("dma_map_area overflow %lu bytes\n", size);
218 iommu_full(dev, size, dir); 221 iommu_full(dev, size, dir);
219 return bad_dma_address; 222 return bad_dma_addr;
220 } 223 }
221 224
222 for (i = 0; i < npages; i++) { 225 for (i = 0; i < npages; i++) {
@@ -294,7 +297,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
294 int i; 297 int i;
295 298
296#ifdef CONFIG_IOMMU_DEBUG 299#ifdef CONFIG_IOMMU_DEBUG
297 printk(KERN_DEBUG "dma_map_sg overflow\n"); 300 pr_debug("dma_map_sg overflow\n");
298#endif 301#endif
299 302
300 for_each_sg(sg, s, nents, i) { 303 for_each_sg(sg, s, nents, i) {
@@ -302,7 +305,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
302 305
303 if (nonforced_iommu(dev, addr, s->length)) { 306 if (nonforced_iommu(dev, addr, s->length)) {
304 addr = dma_map_area(dev, addr, s->length, dir, 0); 307 addr = dma_map_area(dev, addr, s->length, dir, 0);
305 if (addr == bad_dma_address) { 308 if (addr == bad_dma_addr) {
306 if (i > 0) 309 if (i > 0)
307 gart_unmap_sg(dev, sg, i, dir, NULL); 310 gart_unmap_sg(dev, sg, i, dir, NULL);
308 nents = 0; 311 nents = 0;
@@ -389,12 +392,14 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
389 if (!dev) 392 if (!dev)
390 dev = &x86_dma_fallback_dev; 393 dev = &x86_dma_fallback_dev;
391 394
392 out = 0; 395 out = 0;
393 start = 0; 396 start = 0;
394 start_sg = sgmap = sg; 397 start_sg = sg;
395 seg_size = 0; 398 sgmap = sg;
396 max_seg_size = dma_get_max_seg_size(dev); 399 seg_size = 0;
397 ps = NULL; /* shut up gcc */ 400 max_seg_size = dma_get_max_seg_size(dev);
401 ps = NULL; /* shut up gcc */
402
398 for_each_sg(sg, s, nents, i) { 403 for_each_sg(sg, s, nents, i) {
399 dma_addr_t addr = sg_phys(s); 404 dma_addr_t addr = sg_phys(s);
400 405
@@ -417,11 +422,12 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
417 sgmap, pages, need) < 0) 422 sgmap, pages, need) < 0)
418 goto error; 423 goto error;
419 out++; 424 out++;
420 seg_size = 0; 425
421 sgmap = sg_next(sgmap); 426 seg_size = 0;
422 pages = 0; 427 sgmap = sg_next(sgmap);
423 start = i; 428 pages = 0;
424 start_sg = s; 429 start = i;
430 start_sg = s;
425 } 431 }
426 } 432 }
427 433
@@ -455,7 +461,7 @@ error:
455 461
456 iommu_full(dev, pages << PAGE_SHIFT, dir); 462 iommu_full(dev, pages << PAGE_SHIFT, dir);
457 for_each_sg(sg, s, nents, i) 463 for_each_sg(sg, s, nents, i)
458 s->dma_address = bad_dma_address; 464 s->dma_address = bad_dma_addr;
459 return 0; 465 return 0;
460} 466}
461 467
@@ -479,7 +485,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
479 DMA_BIDIRECTIONAL, align_mask); 485 DMA_BIDIRECTIONAL, align_mask);
480 486
481 flush_gart(); 487 flush_gart();
482 if (paddr != bad_dma_address) { 488 if (paddr != bad_dma_addr) {
483 *dma_addr = paddr; 489 *dma_addr = paddr;
484 return page_address(page); 490 return page_address(page);
485 } 491 }
@@ -499,6 +505,11 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
499 free_pages((unsigned long)vaddr, get_order(size)); 505 free_pages((unsigned long)vaddr, get_order(size));
500} 506}
501 507
508static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
509{
510 return (dma_addr == bad_dma_addr);
511}
512
502static int no_agp; 513static int no_agp;
503 514
504static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) 515static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -515,7 +526,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
515 iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; 526 iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
516 527
517 if (iommu_size < 64*1024*1024) { 528 if (iommu_size < 64*1024*1024) {
518 printk(KERN_WARNING 529 pr_warning(
519 "PCI-DMA: Warning: Small IOMMU %luMB." 530 "PCI-DMA: Warning: Small IOMMU %luMB."
520 " Consider increasing the AGP aperture in BIOS\n", 531 " Consider increasing the AGP aperture in BIOS\n",
521 iommu_size >> 20); 532 iommu_size >> 20);
@@ -570,28 +581,32 @@ void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
570 aperture_alloc = aper_alloc; 581 aperture_alloc = aper_alloc;
571} 582}
572 583
573static int gart_resume(struct sys_device *dev) 584static void gart_fixup_northbridges(struct sys_device *dev)
574{ 585{
575 printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n"); 586 int i;
576 587
577 if (fix_up_north_bridges) { 588 if (!fix_up_north_bridges)
578 int i; 589 return;
579 590
580 printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n"); 591 pr_info("PCI-DMA: Restoring GART aperture settings\n");
581 592
582 for (i = 0; i < num_k8_northbridges; i++) { 593 for (i = 0; i < num_k8_northbridges; i++) {
583 struct pci_dev *dev = k8_northbridges[i]; 594 struct pci_dev *dev = k8_northbridges[i];
584 595
585 /* 596 /*
586 * Don't enable translations just yet. That is the next 597 * Don't enable translations just yet. That is the next
587 * step. Restore the pre-suspend aperture settings. 598 * step. Restore the pre-suspend aperture settings.
588 */ 599 */
589 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, 600 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
590 aperture_order << 1); 601 pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
591 pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
592 aperture_alloc >> 25);
593 }
594 } 602 }
603}
604
605static int gart_resume(struct sys_device *dev)
606{
607 pr_info("PCI-DMA: Resuming GART IOMMU\n");
608
609 gart_fixup_northbridges(dev);
595 610
596 enable_gart_translations(); 611 enable_gart_translations();
597 612
@@ -604,15 +619,14 @@ static int gart_suspend(struct sys_device *dev, pm_message_t state)
604} 619}
605 620
606static struct sysdev_class gart_sysdev_class = { 621static struct sysdev_class gart_sysdev_class = {
607 .name = "gart", 622 .name = "gart",
608 .suspend = gart_suspend, 623 .suspend = gart_suspend,
609 .resume = gart_resume, 624 .resume = gart_resume,
610 625
611}; 626};
612 627
613static struct sys_device device_gart = { 628static struct sys_device device_gart = {
614 .id = 0, 629 .cls = &gart_sysdev_class,
615 .cls = &gart_sysdev_class,
616}; 630};
617 631
618/* 632/*
@@ -627,7 +641,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
627 void *gatt; 641 void *gatt;
628 int i, error; 642 int i, error;
629 643
630 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); 644 pr_info("PCI-DMA: Disabling AGP.\n");
645
631 aper_size = aper_base = info->aper_size = 0; 646 aper_size = aper_base = info->aper_size = 0;
632 dev = NULL; 647 dev = NULL;
633 for (i = 0; i < num_k8_northbridges; i++) { 648 for (i = 0; i < num_k8_northbridges; i++) {
@@ -645,6 +660,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
645 } 660 }
646 if (!aper_base) 661 if (!aper_base)
647 goto nommu; 662 goto nommu;
663
648 info->aper_base = aper_base; 664 info->aper_base = aper_base;
649 info->aper_size = aper_size >> 20; 665 info->aper_size = aper_size >> 20;
650 666
@@ -667,14 +683,14 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
667 683
668 flush_gart(); 684 flush_gart();
669 685
670 printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", 686 pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
671 aper_base, aper_size>>10); 687 aper_base, aper_size>>10);
672 688
673 return 0; 689 return 0;
674 690
675 nommu: 691 nommu:
676 /* Should not happen anymore */ 692 /* Should not happen anymore */
677 printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n" 693 pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
678 "falling back to iommu=soft.\n"); 694 "falling back to iommu=soft.\n");
679 return -1; 695 return -1;
680} 696}
@@ -686,14 +702,15 @@ static struct dma_map_ops gart_dma_ops = {
686 .unmap_page = gart_unmap_page, 702 .unmap_page = gart_unmap_page,
687 .alloc_coherent = gart_alloc_coherent, 703 .alloc_coherent = gart_alloc_coherent,
688 .free_coherent = gart_free_coherent, 704 .free_coherent = gart_free_coherent,
705 .mapping_error = gart_mapping_error,
689}; 706};
690 707
691void gart_iommu_shutdown(void) 708static void gart_iommu_shutdown(void)
692{ 709{
693 struct pci_dev *dev; 710 struct pci_dev *dev;
694 int i; 711 int i;
695 712
696 if (no_agp && (dma_ops != &gart_dma_ops)) 713 if (no_agp)
697 return; 714 return;
698 715
699 for (i = 0; i < num_k8_northbridges; i++) { 716 for (i = 0; i < num_k8_northbridges; i++) {
@@ -708,7 +725,7 @@ void gart_iommu_shutdown(void)
708 } 725 }
709} 726}
710 727
711void __init gart_iommu_init(void) 728int __init gart_iommu_init(void)
712{ 729{
713 struct agp_kern_info info; 730 struct agp_kern_info info;
714 unsigned long iommu_start; 731 unsigned long iommu_start;
@@ -718,7 +735,7 @@ void __init gart_iommu_init(void)
718 long i; 735 long i;
719 736
720 if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) 737 if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
721 return; 738 return 0;
722 739
723#ifndef CONFIG_AGP_AMD64 740#ifndef CONFIG_AGP_AMD64
724 no_agp = 1; 741 no_agp = 1;
@@ -730,35 +747,28 @@ void __init gart_iommu_init(void)
730 (agp_copy_info(agp_bridge, &info) < 0); 747 (agp_copy_info(agp_bridge, &info) < 0);
731#endif 748#endif
732 749
733 if (swiotlb)
734 return;
735
736 /* Did we detect a different HW IOMMU? */
737 if (iommu_detected && !gart_iommu_aperture)
738 return;
739
740 if (no_iommu || 750 if (no_iommu ||
741 (!force_iommu && max_pfn <= MAX_DMA32_PFN) || 751 (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
742 !gart_iommu_aperture || 752 !gart_iommu_aperture ||
743 (no_agp && init_k8_gatt(&info) < 0)) { 753 (no_agp && init_k8_gatt(&info) < 0)) {
744 if (max_pfn > MAX_DMA32_PFN) { 754 if (max_pfn > MAX_DMA32_PFN) {
745 printk(KERN_WARNING "More than 4GB of memory " 755 pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
746 "but GART IOMMU not available.\n"); 756 pr_warning("falling back to iommu=soft.\n");
747 printk(KERN_WARNING "falling back to iommu=soft.\n");
748 } 757 }
749 return; 758 return 0;
750 } 759 }
751 760
752 /* need to map that range */ 761 /* need to map that range */
753 aper_size = info.aper_size << 20; 762 aper_size = info.aper_size << 20;
754 aper_base = info.aper_base; 763 aper_base = info.aper_base;
755 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); 764 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
765
756 if (end_pfn > max_low_pfn_mapped) { 766 if (end_pfn > max_low_pfn_mapped) {
757 start_pfn = (aper_base>>PAGE_SHIFT); 767 start_pfn = (aper_base>>PAGE_SHIFT);
758 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); 768 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
759 } 769 }
760 770
761 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); 771 pr_info("PCI-DMA: using GART IOMMU.\n");
762 iommu_size = check_iommu_size(info.aper_base, aper_size); 772 iommu_size = check_iommu_size(info.aper_base, aper_size);
763 iommu_pages = iommu_size >> PAGE_SHIFT; 773 iommu_pages = iommu_size >> PAGE_SHIFT;
764 774
@@ -773,8 +783,7 @@ void __init gart_iommu_init(void)
773 783
774 ret = dma_debug_resize_entries(iommu_pages); 784 ret = dma_debug_resize_entries(iommu_pages);
775 if (ret) 785 if (ret)
776 printk(KERN_DEBUG 786 pr_debug("PCI-DMA: Cannot trace all the entries\n");
777 "PCI-DMA: Cannot trace all the entries\n");
778 } 787 }
779#endif 788#endif
780 789
@@ -784,15 +793,14 @@ void __init gart_iommu_init(void)
784 */ 793 */
785 iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 794 iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
786 795
787 agp_memory_reserved = iommu_size; 796 pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
788 printk(KERN_INFO
789 "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
790 iommu_size >> 20); 797 iommu_size >> 20);
791 798
792 iommu_start = aper_size - iommu_size; 799 agp_memory_reserved = iommu_size;
793 iommu_bus_base = info.aper_base + iommu_start; 800 iommu_start = aper_size - iommu_size;
794 bad_dma_address = iommu_bus_base; 801 iommu_bus_base = info.aper_base + iommu_start;
795 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); 802 bad_dma_addr = iommu_bus_base;
803 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
796 804
797 /* 805 /*
798 * Unmap the IOMMU part of the GART. The alias of the page is 806 * Unmap the IOMMU part of the GART. The alias of the page is
@@ -814,7 +822,7 @@ void __init gart_iommu_init(void)
814 * the pages as Not-Present: 822 * the pages as Not-Present:
815 */ 823 */
816 wbinvd(); 824 wbinvd();
817 825
818 /* 826 /*
819 * Now all caches are flushed and we can safely enable 827 * Now all caches are flushed and we can safely enable
820 * GART hardware. Doing it early leaves the possibility 828 * GART hardware. Doing it early leaves the possibility
@@ -838,6 +846,10 @@ void __init gart_iommu_init(void)
838 846
839 flush_gart(); 847 flush_gart();
840 dma_ops = &gart_dma_ops; 848 dma_ops = &gart_dma_ops;
849 x86_platform.iommu_shutdown = gart_iommu_shutdown;
850 swiotlb = 0;
851
852 return 0;
841} 853}
842 854
843void __init gart_parse_options(char *p) 855void __init gart_parse_options(char *p)
@@ -856,7 +868,7 @@ void __init gart_parse_options(char *p)
856#endif 868#endif
857 if (isdigit(*p) && get_option(&p, &arg)) 869 if (isdigit(*p) && get_option(&p, &arg))
858 iommu_size = arg; 870 iommu_size = arg;
859 if (!strncmp(p, "fullflush", 8)) 871 if (!strncmp(p, "fullflush", 9))
860 iommu_fullflush = 1; 872 iommu_fullflush = 1;
861 if (!strncmp(p, "nofullflush", 11)) 873 if (!strncmp(p, "nofullflush", 11))
862 iommu_fullflush = 0; 874 iommu_fullflush = 0;
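
The GART changes above retire the global bad_dma_address in favour of a file-local bad_dma_addr reported through the new .mapping_error callback, so callers test a mapping with the generic dma_mapping_error() helper instead of comparing raw addresses. A minimal caller-side sketch, with placeholder function, device and buffer names that are not part of this merge:

#include <linux/dma-mapping.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/*
	 * gart_mapping_error() compares against its private bad_dma_addr;
	 * other backends report DMA_ERROR_CODE.  Either way the generic
	 * test below is all a driver needs.
	 */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*handle = addr;
	return 0;
}
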
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index a3933d4330cd..22be12b60a8f 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
33 dma_addr_t bus = page_to_phys(page) + offset; 33 dma_addr_t bus = page_to_phys(page) + offset;
34 WARN_ON(size == 0); 34 WARN_ON(size == 0);
35 if (!check_addr("map_single", dev, bus, size)) 35 if (!check_addr("map_single", dev, bus, size))
36 return bad_dma_address; 36 return DMA_ERROR_CODE;
37 flush_write_buffers(); 37 flush_write_buffers();
38 return bus; 38 return bus;
39} 39}
@@ -103,12 +103,3 @@ struct dma_map_ops nommu_dma_ops = {
103 .sync_sg_for_device = nommu_sync_sg_for_device, 103 .sync_sg_for_device = nommu_sync_sg_for_device,
104 .is_phys = 1, 104 .is_phys = 1,
105}; 105};
106
107void __init no_iommu_init(void)
108{
109 if (dma_ops)
110 return;
111
112 force_iommu = 0; /* no HW IOMMU */
113 dma_ops = &nommu_dma_ops;
114}
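
no_iommu_init() can go away because pci-dma.c above now points dma_ops at nommu_dma_ops right at its definition, making the no-IOMMU case the boot-time default instead of a fallback installed late. The fragment below merely condenses that idiom from the two files for illustration; it is not new code and omits the surrounding declarations.

#include <linux/init.h>
#include <linux/dma-mapping.h>	/* pulls in the x86 declaration of nommu_dma_ops */
#include <asm/x86_init.h>

struct dma_map_ops *dma_ops = &nommu_dma_ops;	/* safe default from early boot */

static int __init example_pci_iommu_init(void)
{
	x86_init.iommu.iommu_init();	/* a real IOMMU may replace dma_ops here */
	return 0;
}
rootfs_initcall(example_pci_iommu_init);
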
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index aaa6b7839f1e..e3c0a66b9e77 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -42,18 +42,28 @@ static struct dma_map_ops swiotlb_dma_ops = {
42 .dma_supported = NULL, 42 .dma_supported = NULL,
43}; 43};
44 44
45void __init pci_swiotlb_init(void) 45/*
46 * pci_swiotlb_init - initialize swiotlb if necessary
47 *
48 * This returns non-zero if we are forced to use swiotlb (by the boot
49 * option).
50 */
51int __init pci_swiotlb_init(void)
46{ 52{
53 int use_swiotlb = swiotlb | swiotlb_force;
54
47 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 55 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
48#ifdef CONFIG_X86_64 56#ifdef CONFIG_X86_64
49 if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)) 57 if (!no_iommu && max_pfn > MAX_DMA32_PFN)
50 swiotlb = 1; 58 swiotlb = 1;
51#endif 59#endif
52 if (swiotlb_force) 60 if (swiotlb_force)
53 swiotlb = 1; 61 swiotlb = 1;
62
54 if (swiotlb) { 63 if (swiotlb) {
55 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); 64 swiotlb_init(0);
56 swiotlb_init();
57 dma_ops = &swiotlb_dma_ops; 65 dma_ops = &swiotlb_dma_ops;
58 } 66 }
67
68 return use_swiotlb;
59} 69}
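
Note that use_swiotlb snapshots swiotlb | swiotlb_force before the 4GB auto-enable runs, so the non-zero return only reflects an explicit request (swiotlb=force, or iommu=soft setting swiotlb); when swiotlb is merely auto-enabled as a fallback, the caller still goes on to probe the hardware IOMMUs. A condensed sketch of the caller's side of that contract, mirroring pci_iommu_alloc() earlier in this merge, with header includes as in pci-dma.c:

void __init example_pci_iommu_alloc(void)
{
	if (pci_swiotlb_init())
		return;		/* swiotlb explicitly requested: skip HW IOMMU detection */

	gart_iommu_hole_init();
	detect_calgary();
	detect_intel_iommu();

	/* needs to run after gart_iommu_hole_init() */
	amd_iommu_detect();
}
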
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f93078746e00..2b97fc5b124e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -23,7 +23,7 @@
23# include <linux/ctype.h> 23# include <linux/ctype.h>
24# include <linux/mc146818rtc.h> 24# include <linux/mc146818rtc.h>
25#else 25#else
26# include <asm/iommu.h> 26# include <asm/x86_init.h>
27#endif 27#endif
28 28
29/* 29/*
@@ -622,7 +622,7 @@ void native_machine_shutdown(void)
622#endif 622#endif
623 623
624#ifdef CONFIG_X86_64 624#ifdef CONFIG_X86_64
625 pci_iommu_shutdown(); 625 x86_platform.iommu_shutdown();
626#endif 626#endif
627} 627}
628 628
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 4449a4a2c2ed..d11c5ff7c65e 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -14,10 +14,13 @@
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/irq.h> 15#include <asm/irq.h>
16#include <asm/tsc.h> 16#include <asm/tsc.h>
17#include <asm/iommu.h>
17 18
18void __cpuinit x86_init_noop(void) { } 19void __cpuinit x86_init_noop(void) { }
19void __init x86_init_uint_noop(unsigned int unused) { } 20void __init x86_init_uint_noop(unsigned int unused) { }
20void __init x86_init_pgd_noop(pgd_t *unused) { } 21void __init x86_init_pgd_noop(pgd_t *unused) { }
22int __init iommu_init_noop(void) { return 0; }
23void iommu_shutdown_noop(void) { }
21 24
22/* 25/*
23 * The platform setup functions are preset with the default functions 26 * The platform setup functions are preset with the default functions
@@ -62,6 +65,10 @@ struct x86_init_ops x86_init __initdata = {
62 .tsc_pre_init = x86_init_noop, 65 .tsc_pre_init = x86_init_noop,
63 .timer_init = hpet_time_init, 66 .timer_init = hpet_time_init,
64 }, 67 },
68
69 .iommu = {
70 .iommu_init = iommu_init_noop,
71 },
65}; 72};
66 73
67struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { 74struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
@@ -72,4 +79,5 @@ struct x86_platform_ops x86_platform = {
72 .calibrate_tsc = native_calibrate_tsc, 79 .calibrate_tsc = native_calibrate_tsc,
73 .get_wallclock = mach_get_cmos_time, 80 .get_wallclock = mach_get_cmos_time,
74 .set_wallclock = mach_set_rtc_mmss, 81 .set_wallclock = mach_set_rtc_mmss,
82 .iommu_shutdown = iommu_shutdown_noop,
75}; 83};
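
The table above gives both new hooks safe noop defaults, which is what lets native_machine_shutdown() in reboot.c call x86_platform.iommu_shutdown() unconditionally. Whichever IOMMU actually initializes overrides them, as the GART diff earlier does for the shutdown side; a minimal sketch of that override, with placeholder names:

#include <linux/init.h>
#include <asm/x86_init.h>

static void example_iommu_shutdown(void)
{
	/* quiesce the IOMMU so a reboot/kexec does not inherit live mappings */
}

static int __init example_iommu_init(void)
{
	/* hardware setup, dma_ops installation, then claim the shutdown hook */
	x86_platform.iommu_shutdown = example_iommu_shutdown;
	return 0;
}
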
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 427fd1b56df5..8565d944f7cf 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -1,12 +1,13 @@
1/* 1/*
2 * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi> 2 * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
3 */ 3 */
4
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6
4#include <linux/module.h> 7#include <linux/module.h>
5#include <linux/io.h> 8#include <linux/io.h>
6#include <linux/mmiotrace.h> 9#include <linux/mmiotrace.h>
7 10
8#define MODULE_NAME "testmmiotrace"
9
10static unsigned long mmio_address; 11static unsigned long mmio_address;
11module_param(mmio_address, ulong, 0); 12module_param(mmio_address, ulong, 0);
12MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB " 13MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
@@ -30,7 +31,7 @@ static unsigned v32(unsigned i)
30static void do_write_test(void __iomem *p) 31static void do_write_test(void __iomem *p)
31{ 32{
32 unsigned int i; 33 unsigned int i;
33 pr_info(MODULE_NAME ": write test.\n"); 34 pr_info("write test.\n");
34 mmiotrace_printk("Write test.\n"); 35 mmiotrace_printk("Write test.\n");
35 36
36 for (i = 0; i < 256; i++) 37 for (i = 0; i < 256; i++)
@@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p)
47{ 48{
48 unsigned int i; 49 unsigned int i;
49 unsigned errs[3] = { 0 }; 50 unsigned errs[3] = { 0 };
50 pr_info(MODULE_NAME ": read test.\n"); 51 pr_info("read test.\n");
51 mmiotrace_printk("Read test.\n"); 52 mmiotrace_printk("Read test.\n");
52 53
53 for (i = 0; i < 256; i++) 54 for (i = 0; i < 256; i++)
@@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p)
68 69
69static void do_read_far_test(void __iomem *p) 70static void do_read_far_test(void __iomem *p)
70{ 71{
71 pr_info(MODULE_NAME ": read far test.\n"); 72 pr_info("read far test.\n");
72 mmiotrace_printk("Read far test.\n"); 73 mmiotrace_printk("Read far test.\n");
73 74
74 ioread32(p + read_far); 75 ioread32(p + read_far);
@@ -78,7 +79,7 @@ static void do_test(unsigned long size)
78{ 79{
79 void __iomem *p = ioremap_nocache(mmio_address, size); 80 void __iomem *p = ioremap_nocache(mmio_address, size);
80 if (!p) { 81 if (!p) {
81 pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); 82 pr_err("could not ioremap, aborting.\n");
82 return; 83 return;
83 } 84 }
84 mmiotrace_printk("ioremap returned %p.\n", p); 85 mmiotrace_printk("ioremap returned %p.\n", p);
@@ -94,24 +95,22 @@ static int __init init(void)
94 unsigned long size = (read_far) ? (8 << 20) : (16 << 10); 95 unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
95 96
96 if (mmio_address == 0) { 97 if (mmio_address == 0) {
97 pr_err(MODULE_NAME ": you have to use the module argument " 98 pr_err("you have to use the module argument mmio_address.\n");
98 "mmio_address.\n"); 99 pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n");
99 pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS"
100 " YOU REALLY KNOW WHAT YOU ARE DOING!\n");
101 return -ENXIO; 100 return -ENXIO;
102 } 101 }
103 102
104 pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI " 103 pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, "
105 "address space, and writing 16 kB of rubbish in there.\n", 104 "and writing 16 kB of rubbish in there.\n",
106 size >> 10, mmio_address); 105 size >> 10, mmio_address);
107 do_test(size); 106 do_test(size);
108 pr_info(MODULE_NAME ": All done.\n"); 107 pr_info("All done.\n");
109 return 0; 108 return 0;
110} 109}
111 110
112static void __exit cleanup(void) 111static void __exit cleanup(void)
113{ 112{
114 pr_debug(MODULE_NAME ": unloaded.\n"); 113 pr_debug("unloaded.\n");
115} 114}
116 115
117module_init(init); 116module_init(init);
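
The pr_fmt() definition adopted above replaces the hand-rolled MODULE_NAME prefix: once pr_fmt() is defined before the first include, every pr_*() call in the file picks up the prefix automatically. A self-contained illustration with placeholder module code (not part of this merge):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("loaded\n");		/* printed as "<module name>: loaded" */
	return 0;
}

static void __exit example_exit(void)
{
	pr_debug("unloaded\n");		/* compiled out unless DEBUG or dynamic debug */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
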
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index dc99e26f8e5b..1b392c9e8531 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -177,9 +177,6 @@ static struct ata_port_operations pcmcia_8bit_port_ops = {
177 .drain_fifo = pcmcia_8bit_drain_fifo, 177 .drain_fifo = pcmcia_8bit_drain_fifo,
178}; 178};
179 179
180#define CS_CHECK(fn, ret) \
181do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
182
183 180
184struct pcmcia_config_check { 181struct pcmcia_config_check {
185 unsigned long ctl_base; 182 unsigned long ctl_base;
@@ -252,7 +249,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
252 struct ata_port *ap; 249 struct ata_port *ap;
253 struct ata_pcmcia_info *info; 250 struct ata_pcmcia_info *info;
254 struct pcmcia_config_check *stk = NULL; 251 struct pcmcia_config_check *stk = NULL;
255 int last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM, p; 252 int is_kme = 0, ret = -ENOMEM, p;
256 unsigned long io_base, ctl_base; 253 unsigned long io_base, ctl_base;
257 void __iomem *io_addr, *ctl_addr; 254 void __iomem *io_addr, *ctl_addr;
258 int n_ports = 1; 255 int n_ports = 1;
@@ -271,7 +268,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
271 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 268 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
272 pdev->io.IOAddrLines = 3; 269 pdev->io.IOAddrLines = 3;
273 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 270 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
274 pdev->irq.IRQInfo1 = IRQ_LEVEL_ID;
275 pdev->conf.Attributes = CONF_ENABLE_IRQ; 271 pdev->conf.Attributes = CONF_ENABLE_IRQ;
276 pdev->conf.IntType = INT_MEMORY_AND_IO; 272 pdev->conf.IntType = INT_MEMORY_AND_IO;
277 273
@@ -296,8 +292,13 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
296 } 292 }
297 io_base = pdev->io.BasePort1; 293 io_base = pdev->io.BasePort1;
298 ctl_base = stk->ctl_base; 294 ctl_base = stk->ctl_base;
299 CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq)); 295 ret = pcmcia_request_irq(pdev, &pdev->irq);
300 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf)); 296 if (ret)
297 goto failed;
298
299 ret = pcmcia_request_configuration(pdev, &pdev->conf);
300 if (ret)
301 goto failed;
301 302
302 /* iomap */ 303 /* iomap */
303 ret = -ENOMEM; 304 ret = -ENOMEM;
@@ -351,8 +352,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
351 kfree(stk); 352 kfree(stk);
352 return 0; 353 return 0;
353 354
354cs_failed:
355 cs_error(pdev, last_fn, last_ret);
356failed: 355failed:
357 kfree(stk); 356 kfree(stk);
358 info->ndev = 0; 357 info->ndev = 0;
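
The CS_CHECK()/cs_error() pair removed above hid the error path behind a macro plus a PCMCIA-core error printer that is being retired; the replacement is plain error propagation to the existing failed: label, and the Bluetooth and ISDN driver diffs below repeat the same conversion. Condensed into a stand-alone sketch (calls and fields as in the driver above, function name a placeholder):

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static int example_enable(struct pcmcia_device *pdev)
{
	int ret;

	ret = pcmcia_request_irq(pdev, &pdev->irq);
	if (ret)
		return ret;	/* caller's failed: path releases resources */

	return pcmcia_request_configuration(pdev, &pdev->conf);
}
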
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 965ece2c7e4d..13bb69d2abb3 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -735,6 +735,21 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
735 part_stat_unlock(); 735 part_stat_unlock();
736} 736}
737 737
738/*
739 * Ensure we don't create aliases in VI caches
740 */
741static inline void
742killalias(struct bio *bio)
743{
744 struct bio_vec *bv;
745 int i;
746
747 if (bio_data_dir(bio) == READ)
748 __bio_for_each_segment(bv, bio, i, 0) {
749 flush_dcache_page(bv->bv_page);
750 }
751}
752
738void 753void
739aoecmd_ata_rsp(struct sk_buff *skb) 754aoecmd_ata_rsp(struct sk_buff *skb)
740{ 755{
@@ -853,8 +868,12 @@ aoecmd_ata_rsp(struct sk_buff *skb)
853 868
854 if (buf && --buf->nframesout == 0 && buf->resid == 0) { 869 if (buf && --buf->nframesout == 0 && buf->resid == 0) {
855 diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector); 870 diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector);
856 n = (buf->flags & BUFFL_FAIL) ? -EIO : 0; 871 if (buf->flags & BUFFL_FAIL)
857 bio_endio(buf->bio, n); 872 bio_endio(buf->bio, -EIO);
873 else {
874 killalias(buf->bio);
875 bio_endio(buf->bio, 0);
876 }
858 mempool_free(buf, d->bufpool); 877 mempool_free(buf, d->bufpool);
859 } 878 }
860 879
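
For context on killalias(): flush_dcache_page() is required after a device has deposited read data in a page that may also be visible through another mapping (for example a user-space mapping) on virtually indexed caches; on x86 it is a no-op. The one-liner below just isolates that rule; the function name is a placeholder.

#include <linux/mm.h>
#include <linux/highmem.h>	/* pulls in flush_dcache_page() */

static void example_complete_read(struct page *page)
{
	/* make freshly DMA'd data visible through any aliasing mapping */
	flush_dcache_page(page);
}
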
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index b0e569ba730d..2acdc605cb4b 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -867,11 +867,9 @@ static int bluecard_probe(struct pcmcia_device *link)
867 867
868 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 868 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
869 link->io.NumPorts1 = 8; 869 link->io.NumPorts1 = 8;
870 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 870 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
871 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
872 871
873 link->irq.Handler = bluecard_interrupt; 872 link->irq.Handler = bluecard_interrupt;
874 link->irq.Instance = info;
875 873
876 link->conf.Attributes = CONF_ENABLE_IRQ; 874 link->conf.Attributes = CONF_ENABLE_IRQ;
877 link->conf.IntType = INT_MEMORY_AND_IO; 875 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -905,22 +903,16 @@ static int bluecard_config(struct pcmcia_device *link)
905 break; 903 break;
906 } 904 }
907 905
908 if (i != 0) { 906 if (i != 0)
909 cs_error(link, RequestIO, i);
910 goto failed; 907 goto failed;
911 }
912 908
913 i = pcmcia_request_irq(link, &link->irq); 909 i = pcmcia_request_irq(link, &link->irq);
914 if (i != 0) { 910 if (i != 0)
915 cs_error(link, RequestIRQ, i);
916 link->irq.AssignedIRQ = 0; 911 link->irq.AssignedIRQ = 0;
917 }
918 912
919 i = pcmcia_request_configuration(link, &link->conf); 913 i = pcmcia_request_configuration(link, &link->conf);
920 if (i != 0) { 914 if (i != 0)
921 cs_error(link, RequestConfiguration, i);
922 goto failed; 915 goto failed;
923 }
924 916
925 if (bluecard_open(info) != 0) 917 if (bluecard_open(info) != 0)
926 goto failed; 918 goto failed;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index d58e22b9f06a..d814a2755ccb 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -659,11 +659,9 @@ static int bt3c_probe(struct pcmcia_device *link)
659 659
660 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 660 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
661 link->io.NumPorts1 = 8; 661 link->io.NumPorts1 = 8;
662 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 662 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
663 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
664 663
665 link->irq.Handler = bt3c_interrupt; 664 link->irq.Handler = bt3c_interrupt;
666 link->irq.Instance = info;
667 665
668 link->conf.Attributes = CONF_ENABLE_IRQ; 666 link->conf.Attributes = CONF_ENABLE_IRQ;
669 link->conf.IntType = INT_MEMORY_AND_IO; 667 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -740,21 +738,16 @@ static int bt3c_config(struct pcmcia_device *link)
740 goto found_port; 738 goto found_port;
741 739
742 BT_ERR("No usable port range found"); 740 BT_ERR("No usable port range found");
743 cs_error(link, RequestIO, -ENODEV);
744 goto failed; 741 goto failed;
745 742
746found_port: 743found_port:
747 i = pcmcia_request_irq(link, &link->irq); 744 i = pcmcia_request_irq(link, &link->irq);
748 if (i != 0) { 745 if (i != 0)
749 cs_error(link, RequestIRQ, i);
750 link->irq.AssignedIRQ = 0; 746 link->irq.AssignedIRQ = 0;
751 }
752 747
753 i = pcmcia_request_configuration(link, &link->conf); 748 i = pcmcia_request_configuration(link, &link->conf);
754 if (i != 0) { 749 if (i != 0)
755 cs_error(link, RequestConfiguration, i);
756 goto failed; 750 goto failed;
757 }
758 751
759 if (bt3c_open(info) != 0) 752 if (bt3c_open(info) != 0)
760 goto failed; 753 goto failed;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index efd689a062eb..d339464dc15e 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -588,11 +588,9 @@ static int btuart_probe(struct pcmcia_device *link)
588 588
589 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 589 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
590 link->io.NumPorts1 = 8; 590 link->io.NumPorts1 = 8;
591 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 591 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
592 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
593 592
594 link->irq.Handler = btuart_interrupt; 593 link->irq.Handler = btuart_interrupt;
595 link->irq.Instance = info;
596 594
597 link->conf.Attributes = CONF_ENABLE_IRQ; 595 link->conf.Attributes = CONF_ENABLE_IRQ;
598 link->conf.IntType = INT_MEMORY_AND_IO; 596 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -669,21 +667,16 @@ static int btuart_config(struct pcmcia_device *link)
669 goto found_port; 667 goto found_port;
670 668
671 BT_ERR("No usable port range found"); 669 BT_ERR("No usable port range found");
672 cs_error(link, RequestIO, -ENODEV);
673 goto failed; 670 goto failed;
674 671
675found_port: 672found_port:
676 i = pcmcia_request_irq(link, &link->irq); 673 i = pcmcia_request_irq(link, &link->irq);
677 if (i != 0) { 674 if (i != 0)
678 cs_error(link, RequestIRQ, i);
679 link->irq.AssignedIRQ = 0; 675 link->irq.AssignedIRQ = 0;
680 }
681 676
682 i = pcmcia_request_configuration(link, &link->conf); 677 i = pcmcia_request_configuration(link, &link->conf);
683 if (i != 0) { 678 if (i != 0)
684 cs_error(link, RequestConfiguration, i);
685 goto failed; 679 goto failed;
686 }
687 680
688 if (btuart_open(info) != 0) 681 if (btuart_open(info) != 0)
689 goto failed; 682 goto failed;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index b881a9cd8741..4f02a6f3c980 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -573,11 +573,9 @@ static int dtl1_probe(struct pcmcia_device *link)
573 573
574 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 574 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
575 link->io.NumPorts1 = 8; 575 link->io.NumPorts1 = 8;
576 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 576 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
577 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
578 577
579 link->irq.Handler = dtl1_interrupt; 578 link->irq.Handler = dtl1_interrupt;
580 link->irq.Instance = info;
581 579
582 link->conf.Attributes = CONF_ENABLE_IRQ; 580 link->conf.Attributes = CONF_ENABLE_IRQ;
583 link->conf.IntType = INT_MEMORY_AND_IO; 581 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -622,16 +620,12 @@ static int dtl1_config(struct pcmcia_device *link)
622 goto failed; 620 goto failed;
623 621
624 i = pcmcia_request_irq(link, &link->irq); 622 i = pcmcia_request_irq(link, &link->irq);
625 if (i != 0) { 623 if (i != 0)
626 cs_error(link, RequestIRQ, i);
627 link->irq.AssignedIRQ = 0; 624 link->irq.AssignedIRQ = 0;
628 }
629 625
630 i = pcmcia_request_configuration(link, &link->conf); 626 i = pcmcia_request_configuration(link, &link->conf);
631 if (i != 0) { 627 if (i != 0)
632 cs_error(link, RequestConfiguration, i);
633 goto failed; 628 goto failed;
634 }
635 629
636 if (dtl1_open(info) != 0) 630 if (dtl1_open(info) != 0)
637 goto failed; 631 goto failed;
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index ccb1fa89de29..2fb3a480f6b0 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -56,9 +56,8 @@ config AGP_AMD
56 X on AMD Irongate, 761, and 762 chipsets. 56 X on AMD Irongate, 761, and 762 chipsets.
57 57
58config AGP_AMD64 58config AGP_AMD64
59 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU 59 tristate "AMD Opteron/Athlon64 on-CPU GART support"
60 depends on AGP && X86 60 depends on AGP && X86
61 default y if GART_IOMMU
62 help 61 help
63 This option gives you AGP support for the GLX component of 62 This option gives you AGP support for the GLX component of
64 X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. 63 X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index c250a31efa53..2db4c0a29b05 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -23,8 +23,6 @@
23 * All rights reserved. Licensed under dual BSD/GPL license. 23 * All rights reserved. Licensed under dual BSD/GPL license.
24 */ 24 */
25 25
26/* #define PCMCIA_DEBUG 6 */
27
28#include <linux/kernel.h> 26#include <linux/kernel.h>
29#include <linux/module.h> 27#include <linux/module.h>
30#include <linux/slab.h> 28#include <linux/slab.h>
@@ -47,18 +45,17 @@
47 45
48/* #define ATR_CSUM */ 46/* #define ATR_CSUM */
49 47
50#ifdef PCMCIA_DEBUG 48#define reader_to_dev(x) (&x->p_dev->dev)
51#define reader_to_dev(x) (&handle_to_dev(x->p_dev)) 49
52static int pc_debug = PCMCIA_DEBUG; 50/* n (debug level) is ignored */
53module_param(pc_debug, int, 0600); 51/* additional debug output may be enabled by re-compiling with
54#define DEBUGP(n, rdr, x, args...) do { \ 52 * CM4000_DEBUG set */
55 if (pc_debug >= (n)) \ 53/* #define CM4000_DEBUG */
56 dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ 54#define DEBUGP(n, rdr, x, args...) do { \
57 __func__ , ## args); \ 55 dev_dbg(reader_to_dev(rdr), "%s:" x, \
56 __func__ , ## args); \
58 } while (0) 57 } while (0)
59#else 58
60#define DEBUGP(n, rdr, x, args...)
61#endif
62static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte"; 59static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte";
63 60
64#define T_1SEC (HZ) 61#define T_1SEC (HZ)
@@ -174,14 +171,13 @@ static unsigned char fi_di_table[10][14] = {
174/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9} 171/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9}
175}; 172};
176 173
177#ifndef PCMCIA_DEBUG 174#ifndef CM4000_DEBUG
178#define xoutb outb 175#define xoutb outb
179#define xinb inb 176#define xinb inb
180#else 177#else
181static inline void xoutb(unsigned char val, unsigned short port) 178static inline void xoutb(unsigned char val, unsigned short port)
182{ 179{
183 if (pc_debug >= 7) 180 pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
184 printk(KERN_DEBUG "outb(val=%.2x,port=%.4x)\n", val, port);
185 outb(val, port); 181 outb(val, port);
186} 182}
187static inline unsigned char xinb(unsigned short port) 183static inline unsigned char xinb(unsigned short port)
@@ -189,8 +185,7 @@ static inline unsigned char xinb(unsigned short port)
189 unsigned char val; 185 unsigned char val;
190 186
191 val = inb(port); 187 val = inb(port);
192 if (pc_debug >= 7) 188 pr_debug("%.2x=inb(%.4x)\n", val, port);
193 printk(KERN_DEBUG "%.2x=inb(%.4x)\n", val, port);
194 189
195 return val; 190 return val;
196} 191}
@@ -514,12 +509,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
514 for (i = 0; i < 4; i++) { 509 for (i = 0; i < 4; i++) {
515 xoutb(i, REG_BUF_ADDR(iobase)); 510 xoutb(i, REG_BUF_ADDR(iobase));
516 xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */ 511 xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */
517#ifdef PCMCIA_DEBUG 512#ifdef CM4000_DEBUG
518 if (pc_debug >= 5) 513 pr_debug("0x%.2x ", dev->pts[i]);
519 printk("0x%.2x ", dev->pts[i]);
520 } 514 }
521 if (pc_debug >= 5) 515 pr_debug("\n");
522 printk("\n");
523#else 516#else
524 } 517 }
525#endif 518#endif
@@ -579,14 +572,13 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
579 pts_reply[i] = inb(REG_BUF_DATA(iobase)); 572 pts_reply[i] = inb(REG_BUF_DATA(iobase));
580 } 573 }
581 574
582#ifdef PCMCIA_DEBUG 575#ifdef CM4000_DEBUG
583 DEBUGP(2, dev, "PTSreply: "); 576 DEBUGP(2, dev, "PTSreply: ");
584 for (i = 0; i < num_bytes_read; i++) { 577 for (i = 0; i < num_bytes_read; i++) {
585 if (pc_debug >= 5) 578 pr_debug("0x%.2x ", pts_reply[i]);
586 printk("0x%.2x ", pts_reply[i]);
587 } 579 }
588 printk("\n"); 580 pr_debug("\n");
589#endif /* PCMCIA_DEBUG */ 581#endif /* CM4000_DEBUG */
590 582
591 DEBUGP(5, dev, "Clear Tactive in Flags1\n"); 583 DEBUGP(5, dev, "Clear Tactive in Flags1\n");
592 xoutb(0x20, REG_FLAGS1(iobase)); 584 xoutb(0x20, REG_FLAGS1(iobase));
@@ -655,7 +647,7 @@ static void terminate_monitor(struct cm4000_dev *dev)
655 647
656 DEBUGP(5, dev, "Delete timer\n"); 648 DEBUGP(5, dev, "Delete timer\n");
657 del_timer_sync(&dev->timer); 649 del_timer_sync(&dev->timer);
658#ifdef PCMCIA_DEBUG 650#ifdef CM4000_DEBUG
659 dev->monitor_running = 0; 651 dev->monitor_running = 0;
660#endif 652#endif
661 653
@@ -898,7 +890,7 @@ static void monitor_card(unsigned long p)
898 DEBUGP(4, dev, "ATR checksum (0x%.2x, should " 890 DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
899 "be zero) failed\n", dev->atr_csum); 891 "be zero) failed\n", dev->atr_csum);
900 } 892 }
901#ifdef PCMCIA_DEBUG 893#ifdef CM4000_DEBUG
902 else if (test_bit(IS_BAD_LENGTH, &dev->flags)) { 894 else if (test_bit(IS_BAD_LENGTH, &dev->flags)) {
903 DEBUGP(4, dev, "ATR length error\n"); 895 DEBUGP(4, dev, "ATR length error\n");
904 } else { 896 } else {
@@ -1415,7 +1407,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1415 int size; 1407 int size;
1416 int rc; 1408 int rc;
1417 void __user *argp = (void __user *)arg; 1409 void __user *argp = (void __user *)arg;
1418#ifdef PCMCIA_DEBUG 1410#ifdef CM4000_DEBUG
1419 char *ioctl_names[CM_IOC_MAXNR + 1] = { 1411 char *ioctl_names[CM_IOC_MAXNR + 1] = {
1420 [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS", 1412 [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS",
1421 [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR", 1413 [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR",
@@ -1423,9 +1415,9 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1423 [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS", 1415 [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS",
1424 [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL", 1416 [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL",
1425 }; 1417 };
1426#endif
1427 DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), 1418 DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode),
1428 iminor(inode), ioctl_names[_IOC_NR(cmd)]); 1419 iminor(inode), ioctl_names[_IOC_NR(cmd)]);
1420#endif
1429 1421
1430 lock_kernel(); 1422 lock_kernel();
1431 rc = -ENODEV; 1423 rc = -ENODEV;
@@ -1523,7 +1515,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1523 } 1515 }
1524 case CM_IOCARDOFF: 1516 case CM_IOCARDOFF:
1525 1517
1526#ifdef PCMCIA_DEBUG 1518#ifdef CM4000_DEBUG
1527 DEBUGP(4, dev, "... in CM_IOCARDOFF\n"); 1519 DEBUGP(4, dev, "... in CM_IOCARDOFF\n");
1528 if (dev->flags0 & 0x01) { 1520 if (dev->flags0 & 0x01) {
1529 DEBUGP(4, dev, " Card inserted\n"); 1521 DEBUGP(4, dev, " Card inserted\n");
@@ -1625,18 +1617,9 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1625 1617
1626 } 1618 }
1627 break; 1619 break;
1628#ifdef PCMCIA_DEBUG 1620#ifdef CM4000_DEBUG
1629 case CM_IOSDBGLVL: /* set debug log level */ 1621 case CM_IOSDBGLVL:
1630 { 1622 rc = -ENOTTY;
1631 int old_pc_debug = 0;
1632
1633 old_pc_debug = pc_debug;
1634 if (copy_from_user(&pc_debug, argp, sizeof(int)))
1635 rc = -EFAULT;
1636 else if (old_pc_debug != pc_debug)
1637 DEBUGP(0, dev, "Changed debug log level "
1638 "to %i\n", pc_debug);
1639 }
1640 break; 1623 break;
1641#endif 1624#endif
1642 default: 1625 default:
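
With the driver-private pc_debug level gone, debug output in the reader drivers flows through dev_dbg()/pr_debug(), which compile to nothing unless DEBUG (or dynamic debug) is enabled for the file; that is also why CM_IOSDBGLVL now just returns -ENOTTY. A minimal sketch of the convention, with placeholder names:

#include <linux/device.h>

static void example_trace(struct device *dev, unsigned int port)
{
	/* emitted only when DEBUG or CONFIG_DYNAMIC_DEBUG is in effect */
	dev_dbg(dev, "probing I/O port 0x%x\n", port);
}
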
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 4f0723b07974..a6a70e476bea 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -17,8 +17,6 @@
17 * All rights reserved, Dual BSD/GPL Licensed. 17 * All rights reserved, Dual BSD/GPL Licensed.
18 */ 18 */
19 19
20/* #define PCMCIA_DEBUG 6 */
21
22#include <linux/kernel.h> 20#include <linux/kernel.h>
23#include <linux/module.h> 21#include <linux/module.h>
24#include <linux/slab.h> 22#include <linux/slab.h>
@@ -41,18 +39,16 @@
41#include "cm4040_cs.h" 39#include "cm4040_cs.h"
42 40
43 41
44#ifdef PCMCIA_DEBUG 42#define reader_to_dev(x) (&x->p_dev->dev)
45#define reader_to_dev(x) (&handle_to_dev(x->p_dev)) 43
46static int pc_debug = PCMCIA_DEBUG; 44/* n (debug level) is ignored */
47module_param(pc_debug, int, 0600); 45/* additional debug output may be enabled by re-compiling with
48#define DEBUGP(n, rdr, x, args...) do { \ 46 * CM4040_DEBUG set */
49 if (pc_debug >= (n)) \ 47/* #define CM4040_DEBUG */
50 dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ 48#define DEBUGP(n, rdr, x, args...) do { \
51 __func__ , ##args); \ 49 dev_dbg(reader_to_dev(rdr), "%s:" x, \
50 __func__ , ## args); \
52 } while (0) 51 } while (0)
53#else
54#define DEBUGP(n, rdr, x, args...)
55#endif
56 52
57static char *version = 53static char *version =
58"OMNIKEY CardMan 4040 v1.1.0gm5 - All bugs added by Harald Welte"; 54"OMNIKEY CardMan 4040 v1.1.0gm5 - All bugs added by Harald Welte";
@@ -90,14 +86,13 @@ struct reader_dev {
90 86
91static struct pcmcia_device *dev_table[CM_MAX_DEV]; 87static struct pcmcia_device *dev_table[CM_MAX_DEV];
92 88
93#ifndef PCMCIA_DEBUG 89#ifndef CM4040_DEBUG
94#define xoutb outb 90#define xoutb outb
95#define xinb inb 91#define xinb inb
96#else 92#else
97static inline void xoutb(unsigned char val, unsigned short port) 93static inline void xoutb(unsigned char val, unsigned short port)
98{ 94{
99 if (pc_debug >= 7) 95 pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
100 printk(KERN_DEBUG "outb(val=%.2x,port=%.4x)\n", val, port);
101 outb(val, port); 96 outb(val, port);
102} 97}
103 98
@@ -106,8 +101,7 @@ static inline unsigned char xinb(unsigned short port)
106 unsigned char val; 101 unsigned char val;
107 102
108 val = inb(port); 103 val = inb(port);
109 if (pc_debug >= 7) 104 pr_debug("%.2x=inb(%.4x)\n", val, port);
110 printk(KERN_DEBUG "%.2x=inb(%.4x)\n", val, port);
111 return val; 105 return val;
112} 106}
113#endif 107#endif
@@ -260,23 +254,22 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
260 return -EIO; 254 return -EIO;
261 } 255 }
262 dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN); 256 dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN);
263#ifdef PCMCIA_DEBUG 257#ifdef CM4040_DEBUG
264 if (pc_debug >= 6) 258 pr_debug("%lu:%2x ", i, dev->r_buf[i]);
265 printk(KERN_DEBUG "%lu:%2x ", i, dev->r_buf[i]);
266 } 259 }
267 printk("\n"); 260 pr_debug("\n");
268#else 261#else
269 } 262 }
270#endif 263#endif
271 264
272 bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]); 265 bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]);
273 266
274 DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read); 267 DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read);
275 268
276 min_bytes_to_read = min(count, bytes_to_read + 5); 269 min_bytes_to_read = min(count, bytes_to_read + 5);
277 min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE); 270 min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
278 271
279 DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read); 272 DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read);
280 273
281 for (i = 0; i < (min_bytes_to_read-5); i++) { 274 for (i = 0; i < (min_bytes_to_read-5); i++) {
282 rc = wait_for_bulk_in_ready(dev); 275 rc = wait_for_bulk_in_ready(dev);
@@ -288,11 +281,10 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
288 return -EIO; 281 return -EIO;
289 } 282 }
290 dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN); 283 dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN);
291#ifdef PCMCIA_DEBUG 284#ifdef CM4040_DEBUG
292 if (pc_debug >= 6) 285 pr_debug("%lu:%2x ", i, dev->r_buf[i]);
293 printk(KERN_DEBUG "%lu:%2x ", i, dev->r_buf[i]);
294 } 286 }
295 printk("\n"); 287 pr_debug("\n");
296#else 288#else
297 } 289 }
298#endif 290#endif
@@ -547,7 +539,7 @@ static int cm4040_config_check(struct pcmcia_device *p_dev,
547 p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK; 539 p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
548 540
549 rc = pcmcia_request_io(p_dev, &p_dev->io); 541 rc = pcmcia_request_io(p_dev, &p_dev->io);
550 dev_printk(KERN_INFO, &handle_to_dev(p_dev), 542 dev_printk(KERN_INFO, &p_dev->dev,
551 "pcmcia_request_io returned 0x%x\n", rc); 543 "pcmcia_request_io returned 0x%x\n", rc);
552 return rc; 544 return rc;
553} 545}
@@ -569,7 +561,7 @@ static int reader_config(struct pcmcia_device *link, int devno)
569 561
570 fail_rc = pcmcia_request_configuration(link, &link->conf); 562 fail_rc = pcmcia_request_configuration(link, &link->conf);
571 if (fail_rc != 0) { 563 if (fail_rc != 0) {
572 dev_printk(KERN_INFO, &handle_to_dev(link), 564 dev_printk(KERN_INFO, &link->dev,
573 "pcmcia_request_configuration failed 0x%x\n", 565 "pcmcia_request_configuration failed 0x%x\n",
574 fail_rc); 566 fail_rc);
575 goto cs_release; 567 goto cs_release;
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c
index 4c1820cad712..99cffdab1056 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.c
+++ b/drivers/char/pcmcia/ipwireless/hardware.c
@@ -1213,12 +1213,12 @@ static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq,
1213 1213
1214irqreturn_t ipwireless_interrupt(int irq, void *dev_id) 1214irqreturn_t ipwireless_interrupt(int irq, void *dev_id)
1215{ 1215{
1216 struct ipw_hardware *hw = dev_id; 1216 struct ipw_dev *ipw = dev_id;
1217 1217
1218 if (hw->hw_version == HW_VERSION_1) 1218 if (ipw->hardware->hw_version == HW_VERSION_1)
1219 return ipwireless_handle_v1_interrupt(irq, hw); 1219 return ipwireless_handle_v1_interrupt(irq, ipw->hardware);
1220 else 1220 else
1221 return ipwireless_handle_v2_v3_interrupt(irq, hw); 1221 return ipwireless_handle_v2_v3_interrupt(irq, ipw->hardware);
1222} 1222}
1223 1223
1224static void flush_packets_to_hw(struct ipw_hardware *hw) 1224static void flush_packets_to_hw(struct ipw_hardware *hw)
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 5216fce0c62d..dff24dae1485 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -65,10 +65,7 @@ static void signalled_reboot_work(struct work_struct *work_reboot)
65 struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev, 65 struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev,
66 work_reboot); 66 work_reboot);
67 struct pcmcia_device *link = ipw->link; 67 struct pcmcia_device *link = ipw->link;
68 int ret = pcmcia_reset_card(link->socket); 68 pcmcia_reset_card(link->socket);
69
70 if (ret != 0)
71 cs_error(link, ResetCard, ret);
72} 69}
73 70
74static void signalled_reboot_callback(void *callback_data) 71static void signalled_reboot_callback(void *callback_data)
@@ -79,208 +76,127 @@ static void signalled_reboot_callback(void *callback_data)
79 schedule_work(&ipw->work_reboot); 76 schedule_work(&ipw->work_reboot);
80} 77}
81 78
82static int config_ipwireless(struct ipw_dev *ipw) 79static int ipwireless_probe(struct pcmcia_device *p_dev,
80 cistpl_cftable_entry_t *cfg,
81 cistpl_cftable_entry_t *dflt,
82 unsigned int vcc,
83 void *priv_data)
83{ 84{
84 struct pcmcia_device *link = ipw->link; 85 struct ipw_dev *ipw = priv_data;
85 int ret; 86 struct resource *io_resource;
86 tuple_t tuple;
87 unsigned short buf[64];
88 cisparse_t parse;
89 unsigned short cor_value;
90 memreq_t memreq_attr_memory; 87 memreq_t memreq_attr_memory;
91 memreq_t memreq_common_memory; 88 memreq_t memreq_common_memory;
89 int ret;
92 90
93 ipw->is_v2_card = 0; 91 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
94 92 p_dev->io.BasePort1 = cfg->io.win[0].base;
95 tuple.Attributes = 0; 93 p_dev->io.NumPorts1 = cfg->io.win[0].len;
96 tuple.TupleData = (cisdata_t *) buf; 94 p_dev->io.IOAddrLines = 16;
97 tuple.TupleDataMax = sizeof(buf);
98 tuple.TupleOffset = 0;
99
100 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
101
102 ret = pcmcia_get_first_tuple(link, &tuple);
103
104 while (ret == 0) {
105 ret = pcmcia_get_tuple_data(link, &tuple);
106
107 if (ret != 0) {
108 cs_error(link, GetTupleData, ret);
109 goto exit0;
110 }
111 ret = pcmcia_get_next_tuple(link, &tuple);
112 }
113
114 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
115
116 ret = pcmcia_get_first_tuple(link, &tuple);
117
118 if (ret != 0) {
119 cs_error(link, GetFirstTuple, ret);
120 goto exit0;
121 }
122
123 ret = pcmcia_get_tuple_data(link, &tuple);
124
125 if (ret != 0) {
126 cs_error(link, GetTupleData, ret);
127 goto exit0;
128 }
129
130 ret = pcmcia_parse_tuple(&tuple, &parse);
131
132 if (ret != 0) {
133 cs_error(link, ParseTuple, ret);
134 goto exit0;
135 }
136
137 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
138 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
139 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
140 link->io.IOAddrLines = 16;
141
142 link->irq.IRQInfo1 = parse.cftable_entry.irq.IRQInfo1;
143 95
144 /* 0x40 causes it to generate level mode interrupts. */ 96 /* 0x40 causes it to generate level mode interrupts. */
145 /* 0x04 enables IREQ pin. */ 97 /* 0x04 enables IREQ pin. */
146 cor_value = parse.cftable_entry.index | 0x44; 98 p_dev->conf.ConfigIndex = cfg->index | 0x44;
147 link->conf.ConfigIndex = cor_value; 99 ret = pcmcia_request_io(p_dev, &p_dev->io);
100 if (ret)
101 return ret;
148 102
149 /* IRQ and I/O settings */ 103 io_resource = request_region(p_dev->io.BasePort1, p_dev->io.NumPorts1,
150 tuple.DesiredTuple = CISTPL_CONFIG; 104 IPWIRELESS_PCCARD_NAME);
151 105
152 ret = pcmcia_get_first_tuple(link, &tuple); 106 if (cfg->mem.nwin == 0)
107 return 0;
153 108
154 if (ret != 0) { 109 ipw->request_common_memory.Attributes =
155 cs_error(link, GetFirstTuple, ret);
156 goto exit0;
157 }
158
159 ret = pcmcia_get_tuple_data(link, &tuple);
160
161 if (ret != 0) {
162 cs_error(link, GetTupleData, ret);
163 goto exit0;
164 }
165
166 ret = pcmcia_parse_tuple(&tuple, &parse);
167
168 if (ret != 0) {
169 cs_error(link, GetTupleData, ret);
170 goto exit0;
171 }
172 link->conf.Attributes = CONF_ENABLE_IRQ;
173 link->conf.ConfigBase = parse.config.base;
174 link->conf.Present = parse.config.rmask[0];
175 link->conf.IntType = INT_MEMORY_AND_IO;
176
177 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
178 link->irq.Handler = ipwireless_interrupt;
179 link->irq.Instance = ipw->hardware;
180
181 ret = pcmcia_request_io(link, &link->io);
182
183 if (ret != 0) {
184 cs_error(link, RequestIO, ret);
185 goto exit0;
186 }
187
188 request_region(link->io.BasePort1, link->io.NumPorts1,
189 IPWIRELESS_PCCARD_NAME);
190
191 /* memory settings */
192
193 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
194
195 ret = pcmcia_get_first_tuple(link, &tuple);
196
197 if (ret != 0) {
198 cs_error(link, GetFirstTuple, ret);
199 goto exit1;
200 }
201
202 ret = pcmcia_get_tuple_data(link, &tuple);
203
204 if (ret != 0) {
205 cs_error(link, GetTupleData, ret);
206 goto exit1;
207 }
208
209 ret = pcmcia_parse_tuple(&tuple, &parse);
210
211 if (ret != 0) {
212 cs_error(link, ParseTuple, ret);
213 goto exit1;
214 }
215
216 if (parse.cftable_entry.mem.nwin > 0) {
217 ipw->request_common_memory.Attributes =
218 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
219 ipw->request_common_memory.Base =
220 parse.cftable_entry.mem.win[0].host_addr;
221 ipw->request_common_memory.Size = parse.cftable_entry.mem.win[0].len;
222 if (ipw->request_common_memory.Size < 0x1000)
223 ipw->request_common_memory.Size = 0x1000;
224 ipw->request_common_memory.AccessSpeed = 0;
225
226 ret = pcmcia_request_window(&link, &ipw->request_common_memory,
227 &ipw->handle_common_memory);
228
229 if (ret != 0) {
230 cs_error(link, RequestWindow, ret);
231 goto exit1;
232 }
233
234 memreq_common_memory.CardOffset =
235 parse.cftable_entry.mem.win[0].card_addr;
236 memreq_common_memory.Page = 0;
237
238 ret = pcmcia_map_mem_page(ipw->handle_common_memory,
239 &memreq_common_memory);
240
241 if (ret != 0) {
242 cs_error(link, MapMemPage, ret);
243 goto exit1;
244 }
245
246 ipw->is_v2_card =
247 parse.cftable_entry.mem.win[0].len == 0x100;
248
249 ipw->common_memory = ioremap(ipw->request_common_memory.Base,
250 ipw->request_common_memory.Size);
251 request_mem_region(ipw->request_common_memory.Base,
252 ipw->request_common_memory.Size, IPWIRELESS_PCCARD_NAME);
253
254 ipw->request_attr_memory.Attributes =
255 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
256 ipw->request_attr_memory.Base = 0;
257 ipw->request_attr_memory.Size = 0; /* this used to be 0x1000 */
258 ipw->request_attr_memory.AccessSpeed = 0;
259
260 ret = pcmcia_request_window(&link, &ipw->request_attr_memory,
261 &ipw->handle_attr_memory);
262
263 if (ret != 0) {
264 cs_error(link, RequestWindow, ret);
265 goto exit2;
266 }
267
268 memreq_attr_memory.CardOffset = 0;
269 memreq_attr_memory.Page = 0;
270
271 ret = pcmcia_map_mem_page(ipw->handle_attr_memory,
272 &memreq_attr_memory);
273
274 if (ret != 0) {
275 cs_error(link, MapMemPage, ret);
276 goto exit2;
277 }
278
279 ipw->attr_memory = ioremap(ipw->request_attr_memory.Base,
280 ipw->request_attr_memory.Size);
281 request_mem_region(ipw->request_attr_memory.Base, ipw->request_attr_memory.Size,
282 IPWIRELESS_PCCARD_NAME);
283 }
284

110 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
111 ipw->request_common_memory.Base = cfg->mem.win[0].host_addr;
112 ipw->request_common_memory.Size = cfg->mem.win[0].len;
113 if (ipw->request_common_memory.Size < 0x1000)
114 ipw->request_common_memory.Size = 0x1000;
115 ipw->request_common_memory.AccessSpeed = 0;
116
117 ret = pcmcia_request_window(p_dev, &ipw->request_common_memory,
118 &ipw->handle_common_memory);
119
120 if (ret != 0)
121 goto exit1;
122
123 memreq_common_memory.CardOffset = cfg->mem.win[0].card_addr;
124 memreq_common_memory.Page = 0;
125
126 ret = pcmcia_map_mem_page(p_dev, ipw->handle_common_memory,
127 &memreq_common_memory);
128
129 if (ret != 0)
130 goto exit2;
131
132 ipw->is_v2_card = cfg->mem.win[0].len == 0x100;
133
134 ipw->common_memory = ioremap(ipw->request_common_memory.Base,
135 ipw->request_common_memory.Size);
136 request_mem_region(ipw->request_common_memory.Base,
137 ipw->request_common_memory.Size,
138 IPWIRELESS_PCCARD_NAME);
139
140 ipw->request_attr_memory.Attributes =
141 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
142 ipw->request_attr_memory.Base = 0;
143 ipw->request_attr_memory.Size = 0; /* this used to be 0x1000 */
144 ipw->request_attr_memory.AccessSpeed = 0;
145
146 ret = pcmcia_request_window(p_dev, &ipw->request_attr_memory,
147 &ipw->handle_attr_memory);
148
149 if (ret != 0)
150 goto exit2;
151
152 memreq_attr_memory.CardOffset = 0;
153 memreq_attr_memory.Page = 0;
154
155 ret = pcmcia_map_mem_page(p_dev, ipw->handle_attr_memory,
156 &memreq_attr_memory);
157
158 if (ret != 0)
159 goto exit3;
160
161 ipw->attr_memory = ioremap(ipw->request_attr_memory.Base,
162 ipw->request_attr_memory.Size);
163 request_mem_region(ipw->request_attr_memory.Base,
164 ipw->request_attr_memory.Size, IPWIRELESS_PCCARD_NAME);
165
166 return 0;
167
168exit3:
169 pcmcia_release_window(p_dev, ipw->handle_attr_memory);
170exit2:
171 if (ipw->common_memory) {
172 release_mem_region(ipw->request_common_memory.Base,
173 ipw->request_common_memory.Size);
174 iounmap(ipw->common_memory);
175 pcmcia_release_window(p_dev, ipw->handle_common_memory);
176 } else
177 pcmcia_release_window(p_dev, ipw->handle_common_memory);
178exit1:
179 release_resource(io_resource);
180 pcmcia_disable_device(p_dev);
181 return -1;
182}
183
184static int config_ipwireless(struct ipw_dev *ipw)
185{
186 struct pcmcia_device *link = ipw->link;
187 int ret = 0;
188
189 ipw->is_v2_card = 0;
190
191 ret = pcmcia_loop_config(link, ipwireless_probe, ipw);
192 if (ret != 0)
193 return ret;
194
195 link->conf.Attributes = CONF_ENABLE_IRQ;
196 link->conf.IntType = INT_MEMORY_AND_IO;
197
198 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
199 link->irq.Handler = ipwireless_interrupt;
200
285 INIT_WORK(&ipw->work_reboot, signalled_reboot_work); 201 INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
286 202
@@ -291,10 +207,8 @@ static int config_ipwireless(struct ipw_dev *ipw)
291 207
292 ret = pcmcia_request_irq(link, &link->irq); 208 ret = pcmcia_request_irq(link, &link->irq);
293 209
294 if (ret != 0) { 210 if (ret != 0)
295 cs_error(link, RequestIRQ, ret); 211 goto exit;
296 goto exit3;
297 }
298 212
299 printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n", 213 printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n",
300 ipw->is_v2_card ? "V2/V3" : "V1"); 214 ipw->is_v2_card ? "V2/V3" : "V1");
@@ -316,12 +230,12 @@ static int config_ipwireless(struct ipw_dev *ipw)
316 230
317 ipw->network = ipwireless_network_create(ipw->hardware); 231 ipw->network = ipwireless_network_create(ipw->hardware);
318 if (!ipw->network) 232 if (!ipw->network)
319 goto exit3; 233 goto exit;
320 234
321 ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network, 235 ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network,
322 ipw->nodes); 236 ipw->nodes);
323 if (!ipw->tty) 237 if (!ipw->tty)
324 goto exit3; 238 goto exit;
325 239
326 ipwireless_init_hardware_v2_v3(ipw->hardware); 240 ipwireless_init_hardware_v2_v3(ipw->hardware);
327 241
@@ -331,35 +245,27 @@ static int config_ipwireless(struct ipw_dev *ipw)
331 */ 245 */
332 ret = pcmcia_request_configuration(link, &link->conf); 246 ret = pcmcia_request_configuration(link, &link->conf);
333 247
334 if (ret != 0) { 248 if (ret != 0)
335 cs_error(link, RequestConfiguration, ret); 249 goto exit;
336 goto exit4;
337 }
338 250
339 link->dev_node = &ipw->nodes[0]; 251 link->dev_node = &ipw->nodes[0];
340 252
341 return 0; 253 return 0;
342 254
343exit4: 255exit:
344 pcmcia_disable_device(link);
345exit3:
346 if (ipw->attr_memory) { 256 if (ipw->attr_memory) {
347 release_mem_region(ipw->request_attr_memory.Base, 257 release_mem_region(ipw->request_attr_memory.Base,
348 ipw->request_attr_memory.Size); 258 ipw->request_attr_memory.Size);
349 iounmap(ipw->attr_memory); 259 iounmap(ipw->attr_memory);
350 pcmcia_release_window(ipw->handle_attr_memory); 260 pcmcia_release_window(link, ipw->handle_attr_memory);
351 pcmcia_disable_device(link);
352 } 261 }
353exit2:
354 if (ipw->common_memory) { 262 if (ipw->common_memory) {
355 release_mem_region(ipw->request_common_memory.Base, 263 release_mem_region(ipw->request_common_memory.Base,
356 ipw->request_common_memory.Size); 264 ipw->request_common_memory.Size);
357 iounmap(ipw->common_memory); 265 iounmap(ipw->common_memory);
358 pcmcia_release_window(ipw->handle_common_memory); 266 pcmcia_release_window(link, ipw->handle_common_memory);
359 } 267 }
360exit1:
361 pcmcia_disable_device(link); 268 pcmcia_disable_device(link);
362exit0:
363 return -1; 269 return -1;
364} 270}
365 271
@@ -378,9 +284,9 @@ static void release_ipwireless(struct ipw_dev *ipw)
378 iounmap(ipw->attr_memory); 284 iounmap(ipw->attr_memory);
379 } 285 }
380 if (ipw->common_memory) 286 if (ipw->common_memory)
381 pcmcia_release_window(ipw->handle_common_memory); 287 pcmcia_release_window(ipw->link, ipw->handle_common_memory);
382 if (ipw->attr_memory) 288 if (ipw->attr_memory)
383 pcmcia_release_window(ipw->handle_attr_memory); 289 pcmcia_release_window(ipw->link, ipw->handle_attr_memory);
384 290
385 /* Break the link with Card Services */ 291 /* Break the link with Card Services */
386 pcmcia_disable_device(ipw->link); 292 pcmcia_disable_device(ipw->link);
@@ -406,7 +312,6 @@ static int ipwireless_attach(struct pcmcia_device *link)
406 312
407 ipw->link = link; 313 ipw->link = link;
408 link->priv = ipw; 314 link->priv = ipw;
409 link->irq.Instance = ipw;
410 315
411 /* Link this device into our device list. */ 316 /* Link this device into our device list. */
412 link->dev_node = &ipw->nodes[0]; 317 link->dev_node = &ipw->nodes[0];
@@ -421,7 +326,6 @@ static int ipwireless_attach(struct pcmcia_device *link)
421 ret = config_ipwireless(ipw); 326 ret = config_ipwireless(ipw);
422 327
423 if (ret != 0) { 328 if (ret != 0) {
424 cs_error(link, RegisterClient, ret);
425 ipwireless_detach(link); 329 ipwireless_detach(link);
426 return ret; 330 return ret;
427 } 331 }
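For reference, the conversions above (ipwireless here, and several drivers further down) all pivot on pcmcia_loop_config(): the driver supplies a callback that the PCMCIA core tries against each CISTPL_CFTABLE_ENTRY, instead of the driver walking and parsing the tuples itself. The following is a minimal, hypothetical sketch of that pattern against the 2.6.32-era API used in this patch; example_probe() and example_config() are invented names, not code from any of these drivers.

#include <linux/errno.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Tried once per CISTPL_CFTABLE_ENTRY; return 0 to accept the entry. */
static int example_probe(struct pcmcia_device *p_dev,
                         cistpl_cftable_entry_t *cfg,
                         cistpl_cftable_entry_t *dflt,
                         unsigned int vcc,
                         void *priv_data)
{
        if (cfg->io.nwin > 0) {
                p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
                p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
                p_dev->io.BasePort1 = cfg->io.win[0].base;
                p_dev->io.NumPorts1 = cfg->io.win[0].len;
                return pcmcia_request_io(p_dev, &p_dev->io);
        }
        return -ENODEV; /* reject this entry; the core moves on to the next */
}

static int example_config(struct pcmcia_device *link)
{
        int ret;

        /* Walk the card's config table until example_probe() accepts an entry. */
        ret = pcmcia_loop_config(link, example_probe, NULL);
        if (ret)
                return ret;

        link->conf.Attributes = CONF_ENABLE_IRQ;
        link->conf.IntType = INT_MEMORY_AND_IO;

        ret = pcmcia_request_irq(link, &link->irq);
        if (ret)
                goto failed;
        ret = pcmcia_request_configuration(link, &link->conf);
        if (ret)
                goto failed;
        return 0;

failed:
        pcmcia_disable_device(link);
        return -ENODEV;
}

Returning a negative value from the callback rejects the current configuration entry and lets pcmcia_loop_config() try the next one, which is how these drivers fall back across multiple entries without the old CS_CHECK()/cs_error() bookkeeping.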
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index caf6e4d19469..c31a0d913d37 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -554,7 +554,6 @@ static int mgslpc_probe(struct pcmcia_device *link)
554 554
555 /* Interrupt setup */ 555 /* Interrupt setup */
556 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 556 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
557 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
558 link->irq.Handler = NULL; 557 link->irq.Handler = NULL;
559 558
560 link->conf.Attributes = 0; 559 link->conf.Attributes = 0;
@@ -572,69 +571,51 @@ static int mgslpc_probe(struct pcmcia_device *link)
572/* Card has been inserted. 571/* Card has been inserted.
573 */ 572 */
574 573
575#define CS_CHECK(fn, ret) \
576do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
574static int mgslpc_ioprobe(struct pcmcia_device *p_dev,
575 cistpl_cftable_entry_t *cfg,
576 cistpl_cftable_entry_t *dflt,
577 unsigned int vcc,
578 void *priv_data)
579{
580 if (cfg->io.nwin > 0) {
581 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
582 if (!(cfg->io.flags & CISTPL_IO_8BIT))
583 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
584 if (!(cfg->io.flags & CISTPL_IO_16BIT))
585 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
586 p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
587 p_dev->io.BasePort1 = cfg->io.win[0].base;
588 p_dev->io.NumPorts1 = cfg->io.win[0].len;
589 return pcmcia_request_io(p_dev, &p_dev->io);
590 }
591 return -ENODEV;
592}
577 593
578static int mgslpc_config(struct pcmcia_device *link) 594static int mgslpc_config(struct pcmcia_device *link)
579{ 595{
580 MGSLPC_INFO *info = link->priv; 596 MGSLPC_INFO *info = link->priv;
581 tuple_t tuple;
582 cisparse_t parse;
583 int last_fn, last_ret;
584 u_char buf[64];
585 cistpl_cftable_entry_t dflt = { 0 };
586 cistpl_cftable_entry_t *cfg;
597 int ret;
587 598
588 if (debug_level >= DEBUG_LEVEL_INFO) 599 if (debug_level >= DEBUG_LEVEL_INFO)
589 printk("mgslpc_config(0x%p)\n", link); 600 printk("mgslpc_config(0x%p)\n", link);
590 601
591 tuple.Attributes = 0;
592 tuple.TupleData = buf;
593 tuple.TupleDataMax = sizeof(buf);
594 tuple.TupleOffset = 0;
595
596 /* get CIS configuration entry */
597
598 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
599 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
600
601 cfg = &(parse.cftable_entry);
602 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
603 CS_CHECK(ParseTuple, pcmcia_parse_tuple(&tuple, &parse));
604
605 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
606 if (cfg->index == 0)
607 goto cs_failed;
608
609 link->conf.ConfigIndex = cfg->index;
610 link->conf.Attributes |= CONF_ENABLE_IRQ;
611
612 /* IO window settings */
613 link->io.NumPorts1 = 0;
614 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
615 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
616 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
617 if (!(io->flags & CISTPL_IO_8BIT))
618 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
619 if (!(io->flags & CISTPL_IO_16BIT))
620 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
621 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
622 link->io.BasePort1 = io->win[0].base;
623 link->io.NumPorts1 = io->win[0].len;
624 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
625 }
602 ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
603 if (ret != 0)
604 goto failed;
626 605
627 link->conf.Attributes = CONF_ENABLE_IRQ; 606 link->conf.Attributes = CONF_ENABLE_IRQ;
628 link->conf.IntType = INT_MEMORY_AND_IO; 607 link->conf.IntType = INT_MEMORY_AND_IO;
629 link->conf.ConfigIndex = 8; 608 link->conf.ConfigIndex = 8;
630 link->conf.Present = PRESENT_OPTION; 609 link->conf.Present = PRESENT_OPTION;
631 610
632 link->irq.Attributes |= IRQ_HANDLE_PRESENT;
633 link->irq.Handler = mgslpc_isr; 611 link->irq.Handler = mgslpc_isr;
634 link->irq.Instance = info;
635 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
636 612
637 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
613 ret = pcmcia_request_irq(link, &link->irq);
614 if (ret)
615 goto failed;
616 ret = pcmcia_request_configuration(link, &link->conf);
617 if (ret)
618 goto failed;
638 619
639 info->io_base = link->io.BasePort1; 620 info->io_base = link->io.BasePort1;
640 info->irq_level = link->irq.AssignedIRQ; 621 info->irq_level = link->irq.AssignedIRQ;
@@ -654,8 +635,7 @@ static int mgslpc_config(struct pcmcia_device *link)
654 printk("\n"); 635 printk("\n");
655 return 0; 636 return 0;
656 637
657cs_failed: 638failed:
658 cs_error(link, last_fn, last_ret);
659 mgslpc_release((u_long)link); 639 mgslpc_release((u_long)link);
660 return -ENODEV; 640 return -ENODEV;
661} 641}
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 47c2d2763456..f06bb37defb1 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -31,7 +31,7 @@
31 31
32enum tpm_const { 32enum tpm_const {
33 TPM_MINOR = 224, /* officially assigned */ 33 TPM_MINOR = 224, /* officially assigned */
34 TPM_BUFSIZE = 2048, 34 TPM_BUFSIZE = 4096,
35 TPM_NUM_DEVICES = 256, 35 TPM_NUM_DEVICES = 256,
36}; 36};
37 37
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0b73e4ec1add..2405f17b29dd 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -257,6 +257,10 @@ out:
257 return size; 257 return size;
258} 258}
259 259
260static int itpm;
261module_param(itpm, bool, 0444);
262MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
263
260/* 264/*
261 * If interrupts are used (signaled by an irq set in the vendor structure) 265 * If interrupts are used (signaled by an irq set in the vendor structure)
262 * tpm.c can skip polling for the data to be available as the interrupt is 266 * tpm.c can skip polling for the data to be available as the interrupt is
@@ -293,7 +297,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
293 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, 297 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
294 &chip->vendor.int_queue); 298 &chip->vendor.int_queue);
295 status = tpm_tis_status(chip); 299 status = tpm_tis_status(chip);
296 if ((status & TPM_STS_DATA_EXPECT) == 0) { 300 if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
297 rc = -EIO; 301 rc = -EIO;
298 goto out_err; 302 goto out_err;
299 } 303 }
@@ -467,6 +471,10 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
467 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 471 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
468 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 472 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
469 473
474 if (itpm)
475 dev_info(dev, "Intel iTPM workaround enabled\n");
476
477
470 /* Figure out the capabilities */ 478 /* Figure out the capabilities */
471 intfcaps = 479 intfcaps =
472 ioread32(chip->vendor.iobase + 480 ioread32(chip->vendor.iobase +
@@ -629,6 +637,7 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
629 {"", 0}, /* User Specified */ 637 {"", 0}, /* User Specified */
630 {"", 0} /* Terminator */ 638 {"", 0} /* Terminator */
631}; 639};
640MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
632 641
633static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev) 642static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
634{ 643{
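The tpm_tis hunks above add an "itpm" module parameter that relaxes the TPM_STS_DATA_EXPECT check on affected Lenovo machines. A stand-alone, hypothetical sketch of that gating pattern; example_quirk and example_check_status are invented names (the hunk above uses an int flag the same way):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_quirk;
module_param(example_quirk, bool, 0444); /* readable in sysfs, not writable */
MODULE_PARM_DESC(example_quirk, "Force the example hardware workaround");

/* Only enforce the strict status check when the quirk is not requested. */
static int example_check_status(unsigned int status, unsigned int expect_bit)
{
        if (!example_quirk && (status & expect_bit) == 0)
                return -EIO;
        return 0;
}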
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 5711ce5353c6..4baf3d7d0f8e 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -144,13 +144,6 @@ static int lnw_irq_type(unsigned irq, unsigned type)
144 144
145static void lnw_irq_unmask(unsigned irq) 145static void lnw_irq_unmask(unsigned irq)
146{ 146{
147 struct lnw_gpio *lnw = get_irq_chip_data(irq);
148 u32 gpio = irq - lnw->irq_base;
149 u8 reg = gpio / 32;
150 void __iomem *gedr;
151
152 gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]);
153 writel(BIT(gpio % 32), gedr);
154}; 147};
155 148
156static void lnw_irq_mask(unsigned irq) 149static void lnw_irq_mask(unsigned irq)
@@ -183,13 +176,11 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
183 gedr_v = readl(gedr); 176 gedr_v = readl(gedr);
184 if (!gedr_v) 177 if (!gedr_v)
185 continue; 178 continue;
186 for (gpio = reg*32; gpio < reg*32+32; gpio++) { 179 for (gpio = reg*32; gpio < reg*32+32; gpio++)
187 gedr_v = readl(gedr);
188 if (gedr_v & BIT(gpio % 32)) { 180 if (gedr_v & BIT(gpio % 32)) {
189 pr_debug("pin %d triggered\n", gpio); 181 pr_debug("pin %d triggered\n", gpio);
190 generic_handle_irq(lnw->irq_base + gpio); 182 generic_handle_irq(lnw->irq_base + gpio);
191 } 183 }
192 }
193 /* clear the edge detect status bit */ 184 /* clear the edge detect status bit */
194 writel(gedr_v, gedr); 185 writel(gedr_v, gedr);
195 } 186 }
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 063b933d864a..dd6396384c25 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -60,15 +60,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
60MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver"); 60MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver");
61MODULE_LICENSE("Dual MPL/GPL"); 61MODULE_LICENSE("Dual MPL/GPL");
62 62
63#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
64
65#ifdef CONFIG_PCMCIA_DEBUG
66INT_MODULE_PARM(pc_debug, 0);
67#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
68#else
69#define DEBUG(n, args...)
70#endif
71
72/*====================================================================*/ 63/*====================================================================*/
73 64
74typedef struct ide_info_t { 65typedef struct ide_info_t {
@@ -98,7 +89,7 @@ static int ide_probe(struct pcmcia_device *link)
98{ 89{
99 ide_info_t *info; 90 ide_info_t *info;
100 91
101 DEBUG(0, "ide_attach()\n"); 92 dev_dbg(&link->dev, "ide_attach()\n");
102 93
103 /* Create new ide device */ 94 /* Create new ide device */
104 info = kzalloc(sizeof(*info), GFP_KERNEL); 95 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -112,7 +103,6 @@ static int ide_probe(struct pcmcia_device *link)
112 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 103 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
113 link->io.IOAddrLines = 3; 104 link->io.IOAddrLines = 3;
114 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 105 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
115 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
116 link->conf.Attributes = CONF_ENABLE_IRQ; 106 link->conf.Attributes = CONF_ENABLE_IRQ;
117 link->conf.IntType = INT_MEMORY_AND_IO; 107 link->conf.IntType = INT_MEMORY_AND_IO;
118 108
@@ -134,7 +124,7 @@ static void ide_detach(struct pcmcia_device *link)
134 ide_hwif_t *hwif = info->host->ports[0]; 124 ide_hwif_t *hwif = info->host->ports[0];
135 unsigned long data_addr, ctl_addr; 125 unsigned long data_addr, ctl_addr;
136 126
137 DEBUG(0, "ide_detach(0x%p)\n", link); 127 dev_dbg(&link->dev, "ide_detach(0x%p)\n", link);
138 128
139 data_addr = hwif->io_ports.data_addr; 129 data_addr = hwif->io_ports.data_addr;
140 ctl_addr = hwif->io_ports.ctl_addr; 130 ctl_addr = hwif->io_ports.ctl_addr;
@@ -217,9 +207,6 @@ out_release:
217 207
218======================================================================*/ 208======================================================================*/
219 209
220#define CS_CHECK(fn, ret) \
221do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
222
223struct pcmcia_config_check { 210struct pcmcia_config_check {
224 unsigned long ctl_base; 211 unsigned long ctl_base;
225 int skip_vcc; 212 int skip_vcc;
@@ -282,11 +269,11 @@ static int ide_config(struct pcmcia_device *link)
282{ 269{
283 ide_info_t *info = link->priv; 270 ide_info_t *info = link->priv;
284 struct pcmcia_config_check *stk = NULL; 271 struct pcmcia_config_check *stk = NULL;
285 int last_ret = 0, last_fn = 0, is_kme = 0; 272 int ret = 0, is_kme = 0;
286 unsigned long io_base, ctl_base; 273 unsigned long io_base, ctl_base;
287 struct ide_host *host; 274 struct ide_host *host;
288 275
289 DEBUG(0, "ide_config(0x%p)\n", link); 276 dev_dbg(&link->dev, "ide_config(0x%p)\n", link);
290 277
291 is_kme = ((link->manf_id == MANFID_KME) && 278 is_kme = ((link->manf_id == MANFID_KME) &&
292 ((link->card_id == PRODID_KME_KXLC005_A) || 279 ((link->card_id == PRODID_KME_KXLC005_A) ||
@@ -306,8 +293,12 @@ static int ide_config(struct pcmcia_device *link)
306 io_base = link->io.BasePort1; 293 io_base = link->io.BasePort1;
307 ctl_base = stk->ctl_base; 294 ctl_base = stk->ctl_base;
308 295
309 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 296 ret = pcmcia_request_irq(link, &link->irq);
310 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 297 if (ret)
298 goto failed;
299 ret = pcmcia_request_configuration(link, &link->conf);
300 if (ret)
301 goto failed;
311 302
312 /* disable drive interrupts during IDE probe */ 303 /* disable drive interrupts during IDE probe */
313 outb(0x02, ctl_base); 304 outb(0x02, ctl_base);
@@ -342,8 +333,6 @@ err_mem:
342 printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n"); 333 printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n");
343 goto failed; 334 goto failed;
344 335
345cs_failed:
346 cs_error(link, last_fn, last_ret);
347failed: 336failed:
348 kfree(stk); 337 kfree(stk);
349 ide_release(link); 338 ide_release(link);
@@ -363,7 +352,7 @@ static void ide_release(struct pcmcia_device *link)
363 ide_info_t *info = link->priv; 352 ide_info_t *info = link->priv;
364 struct ide_host *host = info->host; 353 struct ide_host *host = info->host;
365 354
366 DEBUG(0, "ide_release(0x%p)\n", link); 355 dev_dbg(&link->dev, "ide_release(0x%p)\n", link);
367 356
368 if (info->ndev) 357 if (info->ndev)
369 /* FIXME: if this fails we need to queue the cleanup somehow 358 /* FIXME: if this fails we need to queue the cleanup somehow
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a537925f7651..2bcf1ace27c0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -447,6 +447,27 @@ static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
447 DMI_MATCH(DMI_PRODUCT_NAME, "N10"), 447 DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
448 }, 448 },
449 }, 449 },
450 {
451 .ident = "Dell Vostro 1320",
452 .matches = {
453 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
454 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
455 },
456 },
457 {
458 .ident = "Dell Vostro 1520",
459 .matches = {
460 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
461 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
462 },
463 },
464 {
465 .ident = "Dell Vostro 1720",
466 .matches = {
467 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
468 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
469 },
470 },
450 { } 471 { }
451}; 472};
452 473
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index c72565520e41..5a6ae646a636 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -111,8 +111,6 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
111 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 111 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
112 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 112 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
113 113
114 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
115
116 /* General socket configuration */ 114 /* General socket configuration */
117 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 115 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
118 p_dev->conf.IntType = INT_MEMORY_AND_IO; 116 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -198,7 +196,6 @@ static int avmcs_config(struct pcmcia_device *link)
198 */ 196 */
199 i = pcmcia_request_irq(link, &link->irq); 197 i = pcmcia_request_irq(link, &link->irq);
200 if (i != 0) { 198 if (i != 0) {
201 cs_error(link, RequestIRQ, i);
202 /* undo */ 199 /* undo */
203 pcmcia_disable_device(link); 200 pcmcia_disable_device(link);
204 break; 201 break;
@@ -209,7 +206,6 @@ static int avmcs_config(struct pcmcia_device *link)
209 */ 206 */
210 i = pcmcia_request_configuration(link, &link->conf); 207 i = pcmcia_request_configuration(link, &link->conf);
211 if (i != 0) { 208 if (i != 0) {
212 cs_error(link, RequestConfiguration, i);
213 pcmcia_disable_device(link); 209 pcmcia_disable_device(link);
214 break; 210 break;
215 } 211 }
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index 23560c897ec3..f9bdff39cf4a 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -30,22 +30,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for AVM A1/Fritz!PCMCIA car
30MODULE_AUTHOR("Carsten Paeth"); 30MODULE_AUTHOR("Carsten Paeth");
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32 32
33/*
34 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
35 you do not define PCMCIA_DEBUG at all, all the debug code will be
36 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
37 be present but disabled -- but it can then be enabled for specific
38 modules at load time with a 'pc_debug=#' option to insmod.
39*/
40#ifdef PCMCIA_DEBUG
41static int pc_debug = PCMCIA_DEBUG;
42module_param(pc_debug, int, 0);
43#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
44static char *version =
45"avma1_cs.c 1.00 1998/01/23 10:00:00 (Carsten Paeth)";
46#else
47#define DEBUG(n, args...)
48#endif
49 33
50/*====================================================================*/ 34/*====================================================================*/
51 35
@@ -119,7 +103,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
119{ 103{
120 local_info_t *local; 104 local_info_t *local;
121 105
122 DEBUG(0, "avma1cs_attach()\n"); 106 dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
123 107
124 /* Allocate space for private device-specific data */ 108 /* Allocate space for private device-specific data */
125 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 109 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
@@ -139,8 +123,6 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
139 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 123 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
140 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 124 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
141 125
142 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
143
144 /* General socket configuration */ 126 /* General socket configuration */
145 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 127 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
146 p_dev->conf.IntType = INT_MEMORY_AND_IO; 128 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -161,7 +143,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
161 143
162static void avma1cs_detach(struct pcmcia_device *link) 144static void avma1cs_detach(struct pcmcia_device *link)
163{ 145{
164 DEBUG(0, "avma1cs_detach(0x%p)\n", link); 146 dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link);
165 avma1cs_release(link); 147 avma1cs_release(link);
166 kfree(link->priv); 148 kfree(link->priv);
167} /* avma1cs_detach */ 149} /* avma1cs_detach */
@@ -203,7 +185,7 @@ static int avma1cs_config(struct pcmcia_device *link)
203 185
204 dev = link->priv; 186 dev = link->priv;
205 187
206 DEBUG(0, "avma1cs_config(0x%p)\n", link); 188 dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link);
207 189
208 devname[0] = 0; 190 devname[0] = 0;
209 if (link->prod_id[1]) 191 if (link->prod_id[1])
@@ -218,7 +200,6 @@ static int avma1cs_config(struct pcmcia_device *link)
218 */ 200 */
219 i = pcmcia_request_irq(link, &link->irq); 201 i = pcmcia_request_irq(link, &link->irq);
220 if (i != 0) { 202 if (i != 0) {
221 cs_error(link, RequestIRQ, i);
222 /* undo */ 203 /* undo */
223 pcmcia_disable_device(link); 204 pcmcia_disable_device(link);
224 break; 205 break;
@@ -229,7 +210,6 @@ static int avma1cs_config(struct pcmcia_device *link)
229 */ 210 */
230 i = pcmcia_request_configuration(link, &link->conf); 211 i = pcmcia_request_configuration(link, &link->conf);
231 if (i != 0) { 212 if (i != 0) {
232 cs_error(link, RequestConfiguration, i);
233 pcmcia_disable_device(link); 213 pcmcia_disable_device(link);
234 break; 214 break;
235 } 215 }
@@ -281,7 +261,7 @@ static void avma1cs_release(struct pcmcia_device *link)
281{ 261{
282 local_info_t *local = link->priv; 262 local_info_t *local = link->priv;
283 263
284 DEBUG(0, "avma1cs_release(0x%p)\n", link); 264 dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link);
285 265
286 /* now unregister function with hisax */ 266 /* now unregister function with hisax */
287 HiSax_closecard(local->node.minor); 267 HiSax_closecard(local->node.minor);
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index f4d0fe29bcf8..a2f709f53974 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -57,23 +57,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Elsa PCM cards");
57MODULE_AUTHOR("Klaus Lichtenwalder"); 57MODULE_AUTHOR("Klaus Lichtenwalder");
58MODULE_LICENSE("Dual MPL/GPL"); 58MODULE_LICENSE("Dual MPL/GPL");
59 59
60/*
61 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
62 you do not define PCMCIA_DEBUG at all, all the debug code will be
63 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
64 be present but disabled -- but it can then be enabled for specific
65 modules at load time with a 'pc_debug=#' option to insmod.
66*/
67
68#ifdef PCMCIA_DEBUG
69static int pc_debug = PCMCIA_DEBUG;
70module_param(pc_debug, int, 0);
71#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
72static char *version =
73"elsa_cs.c $Revision: 1.2.2.4 $ $Date: 2004/01/25 15:07:06 $ (K.Lichtenwalder)";
74#else
75#define DEBUG(n, args...)
76#endif
77 60
78/*====================================================================*/ 61/*====================================================================*/
79 62
@@ -142,7 +125,7 @@ static int elsa_cs_probe(struct pcmcia_device *link)
142{ 125{
143 local_info_t *local; 126 local_info_t *local;
144 127
145 DEBUG(0, "elsa_cs_attach()\n"); 128 dev_dbg(&link->dev, "elsa_cs_attach()\n");
146 129
147 /* Allocate space for private device-specific data */ 130 /* Allocate space for private device-specific data */
148 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 131 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
@@ -155,7 +138,6 @@ static int elsa_cs_probe(struct pcmcia_device *link)
155 138
156 /* Interrupt setup */ 139 /* Interrupt setup */
157 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 140 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
158 link->irq.IRQInfo1 = IRQ_LEVEL_ID|IRQ_SHARE_ID;
159 link->irq.Handler = NULL; 141 link->irq.Handler = NULL;
160 142
161 /* 143 /*
@@ -188,7 +170,7 @@ static void elsa_cs_detach(struct pcmcia_device *link)
188{ 170{
189 local_info_t *info = link->priv; 171 local_info_t *info = link->priv;
190 172
191 DEBUG(0, "elsa_cs_detach(0x%p)\n", link); 173 dev_dbg(&link->dev, "elsa_cs_detach(0x%p)\n", link);
192 174
193 info->busy = 1; 175 info->busy = 1;
194 elsa_cs_release(link); 176 elsa_cs_release(link);
@@ -231,30 +213,25 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev,
231static int elsa_cs_config(struct pcmcia_device *link) 213static int elsa_cs_config(struct pcmcia_device *link)
232{ 214{
233 local_info_t *dev; 215 local_info_t *dev;
234 int i, last_fn; 216 int i;
235 IsdnCard_t icard; 217 IsdnCard_t icard;
236 218
237 DEBUG(0, "elsa_config(0x%p)\n", link); 219 dev_dbg(&link->dev, "elsa_config(0x%p)\n", link);
238 dev = link->priv; 220 dev = link->priv;
239 221
240 i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL); 222 i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL);
241 if (i != 0) { 223 if (i != 0)
242 last_fn = RequestIO; 224 goto failed;
243 goto cs_failed;
244 }
245 225
246 i = pcmcia_request_irq(link, &link->irq); 226 i = pcmcia_request_irq(link, &link->irq);
247 if (i != 0) { 227 if (i != 0) {
248 link->irq.AssignedIRQ = 0; 228 link->irq.AssignedIRQ = 0;
249 last_fn = RequestIRQ; 229 goto failed;
250 goto cs_failed;
251 } 230 }
252 231
253 i = pcmcia_request_configuration(link, &link->conf); 232 i = pcmcia_request_configuration(link, &link->conf);
254 if (i != 0) { 233 if (i != 0)
255 last_fn = RequestConfiguration; 234 goto failed;
256 goto cs_failed;
257 }
258 235
259 /* At this point, the dev_node_t structure(s) should be 236 /* At this point, the dev_node_t structure(s) should be
260 initialized and arranged in a linked list at link->dev. *//* */ 237 initialized and arranged in a linked list at link->dev. *//* */
@@ -290,8 +267,7 @@ static int elsa_cs_config(struct pcmcia_device *link)
290 ((local_info_t*)link->priv)->cardnr = i; 267 ((local_info_t*)link->priv)->cardnr = i;
291 268
292 return 0; 269 return 0;
293cs_failed: 270failed:
294 cs_error(link, last_fn, i);
295 elsa_cs_release(link); 271 elsa_cs_release(link);
296 return -ENODEV; 272 return -ENODEV;
297} /* elsa_cs_config */ 273} /* elsa_cs_config */
@@ -308,7 +284,7 @@ static void elsa_cs_release(struct pcmcia_device *link)
308{ 284{
309 local_info_t *local = link->priv; 285 local_info_t *local = link->priv;
310 286
311 DEBUG(0, "elsa_cs_release(0x%p)\n", link); 287 dev_dbg(&link->dev, "elsa_cs_release(0x%p)\n", link);
312 288
313 if (local) { 289 if (local) {
314 if (local->cardnr >= 0) { 290 if (local->cardnr >= 0) {
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 9a3c9f5e4fe8..af5d393cc2d0 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -57,24 +57,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Sedlbauer cards");
57MODULE_AUTHOR("Marcus Niemann"); 57MODULE_AUTHOR("Marcus Niemann");
58MODULE_LICENSE("Dual MPL/GPL"); 58MODULE_LICENSE("Dual MPL/GPL");
59 59
60/*
61 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
62 you do not define PCMCIA_DEBUG at all, all the debug code will be
63 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
64 be present but disabled -- but it can then be enabled for specific
65 modules at load time with a 'pc_debug=#' option to insmod.
66*/
67
68#ifdef PCMCIA_DEBUG
69static int pc_debug = PCMCIA_DEBUG;
70module_param(pc_debug, int, 0);
71#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
72static char *version =
73"sedlbauer_cs.c 1.1a 2001/01/28 15:04:04 (M.Niemann)";
74#else
75#define DEBUG(n, args...)
76#endif
77
78 60
79/*====================================================================*/ 61/*====================================================================*/
80 62
@@ -151,7 +133,7 @@ static int sedlbauer_probe(struct pcmcia_device *link)
151{ 133{
152 local_info_t *local; 134 local_info_t *local;
153 135
154 DEBUG(0, "sedlbauer_attach()\n"); 136 dev_dbg(&link->dev, "sedlbauer_attach()\n");
155 137
156 /* Allocate space for private device-specific data */ 138 /* Allocate space for private device-specific data */
157 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 139 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
@@ -163,7 +145,6 @@ static int sedlbauer_probe(struct pcmcia_device *link)
163 145
164 /* Interrupt setup */ 146 /* Interrupt setup */
165 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 147 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
166 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
167 link->irq.Handler = NULL; 148 link->irq.Handler = NULL;
168 149
169 /* 150 /*
@@ -198,7 +179,7 @@ static int sedlbauer_probe(struct pcmcia_device *link)
198 179
199static void sedlbauer_detach(struct pcmcia_device *link) 180static void sedlbauer_detach(struct pcmcia_device *link)
200{ 181{
201 DEBUG(0, "sedlbauer_detach(0x%p)\n", link); 182 dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);
202 183
203 ((local_info_t *)link->priv)->stop = 1; 184 ((local_info_t *)link->priv)->stop = 1;
204 sedlbauer_release(link); 185 sedlbauer_release(link);
@@ -214,9 +195,6 @@ static void sedlbauer_detach(struct pcmcia_device *link)
214 device available to the system. 195 device available to the system.
215 196
216======================================================================*/ 197======================================================================*/
217#define CS_CHECK(fn, ret) \
218do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
219
220static int sedlbauer_config_check(struct pcmcia_device *p_dev, 198static int sedlbauer_config_check(struct pcmcia_device *p_dev,
221 cistpl_cftable_entry_t *cfg, 199 cistpl_cftable_entry_t *cfg,
222 cistpl_cftable_entry_t *dflt, 200 cistpl_cftable_entry_t *dflt,
@@ -293,11 +271,11 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
293 req->Base = mem->win[0].host_addr; 271 req->Base = mem->win[0].host_addr;
294 req->Size = mem->win[0].len; 272 req->Size = mem->win[0].len;
295 req->AccessSpeed = 0; 273 req->AccessSpeed = 0;
296 if (pcmcia_request_window(&p_dev, req, &p_dev->win) != 0) 274 if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0)
297 return -ENODEV; 275 return -ENODEV;
298 map.Page = 0; 276 map.Page = 0;
299 map.CardOffset = mem->win[0].card_addr; 277 map.CardOffset = mem->win[0].card_addr;
300 if (pcmcia_map_mem_page(p_dev->win, &map) != 0) 278 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
301 return -ENODEV; 279 return -ENODEV;
302 } 280 }
303 return 0; 281 return 0;
@@ -309,10 +287,10 @@ static int sedlbauer_config(struct pcmcia_device *link)
309{ 287{
310 local_info_t *dev = link->priv; 288 local_info_t *dev = link->priv;
311 win_req_t *req; 289 win_req_t *req;
312 int last_fn, last_ret; 290 int ret;
313 IsdnCard_t icard; 291 IsdnCard_t icard;
314 292
315 DEBUG(0, "sedlbauer_config(0x%p)\n", link); 293 dev_dbg(&link->dev, "sedlbauer_config(0x%p)\n", link);
316 294
317 req = kzalloc(sizeof(win_req_t), GFP_KERNEL); 295 req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
318 if (!req) 296 if (!req)
@@ -330,8 +308,8 @@ static int sedlbauer_config(struct pcmcia_device *link)
330 these things without consulting the CIS, and most client drivers 308 these things without consulting the CIS, and most client drivers
331 will only use the CIS to fill in implementation-defined details. 309 will only use the CIS to fill in implementation-defined details.
332 */ 310 */
333 last_ret = pcmcia_loop_config(link, sedlbauer_config_check, req); 311 ret = pcmcia_loop_config(link, sedlbauer_config_check, req);
334 if (last_ret) 312 if (ret)
335 goto failed; 313 goto failed;
336 314
337 /* 315 /*
@@ -339,15 +317,20 @@ static int sedlbauer_config(struct pcmcia_device *link)
339 handler to the interrupt, unless the 'Handler' member of the 317 handler to the interrupt, unless the 'Handler' member of the
340 irq structure is initialized. 318 irq structure is initialized.
341 */ 319 */
342 if (link->conf.Attributes & CONF_ENABLE_IRQ) 320 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
343 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 321 ret = pcmcia_request_irq(link, &link->irq);
322 if (ret)
323 goto failed;
324 }
344 325
345 /* 326 /*
346 This actually configures the PCMCIA socket -- setting up 327 This actually configures the PCMCIA socket -- setting up
347 the I/O windows and the interrupt mapping, and putting the 328 the I/O windows and the interrupt mapping, and putting the
348 card and host interface into "Memory and IO" mode. 329 card and host interface into "Memory and IO" mode.
349 */ 330 */
350 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 331 ret = pcmcia_request_configuration(link, &link->conf);
332 if (ret)
333 goto failed;
351 334
352 /* 335 /*
353 At this point, the dev_node_t structure(s) need to be 336 At this point, the dev_node_t structure(s) need to be
@@ -380,19 +363,18 @@ static int sedlbauer_config(struct pcmcia_device *link)
380 icard.protocol = protocol; 363 icard.protocol = protocol;
381 icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA; 364 icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;
382 365
383 last_ret = hisax_init_pcmcia(link, &(((local_info_t*)link->priv)->stop), &icard); 366 ret = hisax_init_pcmcia(link,
384 if (last_ret < 0) { 367 &(((local_info_t *)link->priv)->stop), &icard);
385 printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n", 368 if (ret < 0) {
386 last_ret, link->io.BasePort1); 369 printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n",
370 ret, link->io.BasePort1);
387 sedlbauer_release(link); 371 sedlbauer_release(link);
388 return -ENODEV; 372 return -ENODEV;
389 } else 373 } else
390 ((local_info_t*)link->priv)->cardnr = last_ret; 374 ((local_info_t *)link->priv)->cardnr = ret;
391 375
392 return 0; 376 return 0;
393 377
394cs_failed:
395 cs_error(link, last_fn, last_ret);
396failed: 378failed:
397 sedlbauer_release(link); 379 sedlbauer_release(link);
398 return -ENODEV; 380 return -ENODEV;
@@ -410,7 +392,7 @@ failed:
410static void sedlbauer_release(struct pcmcia_device *link) 392static void sedlbauer_release(struct pcmcia_device *link)
411{ 393{
412 local_info_t *local = link->priv; 394 local_info_t *local = link->priv;
413 DEBUG(0, "sedlbauer_release(0x%p)\n", link); 395 dev_dbg(&link->dev, "sedlbauer_release(0x%p)\n", link);
414 396
415 if (local) { 397 if (local) {
416 if (local->cardnr >= 0) { 398 if (local->cardnr >= 0) {
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index 623d111544d4..ea705394ce2b 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -38,23 +38,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Teles PCMCIA cards");
38MODULE_AUTHOR("Christof Petig, christof.petig@wtal.de, Karsten Keil, kkeil@suse.de"); 38MODULE_AUTHOR("Christof Petig, christof.petig@wtal.de, Karsten Keil, kkeil@suse.de");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41/*
42 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
43 you do not define PCMCIA_DEBUG at all, all the debug code will be
44 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
45 be present but disabled -- but it can then be enabled for specific
46 modules at load time with a 'pc_debug=#' option to insmod.
47*/
48
49#ifdef PCMCIA_DEBUG
50static int pc_debug = PCMCIA_DEBUG;
51module_param(pc_debug, int, 0);
52#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
53static char *version =
54"teles_cs.c 2.10 2002/07/30 22:23:34 kkeil";
55#else
56#define DEBUG(n, args...)
57#endif
58 41
59/*====================================================================*/ 42/*====================================================================*/
60 43
@@ -133,7 +116,7 @@ static int teles_probe(struct pcmcia_device *link)
133{ 116{
134 local_info_t *local; 117 local_info_t *local;
135 118
136 DEBUG(0, "teles_attach()\n"); 119 dev_dbg(&link->dev, "teles_attach()\n");
137 120
138 /* Allocate space for private device-specific data */ 121 /* Allocate space for private device-specific data */
139 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 122 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
@@ -145,7 +128,6 @@ static int teles_probe(struct pcmcia_device *link)
145 128
146 /* Interrupt setup */ 129 /* Interrupt setup */
147 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 130 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
148 link->irq.IRQInfo1 = IRQ_LEVEL_ID|IRQ_SHARE_ID;
149 link->irq.Handler = NULL; 131 link->irq.Handler = NULL;
150 132
151 /* 133 /*
@@ -178,7 +160,7 @@ static void teles_detach(struct pcmcia_device *link)
178{ 160{
179 local_info_t *info = link->priv; 161 local_info_t *info = link->priv;
180 162
181 DEBUG(0, "teles_detach(0x%p)\n", link); 163 dev_dbg(&link->dev, "teles_detach(0x%p)\n", link);
182 164
183 info->busy = 1; 165 info->busy = 1;
184 teles_cs_release(link); 166 teles_cs_release(link);
@@ -221,30 +203,25 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev,
221static int teles_cs_config(struct pcmcia_device *link) 203static int teles_cs_config(struct pcmcia_device *link)
222{ 204{
223 local_info_t *dev; 205 local_info_t *dev;
224 int i, last_fn; 206 int i;
225 IsdnCard_t icard; 207 IsdnCard_t icard;
226 208
227 DEBUG(0, "teles_config(0x%p)\n", link); 209 dev_dbg(&link->dev, "teles_config(0x%p)\n", link);
228 dev = link->priv; 210 dev = link->priv;
229 211
230 i = pcmcia_loop_config(link, teles_cs_configcheck, NULL); 212 i = pcmcia_loop_config(link, teles_cs_configcheck, NULL);
231 if (i != 0) { 213 if (i != 0)
232 last_fn = RequestIO;
233 goto cs_failed; 214 goto cs_failed;
234 }
235 215
236 i = pcmcia_request_irq(link, &link->irq); 216 i = pcmcia_request_irq(link, &link->irq);
237 if (i != 0) { 217 if (i != 0) {
238 link->irq.AssignedIRQ = 0; 218 link->irq.AssignedIRQ = 0;
239 last_fn = RequestIRQ;
240 goto cs_failed; 219 goto cs_failed;
241 } 220 }
242 221
243 i = pcmcia_request_configuration(link, &link->conf); 222 i = pcmcia_request_configuration(link, &link->conf);
244 if (i != 0) { 223 if (i != 0)
245 last_fn = RequestConfiguration;
246 goto cs_failed; 224 goto cs_failed;
247 }
248 225
249 /* At this point, the dev_node_t structure(s) should be 226 /* At this point, the dev_node_t structure(s) should be
250 initialized and arranged in a linked list at link->dev. *//* */ 227 initialized and arranged in a linked list at link->dev. *//* */
@@ -283,7 +260,6 @@ static int teles_cs_config(struct pcmcia_device *link)
283 return 0; 260 return 0;
284 261
285cs_failed: 262cs_failed:
286 cs_error(link, last_fn, i);
287 teles_cs_release(link); 263 teles_cs_release(link);
288 return -ENODEV; 264 return -ENODEV;
289} /* teles_cs_config */ 265} /* teles_cs_config */
@@ -300,7 +276,7 @@ static void teles_cs_release(struct pcmcia_device *link)
300{ 276{
301 local_info_t *local = link->priv; 277 local_info_t *local = link->priv;
302 278
303 DEBUG(0, "teles_cs_release(0x%p)\n", link); 279 dev_dbg(&link->dev, "teles_cs_release(0x%p)\n", link);
304 280
305 if (local) { 281 if (local) {
306 if (local->cardnr >= 0) { 282 if (local->cardnr >= 0) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a053423785c9..e07ce2e033a9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1650,11 +1650,12 @@ static void raid1d(mddev_t *mddev)
1650 r1_bio->sector, 1650 r1_bio->sector,
1651 r1_bio->sectors); 1651 r1_bio->sectors);
1652 unfreeze_array(conf); 1652 unfreeze_array(conf);
1653 } 1653 } else
1654 md_error(mddev,
1655 conf->mirrors[r1_bio->read_disk].rdev);
1654 1656
1655 bio = r1_bio->bios[r1_bio->read_disk]; 1657 bio = r1_bio->bios[r1_bio->read_disk];
1656 if ((disk=read_balance(conf, r1_bio)) == -1 || 1658 if ((disk=read_balance(conf, r1_bio)) == -1) {
1657 disk == r1_bio->read_disk) {
1658 printk(KERN_ALERT "raid1: %s: unrecoverable I/O" 1659 printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
1659 " read error for block %llu\n", 1660 " read error for block %llu\n",
1660 bdevname(bio->bi_bdev,b), 1661 bdevname(bio->bi_bdev,b),
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index ddf639ed2fd8..98082416aa52 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -31,6 +31,7 @@
31#include <linux/wait.h> 31#include <linux/wait.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/poll.h> 33#include <linux/poll.h>
34#include <linux/semaphore.h>
34#include <linux/module.h> 35#include <linux/module.h>
35#include <linux/list.h> 36#include <linux/list.h>
36#include <linux/freezer.h> 37#include <linux/freezer.h>
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 49b7885c2702..7f27576ca046 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -29,7 +29,7 @@
29/* Current settings - values are 2*2^(reg_val/4) microamps. These are 29/* Current settings - values are 2*2^(reg_val/4) microamps. These are
30 * exported since they are used by multiple drivers. 30 * exported since they are used by multiple drivers.
31 */ 31 */
32int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL] = { 32int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
33 2, 33 2,
34 2, 34 2,
35 3, 35 3,
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index b00d67319058..9fb480bb0e0a 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -760,6 +760,8 @@ static int pxamci_remove(struct platform_device *pdev)
760 if (mmc) { 760 if (mmc) {
761 struct pxamci_host *host = mmc_priv(mmc); 761 struct pxamci_host *host = mmc_priv(mmc);
762 762
763 mmc_remove_host(mmc);
764
763 if (host->pdata) { 765 if (host->pdata) {
764 gpio_cd = host->pdata->gpio_card_detect; 766 gpio_cd = host->pdata->gpio_card_detect;
765 gpio_ro = host->pdata->gpio_card_ro; 767 gpio_ro = host->pdata->gpio_card_ro;
@@ -779,8 +781,6 @@ static int pxamci_remove(struct platform_device *pdev)
779 if (host->pdata && host->pdata->exit) 781 if (host->pdata && host->pdata->exit)
780 host->pdata->exit(&pdev->dev, mmc); 782 host->pdata->exit(&pdev->dev, mmc);
781 783
782 mmc_remove_host(mmc);
783
784 pxamci_stop_clock(host); 784 pxamci_stop_clock(host);
785 writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD| 785 writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
786 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, 786 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index d600c2deff73..689d6a79ffc0 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -118,11 +118,9 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
118 DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x", 118 DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
119 dev->offset, mrq.CardOffset); 119 dev->offset, mrq.CardOffset);
120 mrq.Page = 0; 120 mrq.Page = 0;
121 ret = pcmcia_map_mem_page(win, &mrq); 121 ret = pcmcia_map_mem_page(dev->p_dev, win, &mrq);
122 if (ret != 0) { 122 if (ret != 0)
123 cs_error(dev->p_dev, MapMemPage, ret);
124 return NULL; 123 return NULL;
125 }
126 dev->offset = mrq.CardOffset; 124 dev->offset = mrq.CardOffset;
127 } 125 }
128 return dev->win_base + (to & (dev->win_size-1)); 126 return dev->win_base + (to & (dev->win_size-1));
@@ -327,8 +325,6 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
327 325
328 DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); 326 DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
329 ret = pcmcia_modify_configuration(link, &mod); 327 ret = pcmcia_modify_configuration(link, &mod);
330 if (ret != 0)
331 cs_error(link, ModifyConfiguration, ret);
332} 328}
333 329
334 330
@@ -348,107 +344,116 @@ static void pcmciamtd_release(struct pcmcia_device *link)
348 iounmap(dev->win_base); 344 iounmap(dev->win_base);
349 dev->win_base = NULL; 345 dev->win_base = NULL;
350 } 346 }
351 pcmcia_release_window(link->win); 347 pcmcia_release_window(link, link->win);
352 } 348 }
353 pcmcia_disable_device(link); 349 pcmcia_disable_device(link);
354} 350}
355 351
356 352
357static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
358{
359 int rc;
360 tuple_t tuple;
361 cisparse_t parse;
362 u_char buf[64];
363
364 tuple.Attributes = 0;
365 tuple.TupleData = (cisdata_t *)buf;
366 tuple.TupleDataMax = sizeof(buf);
367 tuple.TupleOffset = 0;
368 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
369
370 rc = pcmcia_get_first_tuple(link, &tuple);
371 while (rc == 0) {
372 rc = pcmcia_get_tuple_data(link, &tuple);
373 if (rc != 0) {
374 cs_error(link, GetTupleData, rc);
375 break;
376 }
377 rc = pcmcia_parse_tuple(&tuple, &parse);
378 if (rc != 0) {
379 cs_error(link, ParseTuple, rc);
380 break;
381 }
382
383 switch(tuple.TupleCode) {
384 case CISTPL_FORMAT: {
385 cistpl_format_t *t = &parse.format;
386 (void)t; /* Shut up, gcc */
387 DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
388 t->type, t->edc, t->offset, t->length);
389 break;
390
391 }
392
393 case CISTPL_DEVICE: {
394 cistpl_device_t *t = &parse.device;
395 int i;
396 DEBUG(2, "Common memory:");
397 dev->pcmcia_map.size = t->dev[0].size;
398 for(i = 0; i < t->ndev; i++) {
399 DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
400 DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
401 DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
402 DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
403 }
404 break;
405 }
406
407 case CISTPL_VERS_1: {
408 cistpl_vers_1_t *t = &parse.version_1;
409 int i;
410 if(t->ns) {
411 dev->mtd_name[0] = '\0';
412 for(i = 0; i < t->ns; i++) {
413 if(i)
414 strcat(dev->mtd_name, " ");
415 strcat(dev->mtd_name, t->str+t->ofs[i]);
416 }
417 }
418 DEBUG(2, "Found name: %s", dev->mtd_name);
419 break;
420 }
421
422 case CISTPL_JEDEC_C: {
423 cistpl_jedec_t *t = &parse.jedec;
424 int i;
425 for(i = 0; i < t->nid; i++) {
426 DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
427 }
428 break;
429 }
430
431 case CISTPL_DEVICE_GEO: {
432 cistpl_device_geo_t *t = &parse.device_geo;
433 int i;
434 dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
435 for(i = 0; i < t->ngeo; i++) {
436 DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
437 DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
438 DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
439 DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
440 DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
441 DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
442 }
443 break;
444 }
445
446 default:
447 DEBUG(2, "Unknown tuple code %d", tuple.TupleCode);
448 }
449
450 rc = pcmcia_get_next_tuple(link, &tuple);
451 }

353#ifdef CONFIG_MTD_DEBUG
354static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
355 tuple_t *tuple,
356 void *priv_data)
357{
358 cisparse_t parse;
359
360 if (!pcmcia_parse_tuple(tuple, &parse)) {
361 cistpl_format_t *t = &parse.format;
362 (void)t; /* Shut up, gcc */
363 DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
364 t->type, t->edc, t->offset, t->length);
365 }
366 return -ENOSPC;
367}
368
369static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
370 tuple_t *tuple,
371 void *priv_data)
372{
373 cisparse_t parse;
374 int i;
375
376 if (!pcmcia_parse_tuple(tuple, &parse)) {
377 cistpl_jedec_t *t = &parse.jedec;
378 for (i = 0; i < t->nid; i++)
379 DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
380 }
381 return -ENOSPC;
382}
383#endif
384
385static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
386 tuple_t *tuple,
387 void *priv_data)
388{
389 struct pcmciamtd_dev *dev = priv_data;
390 cisparse_t parse;
391 cistpl_device_t *t = &parse.device;
392 int i;
393
394 if (pcmcia_parse_tuple(tuple, &parse))
395 return -EINVAL;
396
397 DEBUG(2, "Common memory:");
398 dev->pcmcia_map.size = t->dev[0].size;
399 /* from here on: DEBUG only */
400 for (i = 0; i < t->ndev; i++) {
401 DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
402 DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
403 DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
404 DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
405 }
406 return 0;
407}
408
409static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
410 tuple_t *tuple,
411 void *priv_data)
412{
413 struct pcmciamtd_dev *dev = priv_data;
414 cisparse_t parse;
415 cistpl_device_geo_t *t = &parse.device_geo;
416 int i;
417
418 if (pcmcia_parse_tuple(tuple, &parse))
419 return -EINVAL;
420
421 dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
422 /* from here on: DEBUG only */
423 for (i = 0; i < t->ngeo; i++) {
424 DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
425 DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
426 DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
427 DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
428 DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
429 DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
430 }
431 return 0;
432}
433
434
435static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
436{
437 int i;
438
439 if (p_dev->prod_id[0]) {
440 dev->mtd_name[0] = '\0';
441 for (i = 0; i < 4; i++) {
442 if (i)
443 strcat(dev->mtd_name, " ");
444 if (p_dev->prod_id[i])
445 strcat(dev->mtd_name, p_dev->prod_id[i]);
446 }
447 DEBUG(2, "Found name: %s", dev->mtd_name);
448 }
449
450#ifdef CONFIG_MTD_DEBUG
451 pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
452 pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
453#endif
454 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
455 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
456
452 if(!dev->pcmcia_map.size) 457 if(!dev->pcmcia_map.size)
453 dev->pcmcia_map.size = MAX_PCMCIA_ADDR; 458 dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
454 459
@@ -481,16 +486,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link,
481 * MTD device available to the system. 486 * MTD device available to the system.
482 */ 487 */
483 488
484#define CS_CHECK(fn, ret) \
485do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
486
487static int pcmciamtd_config(struct pcmcia_device *link) 489static int pcmciamtd_config(struct pcmcia_device *link)
488{ 490{
489 struct pcmciamtd_dev *dev = link->priv; 491 struct pcmciamtd_dev *dev = link->priv;
490 struct mtd_info *mtd = NULL; 492 struct mtd_info *mtd = NULL;
491 cs_status_t status; 493 cs_status_t status;
492 win_req_t req; 494 win_req_t req;
493 int last_ret = 0, last_fn = 0;
494 int ret; 495 int ret;
495 int i; 496 int i;
496 static char *probes[] = { "jedec_probe", "cfi_probe" }; 497 static char *probes[] = { "jedec_probe", "cfi_probe" };
@@ -529,7 +530,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
529 int ret; 530 int ret;
530 DEBUG(2, "requesting window with size = %dKiB memspeed = %d", 531 DEBUG(2, "requesting window with size = %dKiB memspeed = %d",
531 req.Size >> 10, req.AccessSpeed); 532 req.Size >> 10, req.AccessSpeed);
532 ret = pcmcia_request_window(&link, &req, &link->win); 533 ret = pcmcia_request_window(link, &req, &link->win);
533 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); 534 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
534 if(ret) { 535 if(ret) {
535 req.Size >>= 1; 536 req.Size >>= 1;
@@ -577,7 +578,6 @@ static int pcmciamtd_config(struct pcmcia_device *link)
577 DEBUG(2, "Setting Configuration"); 578 DEBUG(2, "Setting Configuration");
578 ret = pcmcia_request_configuration(link, &link->conf); 579 ret = pcmcia_request_configuration(link, &link->conf);
579 if (ret != 0) { 580 if (ret != 0) {
580 cs_error(link, RequestConfiguration, ret);
581 if (dev->win_base) { 581 if (dev->win_base) {
582 iounmap(dev->win_base); 582 iounmap(dev->win_base);
583 dev->win_base = NULL; 583 dev->win_base = NULL;
@@ -652,8 +652,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
652 link->dev_node = &dev->node; 652 link->dev_node = &dev->node;
653 return 0; 653 return 0;
654 654
655 cs_failed: 655 failed:
656 cs_error(link, last_fn, last_ret);
657 err("CS Error, exiting"); 656 err("CS Error, exiting");
658 pcmciamtd_release(link); 657 pcmciamtd_release(link);
659 return -ENODEV; 658 return -ENODEV;
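Note on the pcmciamtd hunks above: the open-coded pcmcia_get_first_tuple()/pcmcia_get_next_tuple() walk is replaced by per-tuple callbacks driven by pcmcia_loop_tuple(). A minimal sketch of that callback shape follows, assuming the same 2.6.32-era pcmcia headers this diff targets; the example_* names are hypothetical, only the pcmcia_* calls come from the hunks themselves.

#include <linux/kernel.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Hypothetical callback: parse one CISTPL_DEVICE_GEO tuple and report the
 * first region's bus width back through priv_data, mirroring the shape of
 * pcmciamtd_cistpl_geo() above. */
static int example_geo_cb(struct pcmcia_device *p_dev, tuple_t *tuple,
                          void *priv_data)
{
        cisparse_t parse;

        if (pcmcia_parse_tuple(tuple, &parse))
                return -EINVAL;

        *(unsigned int *)priv_data = parse.device_geo.geo[0].buswidth;
        return 0;
}

/* Caller: let the pcmcia core iterate the matching tuples instead of
 * open-coding the get_first/get_next loop. */
static void example_probe_geometry(struct pcmcia_device *p_dev)
{
        unsigned int bankwidth = 0;

        pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, example_geo_cb, &bankwidth);
}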
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index b58965a2b3ae..17a27225cc98 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -118,14 +118,6 @@ INT_MODULE_PARM(full_duplex, 0);
118/* Autodetect link polarity reversal? */ 118/* Autodetect link polarity reversal? */
119INT_MODULE_PARM(auto_polarity, 1); 119INT_MODULE_PARM(auto_polarity, 1);
120 120
121#ifdef PCMCIA_DEBUG
122INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
123#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
124static char *version =
125"3c574_cs.c 1.65ac1 2003/04/07 Donald Becker/David Hinds, becker@scyld.com.\n";
126#else
127#define DEBUG(n, args...)
128#endif
129 121
130/*====================================================================*/ 122/*====================================================================*/
131 123
@@ -278,7 +270,7 @@ static int tc574_probe(struct pcmcia_device *link)
278 struct el3_private *lp; 270 struct el3_private *lp;
279 struct net_device *dev; 271 struct net_device *dev;
280 272
281 DEBUG(0, "3c574_attach()\n"); 273 dev_dbg(&link->dev, "3c574_attach()\n");
282 274
283 /* Create the PC card device object. */ 275 /* Create the PC card device object. */
284 dev = alloc_etherdev(sizeof(struct el3_private)); 276 dev = alloc_etherdev(sizeof(struct el3_private));
@@ -291,10 +283,8 @@ static int tc574_probe(struct pcmcia_device *link)
291 spin_lock_init(&lp->window_lock); 283 spin_lock_init(&lp->window_lock);
292 link->io.NumPorts1 = 32; 284 link->io.NumPorts1 = 32;
293 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 285 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
294 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 286 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
295 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
296 link->irq.Handler = &el3_interrupt; 287 link->irq.Handler = &el3_interrupt;
297 link->irq.Instance = dev;
298 link->conf.Attributes = CONF_ENABLE_IRQ; 288 link->conf.Attributes = CONF_ENABLE_IRQ;
299 link->conf.IntType = INT_MEMORY_AND_IO; 289 link->conf.IntType = INT_MEMORY_AND_IO;
300 link->conf.ConfigIndex = 1; 290 link->conf.ConfigIndex = 1;
@@ -319,7 +309,7 @@ static void tc574_detach(struct pcmcia_device *link)
319{ 309{
320 struct net_device *dev = link->priv; 310 struct net_device *dev = link->priv;
321 311
322 DEBUG(0, "3c574_detach(0x%p)\n", link); 312 dev_dbg(&link->dev, "3c574_detach()\n");
323 313
324 if (link->dev_node) 314 if (link->dev_node)
325 unregister_netdev(dev); 315 unregister_netdev(dev);
@@ -335,26 +325,23 @@ static void tc574_detach(struct pcmcia_device *link)
335 ethernet device available to the system. 325 ethernet device available to the system.
336*/ 326*/
337 327
338#define CS_CHECK(fn, ret) \
339 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
340
341static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 328static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
342 329
343static int tc574_config(struct pcmcia_device *link) 330static int tc574_config(struct pcmcia_device *link)
344{ 331{
345 struct net_device *dev = link->priv; 332 struct net_device *dev = link->priv;
346 struct el3_private *lp = netdev_priv(dev); 333 struct el3_private *lp = netdev_priv(dev);
347 tuple_t tuple; 334 int ret, i, j;
348 __le16 buf[32];
349 int last_fn, last_ret, i, j;
350 unsigned int ioaddr; 335 unsigned int ioaddr;
351 __be16 *phys_addr; 336 __be16 *phys_addr;
352 char *cardname; 337 char *cardname;
353 __u32 config; 338 __u32 config;
339 u8 *buf;
340 size_t len;
354 341
355 phys_addr = (__be16 *)dev->dev_addr; 342 phys_addr = (__be16 *)dev->dev_addr;
356 343
357 DEBUG(0, "3c574_config(0x%p)\n", link); 344 dev_dbg(&link->dev, "3c574_config()\n");
358 345
359 link->io.IOAddrLines = 16; 346 link->io.IOAddrLines = 16;
360 for (i = j = 0; j < 0x400; j += 0x20) { 347 for (i = j = 0; j < 0x400; j += 0x20) {
@@ -363,12 +350,16 @@ static int tc574_config(struct pcmcia_device *link)
363 if (i == 0) 350 if (i == 0)
364 break; 351 break;
365 } 352 }
366 if (i != 0) { 353 if (i != 0)
367 cs_error(link, RequestIO, i); 354 goto failed;
355
356 ret = pcmcia_request_irq(link, &link->irq);
357 if (ret)
358 goto failed;
359
360 ret = pcmcia_request_configuration(link, &link->conf);
361 if (ret)
368 goto failed; 362 goto failed;
369 }
370 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
371 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
372 363
373 dev->irq = link->irq.AssignedIRQ; 364 dev->irq = link->irq.AssignedIRQ;
374 dev->base_addr = link->io.BasePort1; 365 dev->base_addr = link->io.BasePort1;
@@ -378,16 +369,14 @@ static int tc574_config(struct pcmcia_device *link)
378 /* The 3c574 normally uses an EEPROM for configuration info, including 369 /* The 3c574 normally uses an EEPROM for configuration info, including
379 the hardware address. The future products may include a modem chip 370 the hardware address. The future products may include a modem chip
380 and put the address in the CIS. */ 371 and put the address in the CIS. */
381 tuple.Attributes = 0; 372
382 tuple.TupleData = (cisdata_t *)buf; 373 len = pcmcia_get_tuple(link, 0x88, &buf);
383 tuple.TupleDataMax = 64; 374 if (buf && len >= 6) {
384 tuple.TupleOffset = 0;
385 tuple.DesiredTuple = 0x88;
386 if (pcmcia_get_first_tuple(link, &tuple) == 0) {
387 pcmcia_get_tuple_data(link, &tuple);
388 for (i = 0; i < 3; i++) 375 for (i = 0; i < 3; i++)
389 phys_addr[i] = htons(le16_to_cpu(buf[i])); 376 phys_addr[i] = htons(le16_to_cpu(buf[i * 2]));
377 kfree(buf);
390 } else { 378 } else {
379 kfree(buf); /* 0 < len < 6 */
391 EL3WINDOW(0); 380 EL3WINDOW(0);
392 for (i = 0; i < 3; i++) 381 for (i = 0; i < 3; i++)
393 phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); 382 phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
@@ -435,7 +424,8 @@ static int tc574_config(struct pcmcia_device *link)
435 mii_status = mdio_read(ioaddr, phy & 0x1f, 1); 424 mii_status = mdio_read(ioaddr, phy & 0x1f, 1);
436 if (mii_status != 0xffff) { 425 if (mii_status != 0xffff) {
437 lp->phys = phy & 0x1f; 426 lp->phys = phy & 0x1f;
438 DEBUG(0, " MII transceiver at index %d, status %x.\n", 427 dev_dbg(&link->dev, " MII transceiver at "
428 "index %d, status %x.\n",
439 phy, mii_status); 429 phy, mii_status);
440 if ((mii_status & 0x0040) == 0) 430 if ((mii_status & 0x0040) == 0)
441 mii_preamble_required = 1; 431 mii_preamble_required = 1;
@@ -457,7 +447,7 @@ static int tc574_config(struct pcmcia_device *link)
457 } 447 }
458 448
459 link->dev_node = &lp->node; 449 link->dev_node = &lp->node;
460 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 450 SET_NETDEV_DEV(dev, &link->dev);
461 451
462 if (register_netdev(dev) != 0) { 452 if (register_netdev(dev) != 0) {
463 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n"); 453 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
@@ -478,8 +468,6 @@ static int tc574_config(struct pcmcia_device *link)
478 468
479 return 0; 469 return 0;
480 470
481cs_failed:
482 cs_error(link, last_fn, last_ret);
483failed: 471failed:
484 tc574_release(link); 472 tc574_release(link);
485 return -ENODEV; 473 return -ENODEV;
@@ -738,7 +726,7 @@ static int el3_open(struct net_device *dev)
738 lp->media.expires = jiffies + HZ; 726 lp->media.expires = jiffies + HZ;
739 add_timer(&lp->media); 727 add_timer(&lp->media);
740 728
741 DEBUG(2, "%s: opened, status %4.4x.\n", 729 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
742 dev->name, inw(dev->base_addr + EL3_STATUS)); 730 dev->name, inw(dev->base_addr + EL3_STATUS));
743 731
744 return 0; 732 return 0;
@@ -772,7 +760,7 @@ static void pop_tx_status(struct net_device *dev)
772 if (tx_status & 0x30) 760 if (tx_status & 0x30)
773 tc574_wait_for_completion(dev, TxReset); 761 tc574_wait_for_completion(dev, TxReset);
774 if (tx_status & 0x38) { 762 if (tx_status & 0x38) {
775 DEBUG(1, "%s: transmit error: status 0x%02x\n", 763 pr_debug("%s: transmit error: status 0x%02x\n",
776 dev->name, tx_status); 764 dev->name, tx_status);
777 outw(TxEnable, ioaddr + EL3_CMD); 765 outw(TxEnable, ioaddr + EL3_CMD);
778 dev->stats.tx_aborted_errors++; 766 dev->stats.tx_aborted_errors++;
@@ -788,7 +776,7 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
788 struct el3_private *lp = netdev_priv(dev); 776 struct el3_private *lp = netdev_priv(dev);
789 unsigned long flags; 777 unsigned long flags;
790 778
791 DEBUG(3, "%s: el3_start_xmit(length = %ld) called, " 779 pr_debug("%s: el3_start_xmit(length = %ld) called, "
792 "status %4.4x.\n", dev->name, (long)skb->len, 780 "status %4.4x.\n", dev->name, (long)skb->len,
793 inw(ioaddr + EL3_STATUS)); 781 inw(ioaddr + EL3_STATUS));
794 782
@@ -827,7 +815,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
827 return IRQ_NONE; 815 return IRQ_NONE;
828 ioaddr = dev->base_addr; 816 ioaddr = dev->base_addr;
829 817
830 DEBUG(3, "%s: interrupt, status %4.4x.\n", 818 pr_debug("%s: interrupt, status %4.4x.\n",
831 dev->name, inw(ioaddr + EL3_STATUS)); 819 dev->name, inw(ioaddr + EL3_STATUS));
832 820
833 spin_lock(&lp->window_lock); 821 spin_lock(&lp->window_lock);
@@ -836,7 +824,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
836 (IntLatch | RxComplete | RxEarly | StatsFull)) { 824 (IntLatch | RxComplete | RxEarly | StatsFull)) {
837 if (!netif_device_present(dev) || 825 if (!netif_device_present(dev) ||
838 ((status & 0xe000) != 0x2000)) { 826 ((status & 0xe000) != 0x2000)) {
839 DEBUG(1, "%s: Interrupt from dead card\n", dev->name); 827 pr_debug("%s: Interrupt from dead card\n", dev->name);
840 break; 828 break;
841 } 829 }
842 830
@@ -846,7 +834,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
846 work_budget = el3_rx(dev, work_budget); 834 work_budget = el3_rx(dev, work_budget);
847 835
848 if (status & TxAvailable) { 836 if (status & TxAvailable) {
849 DEBUG(3, " TX room bit was handled.\n"); 837 pr_debug(" TX room bit was handled.\n");
850 /* There's room in the FIFO for a full-sized packet. */ 838 /* There's room in the FIFO for a full-sized packet. */
851 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 839 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
852 netif_wake_queue(dev); 840 netif_wake_queue(dev);
@@ -886,7 +874,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
886 } 874 }
887 875
888 if (--work_budget < 0) { 876 if (--work_budget < 0) {
889 DEBUG(0, "%s: Too much work in interrupt, " 877 pr_debug("%s: Too much work in interrupt, "
890 "status %4.4x.\n", dev->name, status); 878 "status %4.4x.\n", dev->name, status);
891 /* Clear all interrupts */ 879 /* Clear all interrupts */
892 outw(AckIntr | 0xFF, ioaddr + EL3_CMD); 880 outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
@@ -896,7 +884,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
896 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); 884 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
897 } 885 }
898 886
899 DEBUG(3, "%s: exiting interrupt, status %4.4x.\n", 887 pr_debug("%s: exiting interrupt, status %4.4x.\n",
900 dev->name, inw(ioaddr + EL3_STATUS)); 888 dev->name, inw(ioaddr + EL3_STATUS));
901 889
902 spin_unlock(&lp->window_lock); 890 spin_unlock(&lp->window_lock);
@@ -1003,7 +991,7 @@ static void update_stats(struct net_device *dev)
1003 unsigned int ioaddr = dev->base_addr; 991 unsigned int ioaddr = dev->base_addr;
1004 u8 rx, tx, up; 992 u8 rx, tx, up;
1005 993
1006 DEBUG(2, "%s: updating the statistics.\n", dev->name); 994 pr_debug("%s: updating the statistics.\n", dev->name);
1007 995
1008 if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */ 996 if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */
1009 return; 997 return;
@@ -1039,7 +1027,7 @@ static int el3_rx(struct net_device *dev, int worklimit)
1039 unsigned int ioaddr = dev->base_addr; 1027 unsigned int ioaddr = dev->base_addr;
1040 short rx_status; 1028 short rx_status;
1041 1029
1042 DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", 1030 pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
1043 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); 1031 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
1044 while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && 1032 while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
1045 worklimit > 0) { 1033 worklimit > 0) {
@@ -1061,7 +1049,7 @@ static int el3_rx(struct net_device *dev, int worklimit)
1061 1049
1062 skb = dev_alloc_skb(pkt_len+5); 1050 skb = dev_alloc_skb(pkt_len+5);
1063 1051
1064 DEBUG(3, " Receiving packet size %d status %4.4x.\n", 1052 pr_debug(" Receiving packet size %d status %4.4x.\n",
1065 pkt_len, rx_status); 1053 pkt_len, rx_status);
1066 if (skb != NULL) { 1054 if (skb != NULL) {
1067 skb_reserve(skb, 2); 1055 skb_reserve(skb, 2);
@@ -1072,7 +1060,7 @@ static int el3_rx(struct net_device *dev, int worklimit)
1072 dev->stats.rx_packets++; 1060 dev->stats.rx_packets++;
1073 dev->stats.rx_bytes += pkt_len; 1061 dev->stats.rx_bytes += pkt_len;
1074 } else { 1062 } else {
1075 DEBUG(1, "%s: couldn't allocate a sk_buff of" 1063 pr_debug("%s: couldn't allocate a sk_buff of"
1076 " size %d.\n", dev->name, pkt_len); 1064 " size %d.\n", dev->name, pkt_len);
1077 dev->stats.rx_dropped++; 1065 dev->stats.rx_dropped++;
1078 } 1066 }
@@ -1101,7 +1089,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1101 struct mii_ioctl_data *data = if_mii(rq); 1089 struct mii_ioctl_data *data = if_mii(rq);
1102 int phy = lp->phys & 0x1f; 1090 int phy = lp->phys & 0x1f;
1103 1091
1104 DEBUG(2, "%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n", 1092 pr_debug("%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n",
1105 dev->name, rq->ifr_ifrn.ifrn_name, cmd, 1093 dev->name, rq->ifr_ifrn.ifrn_name, cmd,
1106 data->phy_id, data->reg_num, data->val_in, data->val_out); 1094 data->phy_id, data->reg_num, data->val_in, data->val_out);
1107 1095
@@ -1178,7 +1166,7 @@ static int el3_close(struct net_device *dev)
1178 struct el3_private *lp = netdev_priv(dev); 1166 struct el3_private *lp = netdev_priv(dev);
1179 struct pcmcia_device *link = lp->p_dev; 1167 struct pcmcia_device *link = lp->p_dev;
1180 1168
1181 DEBUG(2, "%s: shutting down ethercard.\n", dev->name); 1169 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
1182 1170
1183 if (pcmcia_dev_present(link)) { 1171 if (pcmcia_dev_present(link)) {
1184 unsigned long flags; 1172 unsigned long flags;
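Note on the 3c574 config hunk above: the tuple_t/buf[32] bookkeeping is replaced by pcmcia_get_tuple(), which allocates the tuple data and returns its length, leaving buffer ownership with the caller. A sketch of that pattern, with the byte-order handling of the address words omitted; tuple code 0x88 and the 6-byte minimum are taken from the hunk, example_read_cis_mac() is a hypothetical name.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <pcmcia/ds.h>

static int example_read_cis_mac(struct pcmcia_device *link, u8 *addr)
{
        u8 *buf;
        size_t len;

        len = pcmcia_get_tuple(link, 0x88, &buf);
        if (!buf || len < 6) {
                kfree(buf);             /* kfree(NULL) is a no-op */
                return -ENODEV;         /* caller falls back to the EEPROM */
        }

        memcpy(addr, buf, 6);           /* real driver swaps 16-bit words */
        kfree(buf);
        return 0;
}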
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 569fb06793cf..6f8d7e2e5922 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -130,14 +130,6 @@ MODULE_LICENSE("GPL");
130/* Special hook for setting if_port when module is loaded */ 130/* Special hook for setting if_port when module is loaded */
131INT_MODULE_PARM(if_port, 0); 131INT_MODULE_PARM(if_port, 0);
132 132
133#ifdef PCMCIA_DEBUG
134INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
135#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
136static char *version =
137DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)";
138#else
139#define DEBUG(n, args...)
140#endif
141 133
142/*====================================================================*/ 134/*====================================================================*/
143 135
@@ -189,7 +181,7 @@ static int tc589_probe(struct pcmcia_device *link)
189 struct el3_private *lp; 181 struct el3_private *lp;
190 struct net_device *dev; 182 struct net_device *dev;
191 183
192 DEBUG(0, "3c589_attach()\n"); 184 dev_dbg(&link->dev, "3c589_attach()\n");
193 185
194 /* Create new ethernet device */ 186 /* Create new ethernet device */
195 dev = alloc_etherdev(sizeof(struct el3_private)); 187 dev = alloc_etherdev(sizeof(struct el3_private));
@@ -202,10 +194,8 @@ static int tc589_probe(struct pcmcia_device *link)
202 spin_lock_init(&lp->lock); 194 spin_lock_init(&lp->lock);
203 link->io.NumPorts1 = 16; 195 link->io.NumPorts1 = 16;
204 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 196 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
205 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 197 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
206 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
207 link->irq.Handler = &el3_interrupt; 198 link->irq.Handler = &el3_interrupt;
208 link->irq.Instance = dev;
209 link->conf.Attributes = CONF_ENABLE_IRQ; 199 link->conf.Attributes = CONF_ENABLE_IRQ;
210 link->conf.IntType = INT_MEMORY_AND_IO; 200 link->conf.IntType = INT_MEMORY_AND_IO;
211 link->conf.ConfigIndex = 1; 201 link->conf.ConfigIndex = 1;
@@ -231,7 +221,7 @@ static void tc589_detach(struct pcmcia_device *link)
231{ 221{
232 struct net_device *dev = link->priv; 222 struct net_device *dev = link->priv;
233 223
234 DEBUG(0, "3c589_detach(0x%p)\n", link); 224 dev_dbg(&link->dev, "3c589_detach\n");
235 225
236 if (link->dev_node) 226 if (link->dev_node)
237 unregister_netdev(dev); 227 unregister_netdev(dev);
@@ -249,29 +239,20 @@ static void tc589_detach(struct pcmcia_device *link)
249 239
250======================================================================*/ 240======================================================================*/
251 241
252#define CS_CHECK(fn, ret) \
253do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
254
255static int tc589_config(struct pcmcia_device *link) 242static int tc589_config(struct pcmcia_device *link)
256{ 243{
257 struct net_device *dev = link->priv; 244 struct net_device *dev = link->priv;
258 struct el3_private *lp = netdev_priv(dev); 245 struct el3_private *lp = netdev_priv(dev);
259 tuple_t tuple;
260 __le16 buf[32];
261 __be16 *phys_addr; 246 __be16 *phys_addr;
262 int last_fn, last_ret, i, j, multi = 0, fifo; 247 int ret, i, j, multi = 0, fifo;
263 unsigned int ioaddr; 248 unsigned int ioaddr;
264 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 249 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
250 u8 *buf;
251 size_t len;
265 252
266 DEBUG(0, "3c589_config(0x%p)\n", link); 253 dev_dbg(&link->dev, "3c589_config\n");
267 254
268 phys_addr = (__be16 *)dev->dev_addr; 255 phys_addr = (__be16 *)dev->dev_addr;
269 tuple.Attributes = 0;
270 tuple.TupleData = (cisdata_t *)buf;
271 tuple.TupleDataMax = sizeof(buf);
272 tuple.TupleOffset = 0;
273 tuple.Attributes = TUPLE_RETURN_COMMON;
274
275 /* Is this a 3c562? */ 256 /* Is this a 3c562? */
276 if (link->manf_id != MANFID_3COM) 257 if (link->manf_id != MANFID_3COM)
277 printk(KERN_INFO "3c589_cs: hmmm, is this really a " 258 printk(KERN_INFO "3c589_cs: hmmm, is this really a "
@@ -287,12 +268,16 @@ static int tc589_config(struct pcmcia_device *link)
287 if (i == 0) 268 if (i == 0)
288 break; 269 break;
289 } 270 }
290 if (i != 0) { 271 if (i != 0)
291 cs_error(link, RequestIO, i);
292 goto failed; 272 goto failed;
293 } 273
294 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 274 ret = pcmcia_request_irq(link, &link->irq);
295 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 275 if (ret)
276 goto failed;
277
278 ret = pcmcia_request_configuration(link, &link->conf);
279 if (ret)
280 goto failed;
296 281
297 dev->irq = link->irq.AssignedIRQ; 282 dev->irq = link->irq.AssignedIRQ;
298 dev->base_addr = link->io.BasePort1; 283 dev->base_addr = link->io.BasePort1;
@@ -301,12 +286,13 @@ static int tc589_config(struct pcmcia_device *link)
301 286
302 /* The 3c589 has an extra EEPROM for configuration info, including 287 /* The 3c589 has an extra EEPROM for configuration info, including
303 the hardware address. The 3c562 puts the address in the CIS. */ 288 the hardware address. The 3c562 puts the address in the CIS. */
304 tuple.DesiredTuple = 0x88; 289 len = pcmcia_get_tuple(link, 0x88, &buf);
305 if (pcmcia_get_first_tuple(link, &tuple) == 0) { 290 if (buf && len >= 6) {
306 pcmcia_get_tuple_data(link, &tuple); 291 for (i = 0; i < 3; i++)
307 for (i = 0; i < 3; i++) 292 phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
308 phys_addr[i] = htons(le16_to_cpu(buf[i])); 293 kfree(buf);
309 } else { 294 } else {
295 kfree(buf); /* 0 < len < 6 */
310 for (i = 0; i < 3; i++) 296 for (i = 0; i < 3; i++)
311 phys_addr[i] = htons(read_eeprom(ioaddr, i)); 297 phys_addr[i] = htons(read_eeprom(ioaddr, i));
312 if (phys_addr[0] == htons(0x6060)) { 298 if (phys_addr[0] == htons(0x6060)) {
@@ -328,7 +314,7 @@ static int tc589_config(struct pcmcia_device *link)
328 printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); 314 printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
329 315
330 link->dev_node = &lp->node; 316 link->dev_node = &lp->node;
331 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 317 SET_NETDEV_DEV(dev, &link->dev);
332 318
333 if (register_netdev(dev) != 0) { 319 if (register_netdev(dev) != 0) {
334 printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); 320 printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
@@ -347,8 +333,6 @@ static int tc589_config(struct pcmcia_device *link)
347 if_names[dev->if_port]); 333 if_names[dev->if_port]);
348 return 0; 334 return 0;
349 335
350cs_failed:
351 cs_error(link, last_fn, last_ret);
352failed: 336failed:
353 tc589_release(link); 337 tc589_release(link);
354 return -ENODEV; 338 return -ENODEV;
@@ -511,24 +495,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
511 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); 495 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
512} 496}
513 497
514#ifdef PCMCIA_DEBUG
515static u32 netdev_get_msglevel(struct net_device *dev)
516{
517 return pc_debug;
518}
519
520static void netdev_set_msglevel(struct net_device *dev, u32 level)
521{
522 pc_debug = level;
523}
524#endif /* PCMCIA_DEBUG */
525
526static const struct ethtool_ops netdev_ethtool_ops = { 498static const struct ethtool_ops netdev_ethtool_ops = {
527 .get_drvinfo = netdev_get_drvinfo, 499 .get_drvinfo = netdev_get_drvinfo,
528#ifdef PCMCIA_DEBUG
529 .get_msglevel = netdev_get_msglevel,
530 .set_msglevel = netdev_set_msglevel,
531#endif /* PCMCIA_DEBUG */
532}; 500};
533 501
534static int el3_config(struct net_device *dev, struct ifmap *map) 502static int el3_config(struct net_device *dev, struct ifmap *map)
@@ -563,7 +531,7 @@ static int el3_open(struct net_device *dev)
563 lp->media.expires = jiffies + HZ; 531 lp->media.expires = jiffies + HZ;
564 add_timer(&lp->media); 532 add_timer(&lp->media);
565 533
566 DEBUG(1, "%s: opened, status %4.4x.\n", 534 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
567 dev->name, inw(dev->base_addr + EL3_STATUS)); 535 dev->name, inw(dev->base_addr + EL3_STATUS));
568 536
569 return 0; 537 return 0;
@@ -596,7 +564,7 @@ static void pop_tx_status(struct net_device *dev)
596 if (tx_status & 0x30) 564 if (tx_status & 0x30)
597 tc589_wait_for_completion(dev, TxReset); 565 tc589_wait_for_completion(dev, TxReset);
598 if (tx_status & 0x38) { 566 if (tx_status & 0x38) {
599 DEBUG(1, "%s: transmit error: status 0x%02x\n", 567 pr_debug("%s: transmit error: status 0x%02x\n",
600 dev->name, tx_status); 568 dev->name, tx_status);
601 outw(TxEnable, ioaddr + EL3_CMD); 569 outw(TxEnable, ioaddr + EL3_CMD);
602 dev->stats.tx_aborted_errors++; 570 dev->stats.tx_aborted_errors++;
@@ -612,7 +580,7 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
612 struct el3_private *priv = netdev_priv(dev); 580 struct el3_private *priv = netdev_priv(dev);
613 unsigned long flags; 581 unsigned long flags;
614 582
615 DEBUG(3, "%s: el3_start_xmit(length = %ld) called, " 583 pr_debug("%s: el3_start_xmit(length = %ld) called, "
616 "status %4.4x.\n", dev->name, (long)skb->len, 584 "status %4.4x.\n", dev->name, (long)skb->len,
617 inw(ioaddr + EL3_STATUS)); 585 inw(ioaddr + EL3_STATUS));
618 586
@@ -654,14 +622,14 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
654 622
655 ioaddr = dev->base_addr; 623 ioaddr = dev->base_addr;
656 624
657 DEBUG(3, "%s: interrupt, status %4.4x.\n", 625 pr_debug("%s: interrupt, status %4.4x.\n",
658 dev->name, inw(ioaddr + EL3_STATUS)); 626 dev->name, inw(ioaddr + EL3_STATUS));
659 627
660 spin_lock(&lp->lock); 628 spin_lock(&lp->lock);
661 while ((status = inw(ioaddr + EL3_STATUS)) & 629 while ((status = inw(ioaddr + EL3_STATUS)) &
662 (IntLatch | RxComplete | StatsFull)) { 630 (IntLatch | RxComplete | StatsFull)) {
663 if ((status & 0xe000) != 0x2000) { 631 if ((status & 0xe000) != 0x2000) {
664 DEBUG(1, "%s: interrupt from dead card\n", dev->name); 632 pr_debug("%s: interrupt from dead card\n", dev->name);
665 handled = 0; 633 handled = 0;
666 break; 634 break;
667 } 635 }
@@ -670,7 +638,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
670 el3_rx(dev); 638 el3_rx(dev);
671 639
672 if (status & TxAvailable) { 640 if (status & TxAvailable) {
673 DEBUG(3, " TX room bit was handled.\n"); 641 pr_debug(" TX room bit was handled.\n");
674 /* There's room in the FIFO for a full-sized packet. */ 642 /* There's room in the FIFO for a full-sized packet. */
675 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 643 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
676 netif_wake_queue(dev); 644 netif_wake_queue(dev);
@@ -722,7 +690,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
722 690
723 lp->last_irq = jiffies; 691 lp->last_irq = jiffies;
724 spin_unlock(&lp->lock); 692 spin_unlock(&lp->lock);
725 DEBUG(3, "%s: exiting interrupt, status %4.4x.\n", 693 pr_debug("%s: exiting interrupt, status %4.4x.\n",
726 dev->name, inw(ioaddr + EL3_STATUS)); 694 dev->name, inw(ioaddr + EL3_STATUS));
727 return IRQ_RETVAL(handled); 695 return IRQ_RETVAL(handled);
728} 696}
@@ -833,7 +801,7 @@ static void update_stats(struct net_device *dev)
833{ 801{
834 unsigned int ioaddr = dev->base_addr; 802 unsigned int ioaddr = dev->base_addr;
835 803
836 DEBUG(2, "%s: updating the statistics.\n", dev->name); 804 pr_debug("%s: updating the statistics.\n", dev->name);
837 /* Turn off statistics updates while reading. */ 805 /* Turn off statistics updates while reading. */
838 outw(StatsDisable, ioaddr + EL3_CMD); 806 outw(StatsDisable, ioaddr + EL3_CMD);
839 /* Switch to the stats window, and read everything. */ 807 /* Switch to the stats window, and read everything. */
@@ -861,7 +829,7 @@ static int el3_rx(struct net_device *dev)
861 int worklimit = 32; 829 int worklimit = 32;
862 short rx_status; 830 short rx_status;
863 831
864 DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", 832 pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
865 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); 833 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
866 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && 834 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
867 worklimit > 0) { 835 worklimit > 0) {
@@ -883,7 +851,7 @@ static int el3_rx(struct net_device *dev)
883 851
884 skb = dev_alloc_skb(pkt_len+5); 852 skb = dev_alloc_skb(pkt_len+5);
885 853
886 DEBUG(3, " Receiving packet size %d status %4.4x.\n", 854 pr_debug(" Receiving packet size %d status %4.4x.\n",
887 pkt_len, rx_status); 855 pkt_len, rx_status);
888 if (skb != NULL) { 856 if (skb != NULL) {
889 skb_reserve(skb, 2); 857 skb_reserve(skb, 2);
@@ -894,7 +862,7 @@ static int el3_rx(struct net_device *dev)
894 dev->stats.rx_packets++; 862 dev->stats.rx_packets++;
895 dev->stats.rx_bytes += pkt_len; 863 dev->stats.rx_bytes += pkt_len;
896 } else { 864 } else {
897 DEBUG(1, "%s: couldn't allocate a sk_buff of" 865 pr_debug("%s: couldn't allocate a sk_buff of"
898 " size %d.\n", dev->name, pkt_len); 866 " size %d.\n", dev->name, pkt_len);
899 dev->stats.rx_dropped++; 867 dev->stats.rx_dropped++;
900 } 868 }
@@ -935,7 +903,7 @@ static int el3_close(struct net_device *dev)
935 struct pcmcia_device *link = lp->p_dev; 903 struct pcmcia_device *link = lp->p_dev;
936 unsigned int ioaddr = dev->base_addr; 904 unsigned int ioaddr = dev->base_addr;
937 905
938 DEBUG(1, "%s: shutting down ethercard.\n", dev->name); 906 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
939 907
940 if (pcmcia_dev_present(link)) { 908 if (pcmcia_dev_present(link)) {
941 /* Turn off statistics ASAP. We update dev->stats below. */ 909 /* Turn off statistics ASAP. We update dev->stats below. */
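Note on the 3c589 (and 3c574) config hunks above: the CS_CHECK()/cs_error()/cs_failed: idiom is dropped in favour of checking each Card Services return code directly, with a single failed: label. A minimal sketch of the resulting tail of a config routine; example_config() is hypothetical, the pcmcia_* calls are the ones used in the hunks.

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static int example_config(struct pcmcia_device *link)
{
        int ret;

        ret = pcmcia_request_irq(link, &link->irq);
        if (ret)
                goto failed;

        ret = pcmcia_request_configuration(link, &link->conf);
        if (ret)
                goto failed;

        return 0;

failed:
        /* one cleanup path replaces the separate cs_failed:/failed: labels */
        pcmcia_disable_device(link);
        return -ENODEV;
}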
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index ca711f46814e..81bafd578478 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -75,16 +75,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
75MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver"); 75MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver");
76MODULE_LICENSE("GPL"); 76MODULE_LICENSE("GPL");
77 77
78#ifdef PCMCIA_DEBUG
79#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
80
81INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
82#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
83static char *version =
84"axnet_cs.c 1.28 2002/06/29 06:27:37 (David Hinds)";
85#else
86#define DEBUG(n, args...)
87#endif
88 78
89/*====================================================================*/ 79/*====================================================================*/
90 80
@@ -167,7 +157,7 @@ static int axnet_probe(struct pcmcia_device *link)
167 struct net_device *dev; 157 struct net_device *dev;
168 struct ei_device *ei_local; 158 struct ei_device *ei_local;
169 159
170 DEBUG(0, "axnet_attach()\n"); 160 dev_dbg(&link->dev, "axnet_attach()\n");
171 161
172 dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t)); 162 dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t));
173 if (!dev) 163 if (!dev)
@@ -180,7 +170,6 @@ static int axnet_probe(struct pcmcia_device *link)
180 info->p_dev = link; 170 info->p_dev = link;
181 link->priv = dev; 171 link->priv = dev;
182 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 172 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
183 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
184 link->conf.Attributes = CONF_ENABLE_IRQ; 173 link->conf.Attributes = CONF_ENABLE_IRQ;
185 link->conf.IntType = INT_MEMORY_AND_IO; 174 link->conf.IntType = INT_MEMORY_AND_IO;
186 175
@@ -205,7 +194,7 @@ static void axnet_detach(struct pcmcia_device *link)
205{ 194{
206 struct net_device *dev = link->priv; 195 struct net_device *dev = link->priv;
207 196
208 DEBUG(0, "axnet_detach(0x%p)\n", link); 197 dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
209 198
210 if (link->dev_node) 199 if (link->dev_node)
211 unregister_netdev(dev); 200 unregister_netdev(dev);
@@ -272,9 +261,6 @@ static int get_prom(struct pcmcia_device *link)
272 261
273======================================================================*/ 262======================================================================*/
274 263
275#define CS_CHECK(fn, ret) \
276do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
277
278static int try_io_port(struct pcmcia_device *link) 264static int try_io_port(struct pcmcia_device *link)
279{ 265{
280 int j, ret; 266 int j, ret;
@@ -341,26 +327,29 @@ static int axnet_config(struct pcmcia_device *link)
341{ 327{
342 struct net_device *dev = link->priv; 328 struct net_device *dev = link->priv;
343 axnet_dev_t *info = PRIV(dev); 329 axnet_dev_t *info = PRIV(dev);
344 int i, j, j2, last_ret, last_fn; 330 int i, j, j2, ret;
345 331
346 DEBUG(0, "axnet_config(0x%p)\n", link); 332 dev_dbg(&link->dev, "axnet_config(0x%p)\n", link);
347 333
348 /* don't trust the CIS on this; Linksys got it wrong */ 334 /* don't trust the CIS on this; Linksys got it wrong */
349 link->conf.Present = 0x63; 335 link->conf.Present = 0x63;
350 last_ret = pcmcia_loop_config(link, axnet_configcheck, NULL); 336 ret = pcmcia_loop_config(link, axnet_configcheck, NULL);
351 if (last_ret != 0) { 337 if (ret != 0)
352 cs_error(link, RequestIO, last_ret);
353 goto failed; 338 goto failed;
354 }
355 339
356 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 340 ret = pcmcia_request_irq(link, &link->irq);
341 if (ret)
342 goto failed;
357 343
358 if (link->io.NumPorts2 == 8) { 344 if (link->io.NumPorts2 == 8) {
359 link->conf.Attributes |= CONF_ENABLE_SPKR; 345 link->conf.Attributes |= CONF_ENABLE_SPKR;
360 link->conf.Status = CCSR_AUDIO_ENA; 346 link->conf.Status = CCSR_AUDIO_ENA;
361 } 347 }
362 348
363 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 349 ret = pcmcia_request_configuration(link, &link->conf);
350 if (ret)
351 goto failed;
352
364 dev->irq = link->irq.AssignedIRQ; 353 dev->irq = link->irq.AssignedIRQ;
365 dev->base_addr = link->io.BasePort1; 354 dev->base_addr = link->io.BasePort1;
366 355
@@ -410,7 +399,7 @@ static int axnet_config(struct pcmcia_device *link)
410 399
411 info->phy_id = (i < 32) ? i : -1; 400 info->phy_id = (i < 32) ? i : -1;
412 link->dev_node = &info->node; 401 link->dev_node = &info->node;
413 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 402 SET_NETDEV_DEV(dev, &link->dev);
414 403
415 if (register_netdev(dev) != 0) { 404 if (register_netdev(dev) != 0) {
416 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); 405 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
@@ -426,14 +415,12 @@ static int axnet_config(struct pcmcia_device *link)
426 dev->base_addr, dev->irq, 415 dev->base_addr, dev->irq,
427 dev->dev_addr); 416 dev->dev_addr);
428 if (info->phy_id != -1) { 417 if (info->phy_id != -1) {
429 DEBUG(0, " MII transceiver at index %d, status %x.\n", info->phy_id, j); 418 dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", info->phy_id, j);
430 } else { 419 } else {
431 printk(KERN_NOTICE " No MII transceivers found!\n"); 420 printk(KERN_NOTICE " No MII transceivers found!\n");
432 } 421 }
433 return 0; 422 return 0;
434 423
435cs_failed:
436 cs_error(link, last_fn, last_ret);
437failed: 424failed:
438 axnet_release(link); 425 axnet_release(link);
439 return -ENODEV; 426 return -ENODEV;
@@ -543,7 +530,7 @@ static int axnet_open(struct net_device *dev)
543 struct pcmcia_device *link = info->p_dev; 530 struct pcmcia_device *link = info->p_dev;
544 unsigned int nic_base = dev->base_addr; 531 unsigned int nic_base = dev->base_addr;
545 532
546 DEBUG(2, "axnet_open('%s')\n", dev->name); 533 dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name);
547 534
548 if (!pcmcia_dev_present(link)) 535 if (!pcmcia_dev_present(link))
549 return -ENODEV; 536 return -ENODEV;
@@ -572,7 +559,7 @@ static int axnet_close(struct net_device *dev)
572 axnet_dev_t *info = PRIV(dev); 559 axnet_dev_t *info = PRIV(dev);
573 struct pcmcia_device *link = info->p_dev; 560 struct pcmcia_device *link = info->p_dev;
574 561
575 DEBUG(2, "axnet_close('%s')\n", dev->name); 562 dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name);
576 563
577 ax_close(dev); 564 ax_close(dev);
578 free_irq(dev->irq, dev); 565 free_irq(dev->irq, dev);
@@ -741,10 +728,8 @@ static void block_input(struct net_device *dev, int count,
741 int xfer_count = count; 728 int xfer_count = count;
742 char *buf = skb->data; 729 char *buf = skb->data;
743 730
744#ifdef PCMCIA_DEBUG
745 if ((ei_debug > 4) && (count != 4)) 731 if ((ei_debug > 4) && (count != 4))
746 printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4); 732 pr_debug("%s: [bi=%d]\n", dev->name, count+4);
747#endif
748 outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); 733 outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
749 outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); 734 outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
750 outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); 735 outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
@@ -762,10 +747,7 @@ static void block_output(struct net_device *dev, int count,
762{ 747{
763 unsigned int nic_base = dev->base_addr; 748 unsigned int nic_base = dev->base_addr;
764 749
765#ifdef PCMCIA_DEBUG 750 pr_debug("%s: [bo=%d]\n", dev->name, count);
766 if (ei_debug > 4)
767 printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
768#endif
769 751
770 /* Round the count up for word writes. Do we need to do this? 752 /* Round the count up for word writes. Do we need to do this?
771 What effect will an odd byte count have on the 8390? 753 What effect will an odd byte count have on the 8390?
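Note on the axnet_cs hunks above, which like the other drivers in this series drop the PCMCIA_DEBUG/pc_debug machinery: the replacement is dev_dbg() where a struct device is at hand (attach/config paths) and pr_debug() in the fast paths that only have the interface name; both compile to nothing unless DEBUG or dynamic debug is enabled. The sketch below is illustrative only.

#include <linux/device.h>
#include <linux/netdevice.h>
#include <pcmcia/ds.h>

static void example_log(struct pcmcia_device *link, struct net_device *dev,
                        int count)
{
        /* attach/config path: a struct device is available */
        dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name);

        /* data path: only the net_device name is cheap to reach */
        pr_debug("%s: [bo=%d]\n", dev->name, count);
}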
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 7b5c77b7bd27..21d9c9d815d1 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -53,11 +53,7 @@
53 53
54#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n" 54#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n"
55 55
56#ifdef PCMCIA_DEBUG 56#ifdef DEBUG
57
58static int pc_debug = PCMCIA_DEBUG;
59module_param(pc_debug, int, 0);
60#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
61 57
62static void regdump(struct net_device *dev) 58static void regdump(struct net_device *dev)
63{ 59{
@@ -92,7 +88,6 @@ static void regdump(struct net_device *dev)
92 88
93#else 89#else
94 90
95#define DEBUG(n, args...) do { } while (0)
96static inline void regdump(struct net_device *dev) { } 91static inline void regdump(struct net_device *dev) { }
97 92
98#endif 93#endif
@@ -144,7 +139,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
144 struct net_device *dev; 139 struct net_device *dev;
145 struct arcnet_local *lp; 140 struct arcnet_local *lp;
146 141
147 DEBUG(0, "com20020_attach()\n"); 142 dev_dbg(&p_dev->dev, "com20020_attach()\n");
148 143
149 /* Create new network device */ 144 /* Create new network device */
150 info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); 145 info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
@@ -169,11 +164,10 @@ static int com20020_probe(struct pcmcia_device *p_dev)
169 p_dev->io.NumPorts1 = 16; 164 p_dev->io.NumPorts1 = 16;
170 p_dev->io.IOAddrLines = 16; 165 p_dev->io.IOAddrLines = 16;
171 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 166 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
172 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
173 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 167 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
174 p_dev->conf.IntType = INT_MEMORY_AND_IO; 168 p_dev->conf.IntType = INT_MEMORY_AND_IO;
175 169
176 p_dev->irq.Instance = info->dev = dev; 170 info->dev = dev;
177 p_dev->priv = info; 171 p_dev->priv = info;
178 172
179 return com20020_config(p_dev); 173 return com20020_config(p_dev);
@@ -198,12 +192,12 @@ static void com20020_detach(struct pcmcia_device *link)
198 struct com20020_dev_t *info = link->priv; 192 struct com20020_dev_t *info = link->priv;
199 struct net_device *dev = info->dev; 193 struct net_device *dev = info->dev;
200 194
201 DEBUG(1,"detach...\n"); 195 dev_dbg(&link->dev, "detach...\n");
202 196
203 DEBUG(0, "com20020_detach(0x%p)\n", link); 197 dev_dbg(&link->dev, "com20020_detach\n");
204 198
205 if (link->dev_node) { 199 if (link->dev_node) {
206 DEBUG(1,"unregister...\n"); 200 dev_dbg(&link->dev, "unregister...\n");
207 201
208 unregister_netdev(dev); 202 unregister_netdev(dev);
209 203
@@ -218,16 +212,16 @@ static void com20020_detach(struct pcmcia_device *link)
218 com20020_release(link); 212 com20020_release(link);
219 213
220 /* Unlink device structure, free bits */ 214 /* Unlink device structure, free bits */
221 DEBUG(1,"unlinking...\n"); 215 dev_dbg(&link->dev, "unlinking...\n");
222 if (link->priv) 216 if (link->priv)
223 { 217 {
224 dev = info->dev; 218 dev = info->dev;
225 if (dev) 219 if (dev)
226 { 220 {
227 DEBUG(1,"kfree...\n"); 221 dev_dbg(&link->dev, "kfree...\n");
228 free_netdev(dev); 222 free_netdev(dev);
229 } 223 }
230 DEBUG(1,"kfree2...\n"); 224 dev_dbg(&link->dev, "kfree2...\n");
231 kfree(info); 225 kfree(info);
232 } 226 }
233 227
@@ -241,25 +235,22 @@ static void com20020_detach(struct pcmcia_device *link)
241 235
242======================================================================*/ 236======================================================================*/
243 237
244#define CS_CHECK(fn, ret) \
245do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
246
247static int com20020_config(struct pcmcia_device *link) 238static int com20020_config(struct pcmcia_device *link)
248{ 239{
249 struct arcnet_local *lp; 240 struct arcnet_local *lp;
250 com20020_dev_t *info; 241 com20020_dev_t *info;
251 struct net_device *dev; 242 struct net_device *dev;
252 int i, last_ret, last_fn; 243 int i, ret;
253 int ioaddr; 244 int ioaddr;
254 245
255 info = link->priv; 246 info = link->priv;
256 dev = info->dev; 247 dev = info->dev;
257 248
258 DEBUG(1,"config...\n"); 249 dev_dbg(&link->dev, "config...\n");
259 250
260 DEBUG(0, "com20020_config(0x%p)\n", link); 251 dev_dbg(&link->dev, "com20020_config\n");
261 252
262 DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1); 253 dev_dbg(&link->dev, "baseport1 is %Xh\n", link->io.BasePort1);
263 i = -ENODEV; 254 i = -ENODEV;
264 if (!link->io.BasePort1) 255 if (!link->io.BasePort1)
265 { 256 {
@@ -276,26 +267,27 @@ static int com20020_config(struct pcmcia_device *link)
276 267
277 if (i != 0) 268 if (i != 0)
278 { 269 {
279 DEBUG(1,"arcnet: requestIO failed totally!\n"); 270 dev_dbg(&link->dev, "requestIO failed totally!\n");
280 goto failed; 271 goto failed;
281 } 272 }
282 273
283 ioaddr = dev->base_addr = link->io.BasePort1; 274 ioaddr = dev->base_addr = link->io.BasePort1;
284 DEBUG(1,"arcnet: got ioaddr %Xh\n", ioaddr); 275 dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
285 276
286 DEBUG(1,"arcnet: request IRQ %d (%Xh/%Xh)\n", 277 dev_dbg(&link->dev, "request IRQ %d\n",
287 link->irq.AssignedIRQ, 278 link->irq.AssignedIRQ);
288 link->irq.IRQInfo1, link->irq.IRQInfo2);
289 i = pcmcia_request_irq(link, &link->irq); 279 i = pcmcia_request_irq(link, &link->irq);
290 if (i != 0) 280 if (i != 0)
291 { 281 {
292 DEBUG(1,"arcnet: requestIRQ failed totally!\n"); 282 dev_dbg(&link->dev, "requestIRQ failed totally!\n");
293 goto failed; 283 goto failed;
294 } 284 }
295 285
296 dev->irq = link->irq.AssignedIRQ; 286 dev->irq = link->irq.AssignedIRQ;
297 287
298 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 288 ret = pcmcia_request_configuration(link, &link->conf);
289 if (ret)
290 goto failed;
299 291
300 if (com20020_check(dev)) 292 if (com20020_check(dev))
301 { 293 {
@@ -308,26 +300,25 @@ static int com20020_config(struct pcmcia_device *link)
308 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ 300 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
309 301
310 link->dev_node = &info->node; 302 link->dev_node = &info->node;
311 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 303 SET_NETDEV_DEV(dev, &link->dev);
312 304
313 i = com20020_found(dev, 0); /* calls register_netdev */ 305 i = com20020_found(dev, 0); /* calls register_netdev */
314 306
315 if (i != 0) { 307 if (i != 0) {
316 DEBUG(1,KERN_NOTICE "com20020_cs: com20020_found() failed\n"); 308 dev_printk(KERN_NOTICE, &link->dev,
309 "com20020_cs: com20020_found() failed\n");
317 link->dev_node = NULL; 310 link->dev_node = NULL;
318 goto failed; 311 goto failed;
319 } 312 }
320 313
321 strcpy(info->node.dev_name, dev->name); 314 strcpy(info->node.dev_name, dev->name);
322 315
323 DEBUG(1,KERN_INFO "%s: port %#3lx, irq %d\n", 316 dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
324 dev->name, dev->base_addr, dev->irq); 317 dev->name, dev->base_addr, dev->irq);
325 return 0; 318 return 0;
326 319
327cs_failed:
328 cs_error(link, last_fn, last_ret);
329failed: 320failed:
330 DEBUG(1,"com20020_config failed...\n"); 321 dev_dbg(&link->dev, "com20020_config failed...\n");
331 com20020_release(link); 322 com20020_release(link);
332 return -ENODEV; 323 return -ENODEV;
333} /* com20020_config */ 324} /* com20020_config */
@@ -342,7 +333,7 @@ failed:
342 333
343static void com20020_release(struct pcmcia_device *link) 334static void com20020_release(struct pcmcia_device *link)
344{ 335{
345 DEBUG(0, "com20020_release(0x%p)\n", link); 336 dev_dbg(&link->dev, "com20020_release\n");
346 pcmcia_disable_device(link); 337 pcmcia_disable_device(link);
347} 338}
348 339
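Note on the probe-time IRQ setup trimmed throughout this diff: IRQInfo1/IRQ_LEVEL_ID, IRQ_HANDLE_PRESENT and irq.Instance are removed, leaving only the sharing type and the handler, as in the ethernet driver hunks above. A sketch under those assumptions; example_probe() and example_interrupt() are hypothetical.

#include <linux/interrupt.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_probe(struct pcmcia_device *p_dev)
{
        p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
        p_dev->irq.Handler = example_interrupt;

        p_dev->conf.Attributes = CONF_ENABLE_IRQ;
        p_dev->conf.IntType = INT_MEMORY_AND_IO;
        return 0;
}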
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index a6961215cd56..8ad8384fc1c0 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -72,13 +72,6 @@ MODULE_LICENSE("GPL");
72/* 0:4KB*2 TX buffer else:8KB*2 TX buffer */ 72/* 0:4KB*2 TX buffer else:8KB*2 TX buffer */
73INT_MODULE_PARM(sram_config, 0); 73INT_MODULE_PARM(sram_config, 0);
74 74
75#ifdef PCMCIA_DEBUG
76INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
77#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
78static char *version = DRV_NAME ".c " DRV_VERSION " 2002/03/23";
79#else
80#define DEBUG(n, args...)
81#endif
82 75
83/*====================================================================*/ 76/*====================================================================*/
84/* 77/*
@@ -245,7 +238,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
245 local_info_t *lp; 238 local_info_t *lp;
246 struct net_device *dev; 239 struct net_device *dev;
247 240
248 DEBUG(0, "fmvj18x_attach()\n"); 241 dev_dbg(&link->dev, "fmvj18x_attach()\n");
249 242
250 /* Make up a FMVJ18x specific data structure */ 243 /* Make up a FMVJ18x specific data structure */
251 dev = alloc_etherdev(sizeof(local_info_t)); 244 dev = alloc_etherdev(sizeof(local_info_t));
@@ -262,10 +255,8 @@ static int fmvj18x_probe(struct pcmcia_device *link)
262 link->io.IOAddrLines = 5; 255 link->io.IOAddrLines = 5;
263 256
264 /* Interrupt setup */ 257 /* Interrupt setup */
265 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 258 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
266 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
267 link->irq.Handler = fjn_interrupt; 259 link->irq.Handler = fjn_interrupt;
268 link->irq.Instance = dev;
269 260
270 /* General socket configuration */ 261 /* General socket configuration */
271 link->conf.Attributes = CONF_ENABLE_IRQ; 262 link->conf.Attributes = CONF_ENABLE_IRQ;
@@ -285,7 +276,7 @@ static void fmvj18x_detach(struct pcmcia_device *link)
285{ 276{
286 struct net_device *dev = link->priv; 277 struct net_device *dev = link->priv;
287 278
288 DEBUG(0, "fmvj18x_detach(0x%p)\n", link); 279 dev_dbg(&link->dev, "fmvj18x_detach\n");
289 280
290 if (link->dev_node) 281 if (link->dev_node)
291 unregister_netdev(dev); 282 unregister_netdev(dev);
@@ -297,9 +288,6 @@ static void fmvj18x_detach(struct pcmcia_device *link)
297 288
298/*====================================================================*/ 289/*====================================================================*/
299 290
300#define CS_CHECK(fn, ret) \
301do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
302
303static int mfc_try_io_port(struct pcmcia_device *link) 291static int mfc_try_io_port(struct pcmcia_device *link)
304{ 292{
305 int i, ret; 293 int i, ret;
@@ -341,33 +329,38 @@ static int ungermann_try_io_port(struct pcmcia_device *link)
341 return ret; /* RequestIO failed */ 329 return ret; /* RequestIO failed */
342} 330}
343 331
332static int fmvj18x_ioprobe(struct pcmcia_device *p_dev,
333 cistpl_cftable_entry_t *cfg,
334 cistpl_cftable_entry_t *dflt,
335 unsigned int vcc,
336 void *priv_data)
337{
338 return 0; /* strange, but that's what the code did already before... */
339}
340
344static int fmvj18x_config(struct pcmcia_device *link) 341static int fmvj18x_config(struct pcmcia_device *link)
345{ 342{
346 struct net_device *dev = link->priv; 343 struct net_device *dev = link->priv;
347 local_info_t *lp = netdev_priv(dev); 344 local_info_t *lp = netdev_priv(dev);
348 tuple_t tuple; 345 int i, ret;
349 cisparse_t parse;
350 u_short buf[32];
351 int i, last_fn = 0, last_ret = 0, ret;
352 unsigned int ioaddr; 346 unsigned int ioaddr;
353 cardtype_t cardtype; 347 cardtype_t cardtype;
354 char *card_name = "unknown"; 348 char *card_name = "unknown";
355 u_char *node_id; 349 u8 *buf;
350 size_t len;
351 u_char buggybuf[32];
352
353 dev_dbg(&link->dev, "fmvj18x_config\n");
356 354
357 DEBUG(0, "fmvj18x_config(0x%p)\n", link); 355 len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf);
356 kfree(buf);
358 357
359 tuple.TupleData = (u_char *)buf; 358 if (len) {
360 tuple.TupleDataMax = 64;
361 tuple.TupleOffset = 0;
362 tuple.DesiredTuple = CISTPL_FUNCE;
363 tuple.TupleOffset = 0;
364 if (pcmcia_get_first_tuple(link, &tuple) == 0) {
365 /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */ 359 /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */
366 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 360 ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL);
367 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 361 if (ret != 0)
368 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 362 goto failed;
369 CS_CHECK(ParseTuple, pcmcia_parse_tuple(&tuple, &parse)); 363
370 link->conf.ConfigIndex = parse.cftable_entry.index;
371 switch (link->manf_id) { 364 switch (link->manf_id) {
372 case MANFID_TDK: 365 case MANFID_TDK:
373 cardtype = TDK; 366 cardtype = TDK;
@@ -433,17 +426,24 @@ static int fmvj18x_config(struct pcmcia_device *link)
433 426
434 if (link->io.NumPorts2 != 0) { 427 if (link->io.NumPorts2 != 0) {
435 link->irq.Attributes = 428 link->irq.Attributes =
436 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; 429 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
437 ret = mfc_try_io_port(link); 430 ret = mfc_try_io_port(link);
438 if (ret != 0) goto cs_failed; 431 if (ret != 0) goto failed;
439 } else if (cardtype == UNGERMANN) { 432 } else if (cardtype == UNGERMANN) {
440 ret = ungermann_try_io_port(link); 433 ret = ungermann_try_io_port(link);
441 if (ret != 0) goto cs_failed; 434 if (ret != 0) goto failed;
442 } else { 435 } else {
443 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); 436 ret = pcmcia_request_io(link, &link->io);
437 if (ret)
438 goto failed;
444 } 439 }
445 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 440 ret = pcmcia_request_irq(link, &link->irq);
446 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 441 if (ret)
442 goto failed;
443 ret = pcmcia_request_configuration(link, &link->conf);
444 if (ret)
445 goto failed;
446
447 dev->irq = link->irq.AssignedIRQ; 447 dev->irq = link->irq.AssignedIRQ;
448 dev->base_addr = link->io.BasePort1; 448 dev->base_addr = link->io.BasePort1;
449 449
@@ -474,21 +474,21 @@ static int fmvj18x_config(struct pcmcia_device *link)
474 case CONTEC: 474 case CONTEC:
475 case NEC: 475 case NEC:
476 case KME: 476 case KME:
477 tuple.DesiredTuple = CISTPL_FUNCE;
478 tuple.TupleOffset = 0;
479 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
480 tuple.TupleOffset = 0;
481 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
482 if (cardtype == MBH10304) { 477 if (cardtype == MBH10304) {
483 /* MBH10304's CIS_FUNCE is corrupted */
484 node_id = &(tuple.TupleData[5]);
485 card_name = "FMV-J182"; 478 card_name = "FMV-J182";
486 } else { 479
487 while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) { 480 len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf);
488 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple)); 481 if (len < 11) {
489 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 482 kfree(buf);
483 goto failed;
490 } 484 }
491 node_id = &(tuple.TupleData[2]); 485 /* Read MACID from CIS */
486 for (i = 5; i < 11; i++)
487 dev->dev_addr[i] = buf[i];
488 kfree(buf);
489 } else {
490 if (pcmcia_get_mac_from_cis(link, dev))
491 goto failed;
492 if( cardtype == TDK ) { 492 if( cardtype == TDK ) {
493 card_name = "TDK LAK-CD021"; 493 card_name = "TDK LAK-CD021";
494 } else if( cardtype == LA501 ) { 494 } else if( cardtype == LA501 ) {
@@ -501,9 +501,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
501 card_name = "C-NET(PC)C"; 501 card_name = "C-NET(PC)C";
502 } 502 }
503 } 503 }
504 /* Read MACID from CIS */
505 for (i = 0; i < 6; i++)
506 dev->dev_addr[i] = node_id[i];
507 break; 504 break;
508 case UNGERMANN: 505 case UNGERMANN:
509 /* Read MACID from register */ 506 /* Read MACID from register */
@@ -513,12 +510,12 @@ static int fmvj18x_config(struct pcmcia_device *link)
513 break; 510 break;
514 case XXX10304: 511 case XXX10304:
515 /* Read MACID from Buggy CIS */ 512 /* Read MACID from Buggy CIS */
516 if (fmvj18x_get_hwinfo(link, tuple.TupleData) == -1) { 513 if (fmvj18x_get_hwinfo(link, buggybuf) == -1) {
517 printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n"); 514 printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n");
518 goto failed; 515 goto failed;
519 } 516 }
520 for (i = 0 ; i < 6; i++) { 517 for (i = 0 ; i < 6; i++) {
521 dev->dev_addr[i] = tuple.TupleData[i]; 518 dev->dev_addr[i] = buggybuf[i];
522 } 519 }
523 card_name = "FMV-J182"; 520 card_name = "FMV-J182";
524 break; 521 break;
@@ -533,7 +530,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
533 530
534 lp->cardtype = cardtype; 531 lp->cardtype = cardtype;
535 link->dev_node = &lp->node; 532 link->dev_node = &lp->node;
536 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 533 SET_NETDEV_DEV(dev, &link->dev);
537 534
538 if (register_netdev(dev) != 0) { 535 if (register_netdev(dev) != 0) {
539 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n"); 536 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
@@ -551,9 +548,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
551 548
552 return 0; 549 return 0;
553 550
554cs_failed:
555 /* All Card Services errors end up here */
556 cs_error(link, last_fn, last_ret);
557failed: 551failed:
558 fmvj18x_release(link); 552 fmvj18x_release(link);
559 return -ENODEV; 553 return -ENODEV;
@@ -571,16 +565,14 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
571 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 565 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
572 req.Base = 0; req.Size = 0; 566 req.Base = 0; req.Size = 0;
573 req.AccessSpeed = 0; 567 req.AccessSpeed = 0;
574 i = pcmcia_request_window(&link, &req, &link->win); 568 i = pcmcia_request_window(link, &req, &link->win);
575 if (i != 0) { 569 if (i != 0)
576 cs_error(link, RequestWindow, i);
577 return -1; 570 return -1;
578 }
579 571
580 base = ioremap(req.Base, req.Size); 572 base = ioremap(req.Base, req.Size);
581 mem.Page = 0; 573 mem.Page = 0;
582 mem.CardOffset = 0; 574 mem.CardOffset = 0;
583 pcmcia_map_mem_page(link->win, &mem); 575 pcmcia_map_mem_page(link, link->win, &mem);
584 576
585 /* 577 /*
586 * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format 578 * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format
@@ -605,9 +597,7 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
605 } 597 }
606 598
607 iounmap(base); 599 iounmap(base);
608 j = pcmcia_release_window(link->win); 600 j = pcmcia_release_window(link, link->win);
609 if (j != 0)
610 cs_error(link, ReleaseWindow, j);
611 return (i != 0x200) ? 0 : -1; 601 return (i != 0x200) ? 0 : -1;
612 602
613} /* fmvj18x_get_hwinfo */ 603} /* fmvj18x_get_hwinfo */
@@ -626,11 +616,9 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
626 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 616 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
627 req.Base = 0; req.Size = 0; 617 req.Base = 0; req.Size = 0;
628 req.AccessSpeed = 0; 618 req.AccessSpeed = 0;
629 i = pcmcia_request_window(&link, &req, &link->win); 619 i = pcmcia_request_window(link, &req, &link->win);
630 if (i != 0) { 620 if (i != 0)
631 cs_error(link, RequestWindow, i);
632 return -1; 621 return -1;
633 }
634 622
635 lp->base = ioremap(req.Base, req.Size); 623 lp->base = ioremap(req.Base, req.Size);
636 if (lp->base == NULL) { 624 if (lp->base == NULL) {
@@ -640,11 +628,10 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
640 628
641 mem.Page = 0; 629 mem.Page = 0;
642 mem.CardOffset = 0; 630 mem.CardOffset = 0;
643 i = pcmcia_map_mem_page(link->win, &mem); 631 i = pcmcia_map_mem_page(link, link->win, &mem);
644 if (i != 0) { 632 if (i != 0) {
645 iounmap(lp->base); 633 iounmap(lp->base);
646 lp->base = NULL; 634 lp->base = NULL;
647 cs_error(link, MapMemPage, i);
648 return -1; 635 return -1;
649 } 636 }
650 637
@@ -671,15 +658,13 @@ static void fmvj18x_release(struct pcmcia_device *link)
671 u_char __iomem *tmp; 658 u_char __iomem *tmp;
672 int j; 659 int j;
673 660
674 DEBUG(0, "fmvj18x_release(0x%p)\n", link); 661 dev_dbg(&link->dev, "fmvj18x_release\n");
675 662
676 if (lp->base != NULL) { 663 if (lp->base != NULL) {
677 tmp = lp->base; 664 tmp = lp->base;
678 lp->base = NULL; /* set NULL before iounmap */ 665 lp->base = NULL; /* set NULL before iounmap */
679 iounmap(tmp); 666 iounmap(tmp);
680 j = pcmcia_release_window(link->win); 667 j = pcmcia_release_window(link, link->win);
681 if (j != 0)
682 cs_error(link, ReleaseWindow, j);
683 } 668 }
684 669
685 pcmcia_disable_device(link); 670 pcmcia_disable_device(link);
@@ -788,8 +773,8 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
788 outb(tx_stat, ioaddr + TX_STATUS); 773 outb(tx_stat, ioaddr + TX_STATUS);
789 outb(rx_stat, ioaddr + RX_STATUS); 774 outb(rx_stat, ioaddr + RX_STATUS);
790 775
791 DEBUG(4, "%s: interrupt, rx_status %02x.\n", dev->name, rx_stat); 776 pr_debug("%s: interrupt, rx_status %02x.\n", dev->name, rx_stat);
792 DEBUG(4, " tx_status %02x.\n", tx_stat); 777 pr_debug(" tx_status %02x.\n", tx_stat);
793 778
794 if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { 779 if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
795 /* there is packet(s) in rx buffer */ 780 /* there is packet(s) in rx buffer */
@@ -809,8 +794,8 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
809 } 794 }
810 netif_wake_queue(dev); 795 netif_wake_queue(dev);
811 } 796 }
812 DEBUG(4, "%s: exiting interrupt,\n", dev->name); 797 pr_debug("%s: exiting interrupt,\n", dev->name);
813 DEBUG(4, " tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat); 798 pr_debug(" tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat);
814 799
815 outb(D_TX_INTR, ioaddr + TX_INTR); 800 outb(D_TX_INTR, ioaddr + TX_INTR);
816 outb(D_RX_INTR, ioaddr + RX_INTR); 801 outb(D_RX_INTR, ioaddr + RX_INTR);
@@ -882,7 +867,7 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
882 return NETDEV_TX_BUSY; 867 return NETDEV_TX_BUSY;
883 } 868 }
884 869
885 DEBUG(4, "%s: Transmitting a packet of length %lu.\n", 870 pr_debug("%s: Transmitting a packet of length %lu.\n",
886 dev->name, (unsigned long)skb->len); 871 dev->name, (unsigned long)skb->len);
887 dev->stats.tx_bytes += skb->len; 872 dev->stats.tx_bytes += skb->len;
888 873
@@ -937,7 +922,7 @@ static void fjn_reset(struct net_device *dev)
937 unsigned int ioaddr = dev->base_addr; 922 unsigned int ioaddr = dev->base_addr;
938 int i; 923 int i;
939 924
940 DEBUG(4, "fjn_reset(%s) called.\n",dev->name); 925 pr_debug("fjn_reset(%s) called.\n",dev->name);
941 926
942 /* Reset controller */ 927 /* Reset controller */
943 if( sram_config == 0 ) 928 if( sram_config == 0 )
@@ -1015,13 +1000,13 @@ static void fjn_rx(struct net_device *dev)
1015 unsigned int ioaddr = dev->base_addr; 1000 unsigned int ioaddr = dev->base_addr;
1016 int boguscount = 10; /* 5 -> 10: by agy 19940922 */ 1001 int boguscount = 10; /* 5 -> 10: by agy 19940922 */
1017 1002
1018 DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n", 1003 pr_debug("%s: in rx_packet(), rx_status %02x.\n",
1019 dev->name, inb(ioaddr + RX_STATUS)); 1004 dev->name, inb(ioaddr + RX_STATUS));
1020 1005
1021 while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { 1006 while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
1022 u_short status = inw(ioaddr + DATAPORT); 1007 u_short status = inw(ioaddr + DATAPORT);
1023 1008
1024 DEBUG(4, "%s: Rxing packet mode %02x status %04x.\n", 1009 pr_debug("%s: Rxing packet mode %02x status %04x.\n",
1025 dev->name, inb(ioaddr + RX_MODE), status); 1010 dev->name, inb(ioaddr + RX_MODE), status);
1026#ifndef final_version 1011#ifndef final_version
1027 if (status == 0) { 1012 if (status == 0) {
@@ -1061,16 +1046,14 @@ static void fjn_rx(struct net_device *dev)
1061 (pkt_len + 1) >> 1); 1046 (pkt_len + 1) >> 1);
1062 skb->protocol = eth_type_trans(skb, dev); 1047 skb->protocol = eth_type_trans(skb, dev);
1063 1048
1064#ifdef PCMCIA_DEBUG 1049 {
1065 if (pc_debug > 5) {
1066 int i; 1050 int i;
1067 printk(KERN_DEBUG "%s: Rxed packet of length %d: ", 1051 pr_debug("%s: Rxed packet of length %d: ",
1068 dev->name, pkt_len); 1052 dev->name, pkt_len);
1069 for (i = 0; i < 14; i++) 1053 for (i = 0; i < 14; i++)
1070 printk(" %02x", skb->data[i]); 1054 pr_debug(" %02x", skb->data[i]);
1071 printk(".\n"); 1055 pr_debug(".\n");
1072 } 1056 }
1073#endif
1074 1057
1075 netif_rx(skb); 1058 netif_rx(skb);
1076 dev->stats.rx_packets++; 1059 dev->stats.rx_packets++;
@@ -1094,7 +1077,7 @@ static void fjn_rx(struct net_device *dev)
1094 } 1077 }
1095 1078
1096 if (i > 0) 1079 if (i > 0)
1097 DEBUG(5, "%s: Exint Rx packet with mode %02x after " 1080 pr_debug("%s: Exint Rx packet with mode %02x after "
1098 "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); 1081 "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
1099 } 1082 }
1100*/ 1083*/
@@ -1112,24 +1095,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
1112 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); 1095 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
1113} 1096}
1114 1097
1115#ifdef PCMCIA_DEBUG
1116static u32 netdev_get_msglevel(struct net_device *dev)
1117{
1118 return pc_debug;
1119}
1120
1121static void netdev_set_msglevel(struct net_device *dev, u32 level)
1122{
1123 pc_debug = level;
1124}
1125#endif /* PCMCIA_DEBUG */
1126
1127static const struct ethtool_ops netdev_ethtool_ops = { 1098static const struct ethtool_ops netdev_ethtool_ops = {
1128 .get_drvinfo = netdev_get_drvinfo, 1099 .get_drvinfo = netdev_get_drvinfo,
1129#ifdef PCMCIA_DEBUG
1130 .get_msglevel = netdev_get_msglevel,
1131 .set_msglevel = netdev_set_msglevel,
1132#endif /* PCMCIA_DEBUG */
1133}; 1100};
1134 1101
1135static int fjn_config(struct net_device *dev, struct ifmap *map){ 1102static int fjn_config(struct net_device *dev, struct ifmap *map){
@@ -1141,7 +1108,7 @@ static int fjn_open(struct net_device *dev)
1141 struct local_info_t *lp = netdev_priv(dev); 1108 struct local_info_t *lp = netdev_priv(dev);
1142 struct pcmcia_device *link = lp->p_dev; 1109 struct pcmcia_device *link = lp->p_dev;
1143 1110
1144 DEBUG(4, "fjn_open('%s').\n", dev->name); 1111 pr_debug("fjn_open('%s').\n", dev->name);
1145 1112
1146 if (!pcmcia_dev_present(link)) 1113 if (!pcmcia_dev_present(link))
1147 return -ENODEV; 1114 return -ENODEV;
@@ -1167,7 +1134,7 @@ static int fjn_close(struct net_device *dev)
1167 struct pcmcia_device *link = lp->p_dev; 1134 struct pcmcia_device *link = lp->p_dev;
1168 unsigned int ioaddr = dev->base_addr; 1135 unsigned int ioaddr = dev->base_addr;
1169 1136
1170 DEBUG(4, "fjn_close('%s').\n", dev->name); 1137 pr_debug("fjn_close('%s').\n", dev->name);
1171 1138
1172 lp->open_time = 0; 1139 lp->open_time = 0;
1173 netif_stop_queue(dev); 1140 netif_stop_queue(dev);
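The fmvj18x_cs hunks above show the two conversions repeated throughout this merge: the CS_CHECK()/cs_error() reporting is dropped in favour of plain return-value checks that branch to the driver's existing failed label, and the memory-window calls (pcmcia_request_window, pcmcia_map_mem_page, pcmcia_release_window) now take the struct pcmcia_device pointer rather than only a window handle. A minimal sketch of the resulting calling convention follows; the helper name is hypothetical and only the calls, fields and types visible in the diff (win_req_t, memreq_t, link->win, dev_dbg) are assumed.

    /* Sketch only: hypothetical helper in the style of the converted
     * fmvj18x_get_hwinfo() above; relies on the usual pcmcia headers. */
    #include <pcmcia/cs_types.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/cistpl.h>
    #include <pcmcia/ds.h>

    static int example_map_attr_window(struct pcmcia_device *link)
    {
            win_req_t req;
            memreq_t mem;
            int ret;

            req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
            req.Base = 0;
            req.Size = 0;
            req.AccessSpeed = 0;

            /* Window calls now take the pcmcia_device itself; errors are
             * reported by the core, so the caller only checks the result. */
            ret = pcmcia_request_window(link, &req, &link->win);
            if (ret)
                    return ret;

            mem.Page = 0;
            mem.CardOffset = 0;
            ret = pcmcia_map_mem_page(link, link->win, &mem);
            if (ret) {
                    pcmcia_release_window(link, link->win);
                    return ret;
            }

            dev_dbg(&link->dev, "attribute window mapped at %#lx\n",
                    (unsigned long)req.Base);
            return 0;
    }
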
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 06618af1a468..37f4a6fdc3ef 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -69,17 +69,6 @@
69#define PCMCIA 69#define PCMCIA
70#include "../tokenring/ibmtr.c" 70#include "../tokenring/ibmtr.c"
71 71
72#ifdef PCMCIA_DEBUG
73static int pc_debug = PCMCIA_DEBUG;
74module_param(pc_debug, int, 0);
75#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
76static char *version =
77"ibmtr_cs.c 1.10 1996/01/06 05:19:00 (Steve Kipisz)\n"
78" 2.2.7 1999/05/03 12:00:00 (Mike Phillips)\n"
79" 2.4.2 2001/30/28 Midnight (Burt Silverman)\n";
80#else
81#define DEBUG(n, args...)
82#endif
83 72
84/*====================================================================*/ 73/*====================================================================*/
85 74
@@ -130,6 +119,12 @@ static const struct ethtool_ops netdev_ethtool_ops = {
130 .get_drvinfo = netdev_get_drvinfo, 119 .get_drvinfo = netdev_get_drvinfo,
131}; 120};
132 121
122static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
123 ibmtr_dev_t *info = dev_id;
124 struct net_device *dev = info->dev;
125 return tok_interrupt(irq, dev);
126};
127
133/*====================================================================== 128/*======================================================================
134 129
135 ibmtr_attach() creates an "instance" of the driver, allocating 130 ibmtr_attach() creates an "instance" of the driver, allocating
@@ -143,7 +138,7 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
143 ibmtr_dev_t *info; 138 ibmtr_dev_t *info;
144 struct net_device *dev; 139 struct net_device *dev;
145 140
146 DEBUG(0, "ibmtr_attach()\n"); 141 dev_dbg(&link->dev, "ibmtr_attach()\n");
147 142
148 /* Create new token-ring device */ 143 /* Create new token-ring device */
149 info = kzalloc(sizeof(*info), GFP_KERNEL); 144 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -161,14 +156,13 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
161 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 156 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
162 link->io.NumPorts1 = 4; 157 link->io.NumPorts1 = 4;
163 link->io.IOAddrLines = 16; 158 link->io.IOAddrLines = 16;
164 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 159 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
165 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 160 link->irq.Handler = ibmtr_interrupt;
166 link->irq.Handler = &tok_interrupt;
167 link->conf.Attributes = CONF_ENABLE_IRQ; 161 link->conf.Attributes = CONF_ENABLE_IRQ;
168 link->conf.IntType = INT_MEMORY_AND_IO; 162 link->conf.IntType = INT_MEMORY_AND_IO;
169 link->conf.Present = PRESENT_OPTION; 163 link->conf.Present = PRESENT_OPTION;
170 164
171 link->irq.Instance = info->dev = dev; 165 info->dev = dev;
172 166
173 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 167 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
174 168
@@ -190,7 +184,7 @@ static void ibmtr_detach(struct pcmcia_device *link)
190 struct net_device *dev = info->dev; 184 struct net_device *dev = info->dev;
191 struct tok_info *ti = netdev_priv(dev); 185 struct tok_info *ti = netdev_priv(dev);
192 186
193 DEBUG(0, "ibmtr_detach(0x%p)\n", link); 187 dev_dbg(&link->dev, "ibmtr_detach\n");
194 188
195 /* 189 /*
196 * When the card removal interrupt hits tok_interrupt(), 190 * When the card removal interrupt hits tok_interrupt(),
@@ -217,9 +211,6 @@ static void ibmtr_detach(struct pcmcia_device *link)
217 211
218======================================================================*/ 212======================================================================*/
219 213
220#define CS_CHECK(fn, ret) \
221do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
222
223static int __devinit ibmtr_config(struct pcmcia_device *link) 214static int __devinit ibmtr_config(struct pcmcia_device *link)
224{ 215{
225 ibmtr_dev_t *info = link->priv; 216 ibmtr_dev_t *info = link->priv;
@@ -227,9 +218,9 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
227 struct tok_info *ti = netdev_priv(dev); 218 struct tok_info *ti = netdev_priv(dev);
228 win_req_t req; 219 win_req_t req;
229 memreq_t mem; 220 memreq_t mem;
230 int i, last_ret, last_fn; 221 int i, ret;
231 222
232 DEBUG(0, "ibmtr_config(0x%p)\n", link); 223 dev_dbg(&link->dev, "ibmtr_config\n");
233 224
234 link->conf.ConfigIndex = 0x61; 225 link->conf.ConfigIndex = 0x61;
235 226
@@ -241,11 +232,15 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
241 if (i != 0) { 232 if (i != 0) {
242 /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */ 233 /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */
243 link->io.BasePort1 = 0xA24; 234 link->io.BasePort1 = 0xA24;
244 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); 235 ret = pcmcia_request_io(link, &link->io);
236 if (ret)
237 goto failed;
245 } 238 }
246 dev->base_addr = link->io.BasePort1; 239 dev->base_addr = link->io.BasePort1;
247 240
248 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 241 ret = pcmcia_request_irq(link, &link->irq);
242 if (ret)
243 goto failed;
249 dev->irq = link->irq.AssignedIRQ; 244 dev->irq = link->irq.AssignedIRQ;
250 ti->irq = link->irq.AssignedIRQ; 245 ti->irq = link->irq.AssignedIRQ;
251 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq); 246 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
@@ -256,11 +251,15 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
256 req.Base = 0; 251 req.Base = 0;
257 req.Size = 0x2000; 252 req.Size = 0x2000;
258 req.AccessSpeed = 250; 253 req.AccessSpeed = 250;
259 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); 254 ret = pcmcia_request_window(link, &req, &link->win);
255 if (ret)
256 goto failed;
260 257
261 mem.CardOffset = mmiobase; 258 mem.CardOffset = mmiobase;
262 mem.Page = 0; 259 mem.Page = 0;
263 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 260 ret = pcmcia_map_mem_page(link, link->win, &mem);
261 if (ret)
262 goto failed;
264 ti->mmio = ioremap(req.Base, req.Size); 263 ti->mmio = ioremap(req.Base, req.Size);
265 264
266 /* Allocate the SRAM memory window */ 265 /* Allocate the SRAM memory window */
@@ -269,17 +268,23 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
269 req.Base = 0; 268 req.Base = 0;
270 req.Size = sramsize * 1024; 269 req.Size = sramsize * 1024;
271 req.AccessSpeed = 250; 270 req.AccessSpeed = 250;
272 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &info->sram_win_handle)); 271 ret = pcmcia_request_window(link, &req, &info->sram_win_handle);
272 if (ret)
273 goto failed;
273 274
274 mem.CardOffset = srambase; 275 mem.CardOffset = srambase;
275 mem.Page = 0; 276 mem.Page = 0;
276 CS_CHECK(MapMemPage, pcmcia_map_mem_page(info->sram_win_handle, &mem)); 277 ret = pcmcia_map_mem_page(link, info->sram_win_handle, &mem);
278 if (ret)
279 goto failed;
277 280
278 ti->sram_base = mem.CardOffset >> 12; 281 ti->sram_base = mem.CardOffset >> 12;
279 ti->sram_virt = ioremap(req.Base, req.Size); 282 ti->sram_virt = ioremap(req.Base, req.Size);
280 ti->sram_phys = req.Base; 283 ti->sram_phys = req.Base;
281 284
282 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 285 ret = pcmcia_request_configuration(link, &link->conf);
286 if (ret)
287 goto failed;
283 288
284 /* Set up the Token-Ring Controller Configuration Register and 289 /* Set up the Token-Ring Controller Configuration Register and
285 turn on the card. Check the "Local Area Network Credit Card 290 turn on the card. Check the "Local Area Network Credit Card
@@ -287,7 +292,7 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
287 ibmtr_hw_setup(dev, mmiobase); 292 ibmtr_hw_setup(dev, mmiobase);
288 293
289 link->dev_node = &info->node; 294 link->dev_node = &info->node;
290 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 295 SET_NETDEV_DEV(dev, &link->dev);
291 296
292 i = ibmtr_probe_card(dev); 297 i = ibmtr_probe_card(dev);
293 if (i != 0) { 298 if (i != 0) {
@@ -305,8 +310,6 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
305 dev->dev_addr); 310 dev->dev_addr);
306 return 0; 311 return 0;
307 312
308cs_failed:
309 cs_error(link, last_fn, last_ret);
310failed: 313failed:
311 ibmtr_release(link); 314 ibmtr_release(link);
312 return -ENODEV; 315 return -ENODEV;
@@ -325,12 +328,12 @@ static void ibmtr_release(struct pcmcia_device *link)
325 ibmtr_dev_t *info = link->priv; 328 ibmtr_dev_t *info = link->priv;
326 struct net_device *dev = info->dev; 329 struct net_device *dev = info->dev;
327 330
328 DEBUG(0, "ibmtr_release(0x%p)\n", link); 331 dev_dbg(&link->dev, "ibmtr_release\n");
329 332
330 if (link->win) { 333 if (link->win) {
331 struct tok_info *ti = netdev_priv(dev); 334 struct tok_info *ti = netdev_priv(dev);
332 iounmap(ti->mmio); 335 iounmap(ti->mmio);
333 pcmcia_release_window(info->sram_win_handle); 336 pcmcia_release_window(link, info->sram_win_handle);
334 } 337 }
335 pcmcia_disable_device(link); 338 pcmcia_disable_device(link);
336} 339}
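The ibmtr_cs conversion above also drops IRQ_HANDLE_PRESENT and the irq.Instance back-pointer: the handler now receives the driver's private data (link->priv) as dev_id, so the driver gains a small trampoline that recovers the struct net_device before handing off to tok_interrupt(). A sketch of that pattern is below; the example_* names are illustrative, while ibmtr_dev_t and tok_interrupt() are the driver's own declarations, as in the hunk adding ibmtr_interrupt().

    /* Sketch: trampoline interrupt handler in the style of the
     * ibmtr_interrupt() added above.  dev_id is link->priv. */
    static irqreturn_t example_trampoline_irq(int irq, void *dev_id)
    {
            ibmtr_dev_t *info = dev_id;
            struct net_device *dev = info->dev;

            /* The shared token-ring handler still expects the net_device. */
            return tok_interrupt(irq, dev);
    }

    static void example_wire_irq(struct pcmcia_device *link,
                                 ibmtr_dev_t *info, struct net_device *dev)
    {
            /* IRQ_HANDLE_PRESENT and link->irq.Instance are gone; the core
             * passes the driver's private pointer to the handler instead. */
            link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
            link->irq.Handler = example_trampoline_irq;
            info->dev = dev;
    }
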
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index b12e69592d18..8a5ae3b182ed 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -381,13 +381,6 @@ typedef struct _mace_private {
381Private Global Variables 381Private Global Variables
382---------------------------------------------------------------------------- */ 382---------------------------------------------------------------------------- */
383 383
384#ifdef PCMCIA_DEBUG
385static char rcsid[] =
386"nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao";
387static char *version =
388DRV_NAME " " DRV_VERSION " (Roger C. Pao)";
389#endif
390
391static const char *if_names[]={ 384static const char *if_names[]={
392 "Auto", "10baseT", "BNC", 385 "Auto", "10baseT", "BNC",
393}; 386};
@@ -406,12 +399,6 @@ MODULE_LICENSE("GPL");
406/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */ 399/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */
407INT_MODULE_PARM(if_port, 0); 400INT_MODULE_PARM(if_port, 0);
408 401
409#ifdef PCMCIA_DEBUG
410INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
411#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
412#else
413#define DEBUG(n, args...)
414#endif
415 402
416/* ---------------------------------------------------------------------------- 403/* ----------------------------------------------------------------------------
417Function Prototypes 404Function Prototypes
@@ -462,8 +449,7 @@ static int nmclan_probe(struct pcmcia_device *link)
462 mace_private *lp; 449 mace_private *lp;
463 struct net_device *dev; 450 struct net_device *dev;
464 451
465 DEBUG(0, "nmclan_attach()\n"); 452 dev_dbg(&link->dev, "nmclan_attach()\n");
466 DEBUG(1, "%s\n", rcsid);
467 453
468 /* Create new ethernet device */ 454 /* Create new ethernet device */
469 dev = alloc_etherdev(sizeof(mace_private)); 455 dev = alloc_etherdev(sizeof(mace_private));
@@ -477,10 +463,8 @@ static int nmclan_probe(struct pcmcia_device *link)
477 link->io.NumPorts1 = 32; 463 link->io.NumPorts1 = 32;
478 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 464 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
479 link->io.IOAddrLines = 5; 465 link->io.IOAddrLines = 5;
480 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 466 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
481 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
482 link->irq.Handler = mace_interrupt; 467 link->irq.Handler = mace_interrupt;
483 link->irq.Instance = dev;
484 link->conf.Attributes = CONF_ENABLE_IRQ; 468 link->conf.Attributes = CONF_ENABLE_IRQ;
485 link->conf.IntType = INT_MEMORY_AND_IO; 469 link->conf.IntType = INT_MEMORY_AND_IO;
486 link->conf.ConfigIndex = 1; 470 link->conf.ConfigIndex = 1;
@@ -507,7 +491,7 @@ static void nmclan_detach(struct pcmcia_device *link)
507{ 491{
508 struct net_device *dev = link->priv; 492 struct net_device *dev = link->priv;
509 493
510 DEBUG(0, "nmclan_detach(0x%p)\n", link); 494 dev_dbg(&link->dev, "nmclan_detach\n");
511 495
512 if (link->dev_node) 496 if (link->dev_node)
513 unregister_netdev(dev); 497 unregister_netdev(dev);
@@ -654,37 +638,40 @@ nmclan_config
654 ethernet device available to the system. 638 ethernet device available to the system.
655---------------------------------------------------------------------------- */ 639---------------------------------------------------------------------------- */
656 640
657#define CS_CHECK(fn, ret) \
658 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
659
660static int nmclan_config(struct pcmcia_device *link) 641static int nmclan_config(struct pcmcia_device *link)
661{ 642{
662 struct net_device *dev = link->priv; 643 struct net_device *dev = link->priv;
663 mace_private *lp = netdev_priv(dev); 644 mace_private *lp = netdev_priv(dev);
664 tuple_t tuple; 645 u8 *buf;
665 u_char buf[64]; 646 size_t len;
666 int i, last_ret, last_fn; 647 int i, ret;
667 unsigned int ioaddr; 648 unsigned int ioaddr;
668 649
669 DEBUG(0, "nmclan_config(0x%p)\n", link); 650 dev_dbg(&link->dev, "nmclan_config\n");
651
652 ret = pcmcia_request_io(link, &link->io);
653 if (ret)
654 goto failed;
655 ret = pcmcia_request_irq(link, &link->irq);
656 if (ret)
657 goto failed;
658 ret = pcmcia_request_configuration(link, &link->conf);
659 if (ret)
660 goto failed;
670 661
671 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
672 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
673 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
674 dev->irq = link->irq.AssignedIRQ; 662 dev->irq = link->irq.AssignedIRQ;
675 dev->base_addr = link->io.BasePort1; 663 dev->base_addr = link->io.BasePort1;
676 664
677 ioaddr = dev->base_addr; 665 ioaddr = dev->base_addr;
678 666
679 /* Read the ethernet address from the CIS. */ 667 /* Read the ethernet address from the CIS. */
680 tuple.DesiredTuple = 0x80 /* CISTPL_CFTABLE_ENTRY_MISC */; 668 len = pcmcia_get_tuple(link, 0x80, &buf);
681 tuple.TupleData = buf; 669 if (!buf || len < ETHER_ADDR_LEN) {
682 tuple.TupleDataMax = 64; 670 kfree(buf);
683 tuple.TupleOffset = 0; 671 goto failed;
684 tuple.Attributes = 0; 672 }
685 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 673 memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
686 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 674 kfree(buf);
687 memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
688 675
689 /* Verify configuration by reading the MACE ID. */ 676 /* Verify configuration by reading the MACE ID. */
690 { 677 {
@@ -693,7 +680,7 @@ static int nmclan_config(struct pcmcia_device *link)
693 sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL); 680 sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL);
694 sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH); 681 sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH);
695 if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) { 682 if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
696 DEBUG(0, "nmclan_cs configured: mace id=%x %x\n", 683 dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
697 sig[0], sig[1]); 684 sig[0], sig[1]);
698 } else { 685 } else {
699 printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should" 686 printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
@@ -712,7 +699,7 @@ static int nmclan_config(struct pcmcia_device *link)
712 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n"); 699 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
713 700
714 link->dev_node = &lp->node; 701 link->dev_node = &lp->node;
715 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 702 SET_NETDEV_DEV(dev, &link->dev);
716 703
717 i = register_netdev(dev); 704 i = register_netdev(dev);
718 if (i != 0) { 705 if (i != 0) {
@@ -729,8 +716,6 @@ static int nmclan_config(struct pcmcia_device *link)
729 dev->dev_addr); 716 dev->dev_addr);
730 return 0; 717 return 0;
731 718
732cs_failed:
733 cs_error(link, last_fn, last_ret);
734failed: 719failed:
735 nmclan_release(link); 720 nmclan_release(link);
736 return -ENODEV; 721 return -ENODEV;
@@ -744,7 +729,7 @@ nmclan_release
744---------------------------------------------------------------------------- */ 729---------------------------------------------------------------------------- */
745static void nmclan_release(struct pcmcia_device *link) 730static void nmclan_release(struct pcmcia_device *link)
746{ 731{
747 DEBUG(0, "nmclan_release(0x%p)\n", link); 732 dev_dbg(&link->dev, "nmclan_release\n");
748 pcmcia_disable_device(link); 733 pcmcia_disable_device(link);
749} 734}
750 735
@@ -795,7 +780,7 @@ static void nmclan_reset(struct net_device *dev)
795 /* Reset Xilinx */ 780 /* Reset Xilinx */
796 reg.Action = CS_WRITE; 781 reg.Action = CS_WRITE;
797 reg.Offset = CISREG_COR; 782 reg.Offset = CISREG_COR;
798 DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n", 783 dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
799 OrigCorValue); 784 OrigCorValue);
800 reg.Value = COR_SOFT_RESET; 785 reg.Value = COR_SOFT_RESET;
801 pcmcia_access_configuration_register(link, &reg); 786 pcmcia_access_configuration_register(link, &reg);
@@ -872,7 +857,7 @@ static int mace_close(struct net_device *dev)
872 mace_private *lp = netdev_priv(dev); 857 mace_private *lp = netdev_priv(dev);
873 struct pcmcia_device *link = lp->p_dev; 858 struct pcmcia_device *link = lp->p_dev;
874 859
875 DEBUG(2, "%s: shutting down ethercard.\n", dev->name); 860 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
876 861
877 /* Mask off all interrupts from the MACE chip. */ 862 /* Mask off all interrupts from the MACE chip. */
878 outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR); 863 outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);
@@ -891,24 +876,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
891 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); 876 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
892} 877}
893 878
894#ifdef PCMCIA_DEBUG
895static u32 netdev_get_msglevel(struct net_device *dev)
896{
897 return pc_debug;
898}
899
900static void netdev_set_msglevel(struct net_device *dev, u32 level)
901{
902 pc_debug = level;
903}
904#endif /* PCMCIA_DEBUG */
905
906static const struct ethtool_ops netdev_ethtool_ops = { 879static const struct ethtool_ops netdev_ethtool_ops = {
907 .get_drvinfo = netdev_get_drvinfo, 880 .get_drvinfo = netdev_get_drvinfo,
908#ifdef PCMCIA_DEBUG
909 .get_msglevel = netdev_get_msglevel,
910 .set_msglevel = netdev_set_msglevel,
911#endif /* PCMCIA_DEBUG */
912}; 881};
913 882
914/* ---------------------------------------------------------------------------- 883/* ----------------------------------------------------------------------------
@@ -946,7 +915,7 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
946 915
947 netif_stop_queue(dev); 916 netif_stop_queue(dev);
948 917
949 DEBUG(3, "%s: mace_start_xmit(length = %ld) called.\n", 918 pr_debug("%s: mace_start_xmit(length = %ld) called.\n",
950 dev->name, (long)skb->len); 919 dev->name, (long)skb->len);
951 920
952#if (!TX_INTERRUPTABLE) 921#if (!TX_INTERRUPTABLE)
@@ -1008,7 +977,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
1008 int IntrCnt = MACE_MAX_IR_ITERATIONS; 977 int IntrCnt = MACE_MAX_IR_ITERATIONS;
1009 978
1010 if (dev == NULL) { 979 if (dev == NULL) {
1011 DEBUG(2, "mace_interrupt(): irq 0x%X for unknown device.\n", 980 pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
1012 irq); 981 irq);
1013 return IRQ_NONE; 982 return IRQ_NONE;
1014 } 983 }
@@ -1031,7 +1000,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
1031 } 1000 }
1032 1001
1033 if (!netif_device_present(dev)) { 1002 if (!netif_device_present(dev)) {
1034 DEBUG(2, "%s: interrupt from dead card\n", dev->name); 1003 pr_debug("%s: interrupt from dead card\n", dev->name);
1035 return IRQ_NONE; 1004 return IRQ_NONE;
1036 } 1005 }
1037 1006
@@ -1039,7 +1008,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
1039 /* WARNING: MACE_IR is a READ/CLEAR port! */ 1008 /* WARNING: MACE_IR is a READ/CLEAR port! */
1040 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); 1009 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
1041 1010
1042 DEBUG(3, "mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); 1011 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
1043 1012
1044 if (status & MACE_IR_RCVINT) { 1013 if (status & MACE_IR_RCVINT) {
1045 mace_rx(dev, MACE_MAX_RX_ITERATIONS); 1014 mace_rx(dev, MACE_MAX_RX_ITERATIONS);
@@ -1158,7 +1127,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1158 ) { 1127 ) {
1159 rx_status = inw(ioaddr + AM2150_RCV); 1128 rx_status = inw(ioaddr + AM2150_RCV);
1160 1129
1161 DEBUG(3, "%s: in mace_rx(), framecnt 0x%X, rx_status" 1130 pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status"
1162 " 0x%X.\n", dev->name, rx_framecnt, rx_status); 1131 " 0x%X.\n", dev->name, rx_framecnt, rx_status);
1163 1132
1164 if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */ 1133 if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
@@ -1185,7 +1154,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1185 lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); 1154 lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
1186 /* rcv collision count */ 1155 /* rcv collision count */
1187 1156
1188 DEBUG(3, " receiving packet size 0x%X rx_status" 1157 pr_debug(" receiving packet size 0x%X rx_status"
1189 " 0x%X.\n", pkt_len, rx_status); 1158 " 0x%X.\n", pkt_len, rx_status);
1190 1159
1191 skb = dev_alloc_skb(pkt_len+2); 1160 skb = dev_alloc_skb(pkt_len+2);
@@ -1204,7 +1173,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1204 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ 1173 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
1205 continue; 1174 continue;
1206 } else { 1175 } else {
1207 DEBUG(1, "%s: couldn't allocate a sk_buff of size" 1176 pr_debug("%s: couldn't allocate a sk_buff of size"
1208 " %d.\n", dev->name, pkt_len); 1177 " %d.\n", dev->name, pkt_len);
1209 lp->linux_stats.rx_dropped++; 1178 lp->linux_stats.rx_dropped++;
1210 } 1179 }
@@ -1220,28 +1189,28 @@ pr_linux_stats
1220---------------------------------------------------------------------------- */ 1189---------------------------------------------------------------------------- */
1221static void pr_linux_stats(struct net_device_stats *pstats) 1190static void pr_linux_stats(struct net_device_stats *pstats)
1222{ 1191{
1223 DEBUG(2, "pr_linux_stats\n"); 1192 pr_debug("pr_linux_stats\n");
1224 DEBUG(2, " rx_packets=%-7ld tx_packets=%ld\n", 1193 pr_debug(" rx_packets=%-7ld tx_packets=%ld\n",
1225 (long)pstats->rx_packets, (long)pstats->tx_packets); 1194 (long)pstats->rx_packets, (long)pstats->tx_packets);
1226 DEBUG(2, " rx_errors=%-7ld tx_errors=%ld\n", 1195 pr_debug(" rx_errors=%-7ld tx_errors=%ld\n",
1227 (long)pstats->rx_errors, (long)pstats->tx_errors); 1196 (long)pstats->rx_errors, (long)pstats->tx_errors);
1228 DEBUG(2, " rx_dropped=%-7ld tx_dropped=%ld\n", 1197 pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n",
1229 (long)pstats->rx_dropped, (long)pstats->tx_dropped); 1198 (long)pstats->rx_dropped, (long)pstats->tx_dropped);
1230 DEBUG(2, " multicast=%-7ld collisions=%ld\n", 1199 pr_debug(" multicast=%-7ld collisions=%ld\n",
1231 (long)pstats->multicast, (long)pstats->collisions); 1200 (long)pstats->multicast, (long)pstats->collisions);
1232 1201
1233 DEBUG(2, " rx_length_errors=%-7ld rx_over_errors=%ld\n", 1202 pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n",
1234 (long)pstats->rx_length_errors, (long)pstats->rx_over_errors); 1203 (long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
1235 DEBUG(2, " rx_crc_errors=%-7ld rx_frame_errors=%ld\n", 1204 pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
1236 (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors); 1205 (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
1237 DEBUG(2, " rx_fifo_errors=%-7ld rx_missed_errors=%ld\n", 1206 pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
1238 (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors); 1207 (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);
1239 1208
1240 DEBUG(2, " tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n", 1209 pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
1241 (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors); 1210 (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
1242 DEBUG(2, " tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n", 1211 pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
1243 (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors); 1212 (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
1244 DEBUG(2, " tx_window_errors=%ld\n", 1213 pr_debug(" tx_window_errors=%ld\n",
1245 (long)pstats->tx_window_errors); 1214 (long)pstats->tx_window_errors);
1246} /* pr_linux_stats */ 1215} /* pr_linux_stats */
1247 1216
@@ -1250,48 +1219,48 @@ pr_mace_stats
1250---------------------------------------------------------------------------- */ 1219---------------------------------------------------------------------------- */
1251static void pr_mace_stats(mace_statistics *pstats) 1220static void pr_mace_stats(mace_statistics *pstats)
1252{ 1221{
1253 DEBUG(2, "pr_mace_stats\n"); 1222 pr_debug("pr_mace_stats\n");
1254 1223
1255 DEBUG(2, " xmtsv=%-7d uflo=%d\n", 1224 pr_debug(" xmtsv=%-7d uflo=%d\n",
1256 pstats->xmtsv, pstats->uflo); 1225 pstats->xmtsv, pstats->uflo);
1257 DEBUG(2, " lcol=%-7d more=%d\n", 1226 pr_debug(" lcol=%-7d more=%d\n",
1258 pstats->lcol, pstats->more); 1227 pstats->lcol, pstats->more);
1259 DEBUG(2, " one=%-7d defer=%d\n", 1228 pr_debug(" one=%-7d defer=%d\n",
1260 pstats->one, pstats->defer); 1229 pstats->one, pstats->defer);
1261 DEBUG(2, " lcar=%-7d rtry=%d\n", 1230 pr_debug(" lcar=%-7d rtry=%d\n",
1262 pstats->lcar, pstats->rtry); 1231 pstats->lcar, pstats->rtry);
1263 1232
1264 /* MACE_XMTRC */ 1233 /* MACE_XMTRC */
1265 DEBUG(2, " exdef=%-7d xmtrc=%d\n", 1234 pr_debug(" exdef=%-7d xmtrc=%d\n",
1266 pstats->exdef, pstats->xmtrc); 1235 pstats->exdef, pstats->xmtrc);
1267 1236
1268 /* RFS1--Receive Status (RCVSTS) */ 1237 /* RFS1--Receive Status (RCVSTS) */
1269 DEBUG(2, " oflo=%-7d clsn=%d\n", 1238 pr_debug(" oflo=%-7d clsn=%d\n",
1270 pstats->oflo, pstats->clsn); 1239 pstats->oflo, pstats->clsn);
1271 DEBUG(2, " fram=%-7d fcs=%d\n", 1240 pr_debug(" fram=%-7d fcs=%d\n",
1272 pstats->fram, pstats->fcs); 1241 pstats->fram, pstats->fcs);
1273 1242
1274 /* RFS2--Runt Packet Count (RNTPC) */ 1243 /* RFS2--Runt Packet Count (RNTPC) */
1275 /* RFS3--Receive Collision Count (RCVCC) */ 1244 /* RFS3--Receive Collision Count (RCVCC) */
1276 DEBUG(2, " rfs_rntpc=%-7d rfs_rcvcc=%d\n", 1245 pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n",
1277 pstats->rfs_rntpc, pstats->rfs_rcvcc); 1246 pstats->rfs_rntpc, pstats->rfs_rcvcc);
1278 1247
1279 /* MACE_IR */ 1248 /* MACE_IR */
1280 DEBUG(2, " jab=%-7d babl=%d\n", 1249 pr_debug(" jab=%-7d babl=%d\n",
1281 pstats->jab, pstats->babl); 1250 pstats->jab, pstats->babl);
1282 DEBUG(2, " cerr=%-7d rcvcco=%d\n", 1251 pr_debug(" cerr=%-7d rcvcco=%d\n",
1283 pstats->cerr, pstats->rcvcco); 1252 pstats->cerr, pstats->rcvcco);
1284 DEBUG(2, " rntpco=%-7d mpco=%d\n", 1253 pr_debug(" rntpco=%-7d mpco=%d\n",
1285 pstats->rntpco, pstats->mpco); 1254 pstats->rntpco, pstats->mpco);
1286 1255
1287 /* MACE_MPC */ 1256 /* MACE_MPC */
1288 DEBUG(2, " mpc=%d\n", pstats->mpc); 1257 pr_debug(" mpc=%d\n", pstats->mpc);
1289 1258
1290 /* MACE_RNTPC */ 1259 /* MACE_RNTPC */
1291 DEBUG(2, " rntpc=%d\n", pstats->rntpc); 1260 pr_debug(" rntpc=%d\n", pstats->rntpc);
1292 1261
1293 /* MACE_RCVCC */ 1262 /* MACE_RCVCC */
1294 DEBUG(2, " rcvcc=%d\n", pstats->rcvcc); 1263 pr_debug(" rcvcc=%d\n", pstats->rcvcc);
1295 1264
1296} /* pr_mace_stats */ 1265} /* pr_mace_stats */
1297 1266
@@ -1360,7 +1329,7 @@ static struct net_device_stats *mace_get_stats(struct net_device *dev)
1360 1329
1361 update_stats(dev->base_addr, dev); 1330 update_stats(dev->base_addr, dev);
1362 1331
1363 DEBUG(1, "%s: updating the statistics.\n", dev->name); 1332 pr_debug("%s: updating the statistics.\n", dev->name);
1364 pr_linux_stats(&lp->linux_stats); 1333 pr_linux_stats(&lp->linux_stats);
1365 pr_mace_stats(&lp->mace_stats); 1334 pr_mace_stats(&lp->mace_stats);
1366 1335
@@ -1427,7 +1396,7 @@ static void BuildLAF(int *ladrf, int *adr)
1427 ladrf[byte] |= (1 << (hashcode & 7)); 1396 ladrf[byte] |= (1 << (hashcode & 7));
1428 1397
1429#ifdef PCMCIA_DEBUG 1398#ifdef PCMCIA_DEBUG
1430 if (pc_debug > 2) 1399 if (0)
1431 printk(KERN_DEBUG " adr =%pM\n", adr); 1400 printk(KERN_DEBUG " adr =%pM\n", adr);
1432 printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); 1401 printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
1433 for (i = 0; i < 8; i++) 1402 for (i = 0; i < 8; i++)
@@ -1454,12 +1423,12 @@ static void restore_multicast_list(struct net_device *dev)
1454 unsigned int ioaddr = dev->base_addr; 1423 unsigned int ioaddr = dev->base_addr;
1455 int i; 1424 int i;
1456 1425
1457 DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", 1426 pr_debug("%s: restoring Rx mode to %d addresses.\n",
1458 dev->name, num_addrs); 1427 dev->name, num_addrs);
1459 1428
1460 if (num_addrs > 0) { 1429 if (num_addrs > 0) {
1461 1430
1462 DEBUG(1, "Attempt to restore multicast list detected.\n"); 1431 pr_debug("Attempt to restore multicast list detected.\n");
1463 1432
1464 mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR); 1433 mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
1465 /* Poll ADDRCHG bit */ 1434 /* Poll ADDRCHG bit */
@@ -1511,11 +1480,11 @@ static void set_multicast_list(struct net_device *dev)
1511 struct dev_mc_list *dmi = dev->mc_list; 1480 struct dev_mc_list *dmi = dev->mc_list;
1512 1481
1513#ifdef PCMCIA_DEBUG 1482#ifdef PCMCIA_DEBUG
1514 if (pc_debug > 1) { 1483 {
1515 static int old; 1484 static int old;
1516 if (dev->mc_count != old) { 1485 if (dev->mc_count != old) {
1517 old = dev->mc_count; 1486 old = dev->mc_count;
1518 DEBUG(0, "%s: setting Rx mode to %d addresses.\n", 1487 pr_debug("%s: setting Rx mode to %d addresses.\n",
1519 dev->name, old); 1488 dev->name, old);
1520 } 1489 }
1521 } 1490 }
@@ -1546,7 +1515,7 @@ static void restore_multicast_list(struct net_device *dev)
1546 unsigned int ioaddr = dev->base_addr; 1515 unsigned int ioaddr = dev->base_addr;
1547 mace_private *lp = netdev_priv(dev); 1516 mace_private *lp = netdev_priv(dev);
1548 1517
1549 DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name, 1518 pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name,
1550 lp->multicast_num_addrs); 1519 lp->multicast_num_addrs);
1551 1520
1552 if (dev->flags & IFF_PROMISC) { 1521 if (dev->flags & IFF_PROMISC) {
@@ -1567,11 +1536,11 @@ static void set_multicast_list(struct net_device *dev)
1567 mace_private *lp = netdev_priv(dev); 1536 mace_private *lp = netdev_priv(dev);
1568 1537
1569#ifdef PCMCIA_DEBUG 1538#ifdef PCMCIA_DEBUG
1570 if (pc_debug > 1) { 1539 {
1571 static int old; 1540 static int old;
1572 if (dev->mc_count != old) { 1541 if (dev->mc_count != old) {
1573 old = dev->mc_count; 1542 old = dev->mc_count;
1574 DEBUG(0, "%s: setting Rx mode to %d addresses.\n", 1543 pr_debug("%s: setting Rx mode to %d addresses.\n",
1575 dev->name, old); 1544 dev->name, old);
1576 } 1545 }
1577 } 1546 }
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 347eaee855c0..2d26b6ca28b9 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -71,15 +71,6 @@
71 71
72static const char *if_names[] = { "auto", "10baseT", "10base2"}; 72static const char *if_names[] = { "auto", "10baseT", "10base2"};
73 73
74#ifdef PCMCIA_DEBUG
75static int pc_debug = PCMCIA_DEBUG;
76module_param(pc_debug, int, 0);
77#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
78static char *version =
79"pcnet_cs.c 1.153 2003/11/09 18:53:09 (David Hinds)";
80#else
81#define DEBUG(n, args...)
82#endif
83 74
84/*====================================================================*/ 75/*====================================================================*/
85 76
@@ -265,7 +256,7 @@ static int pcnet_probe(struct pcmcia_device *link)
265 pcnet_dev_t *info; 256 pcnet_dev_t *info;
266 struct net_device *dev; 257 struct net_device *dev;
267 258
268 DEBUG(0, "pcnet_attach()\n"); 259 dev_dbg(&link->dev, "pcnet_attach()\n");
269 260
270 /* Create new ethernet device */ 261 /* Create new ethernet device */
271 dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); 262 dev = __alloc_ei_netdev(sizeof(pcnet_dev_t));
@@ -275,7 +266,6 @@ static int pcnet_probe(struct pcmcia_device *link)
275 link->priv = dev; 266 link->priv = dev;
276 267
277 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 268 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
278 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
279 link->conf.Attributes = CONF_ENABLE_IRQ; 269 link->conf.Attributes = CONF_ENABLE_IRQ;
280 link->conf.IntType = INT_MEMORY_AND_IO; 270 link->conf.IntType = INT_MEMORY_AND_IO;
281 271
@@ -297,7 +287,7 @@ static void pcnet_detach(struct pcmcia_device *link)
297{ 287{
298 struct net_device *dev = link->priv; 288 struct net_device *dev = link->priv;
299 289
300 DEBUG(0, "pcnet_detach(0x%p)\n", link); 290 dev_dbg(&link->dev, "pcnet_detach\n");
301 291
302 if (link->dev_node) 292 if (link->dev_node)
303 unregister_netdev(dev); 293 unregister_netdev(dev);
@@ -326,17 +316,15 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
326 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 316 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
327 req.Base = 0; req.Size = 0; 317 req.Base = 0; req.Size = 0;
328 req.AccessSpeed = 0; 318 req.AccessSpeed = 0;
329 i = pcmcia_request_window(&link, &req, &link->win); 319 i = pcmcia_request_window(link, &req, &link->win);
330 if (i != 0) { 320 if (i != 0)
331 cs_error(link, RequestWindow, i);
332 return NULL; 321 return NULL;
333 }
334 322
335 virt = ioremap(req.Base, req.Size); 323 virt = ioremap(req.Base, req.Size);
336 mem.Page = 0; 324 mem.Page = 0;
337 for (i = 0; i < NR_INFO; i++) { 325 for (i = 0; i < NR_INFO; i++) {
338 mem.CardOffset = hw_info[i].offset & ~(req.Size-1); 326 mem.CardOffset = hw_info[i].offset & ~(req.Size-1);
339 pcmcia_map_mem_page(link->win, &mem); 327 pcmcia_map_mem_page(link, link->win, &mem);
340 base = &virt[hw_info[i].offset & (req.Size-1)]; 328 base = &virt[hw_info[i].offset & (req.Size-1)];
341 if ((readb(base+0) == hw_info[i].a0) && 329 if ((readb(base+0) == hw_info[i].a0) &&
342 (readb(base+2) == hw_info[i].a1) && 330 (readb(base+2) == hw_info[i].a1) &&
@@ -348,9 +336,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
348 } 336 }
349 337
350 iounmap(virt); 338 iounmap(virt);
351 j = pcmcia_release_window(link->win); 339 j = pcmcia_release_window(link, link->win);
352 if (j != 0)
353 cs_error(link, ReleaseWindow, j);
354 return (i < NR_INFO) ? hw_info+i : NULL; 340 return (i < NR_INFO) ? hw_info+i : NULL;
355} /* get_hwinfo */ 341} /* get_hwinfo */
356 342
@@ -495,9 +481,6 @@ static hw_info_t *get_hwired(struct pcmcia_device *link)
495 481
496======================================================================*/ 482======================================================================*/
497 483
498#define CS_CHECK(fn, ret) \
499do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
500
501static int try_io_port(struct pcmcia_device *link) 484static int try_io_port(struct pcmcia_device *link)
502{ 485{
503 int j, ret; 486 int j, ret;
@@ -567,19 +550,19 @@ static int pcnet_config(struct pcmcia_device *link)
567{ 550{
568 struct net_device *dev = link->priv; 551 struct net_device *dev = link->priv;
569 pcnet_dev_t *info = PRIV(dev); 552 pcnet_dev_t *info = PRIV(dev);
570 int last_ret, last_fn, start_pg, stop_pg, cm_offset; 553 int ret, start_pg, stop_pg, cm_offset;
571 int has_shmem = 0; 554 int has_shmem = 0;
572 hw_info_t *local_hw_info; 555 hw_info_t *local_hw_info;
573 556
574 DEBUG(0, "pcnet_config(0x%p)\n", link); 557 dev_dbg(&link->dev, "pcnet_config\n");
575 558
576 last_ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); 559 ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem);
577 if (last_ret) { 560 if (ret)
578 cs_error(link, RequestIO, last_ret);
579 goto failed; 561 goto failed;
580 }
581 562
582 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 563 ret = pcmcia_request_irq(link, &link->irq);
564 if (ret)
565 goto failed;
583 566
584 if (link->io.NumPorts2 == 8) { 567 if (link->io.NumPorts2 == 8) {
585 link->conf.Attributes |= CONF_ENABLE_SPKR; 568 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -589,7 +572,9 @@ static int pcnet_config(struct pcmcia_device *link)
589 (link->card_id == PRODID_IBM_HOME_AND_AWAY)) 572 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
590 link->conf.ConfigIndex |= 0x10; 573 link->conf.ConfigIndex |= 0x10;
591 574
592 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 575 ret = pcmcia_request_configuration(link, &link->conf);
576 if (ret)
577 goto failed;
593 dev->irq = link->irq.AssignedIRQ; 578 dev->irq = link->irq.AssignedIRQ;
594 dev->base_addr = link->io.BasePort1; 579 dev->base_addr = link->io.BasePort1;
595 if (info->flags & HAS_MISC_REG) { 580 if (info->flags & HAS_MISC_REG) {
@@ -660,7 +645,7 @@ static int pcnet_config(struct pcmcia_device *link)
660 mii_phy_probe(dev); 645 mii_phy_probe(dev);
661 646
662 link->dev_node = &info->node; 647 link->dev_node = &info->node;
663 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 648 SET_NETDEV_DEV(dev, &link->dev);
664 649
665 if (register_netdev(dev) != 0) { 650 if (register_netdev(dev) != 0) {
666 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); 651 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
@@ -687,8 +672,6 @@ static int pcnet_config(struct pcmcia_device *link)
687 printk(" hw_addr %pM\n", dev->dev_addr); 672 printk(" hw_addr %pM\n", dev->dev_addr);
688 return 0; 673 return 0;
689 674
690cs_failed:
691 cs_error(link, last_fn, last_ret);
692failed: 675failed:
693 pcnet_release(link); 676 pcnet_release(link);
694 return -ENODEV; 677 return -ENODEV;
@@ -706,7 +689,7 @@ static void pcnet_release(struct pcmcia_device *link)
706{ 689{
707 pcnet_dev_t *info = PRIV(link->priv); 690 pcnet_dev_t *info = PRIV(link->priv);
708 691
709 DEBUG(0, "pcnet_release(0x%p)\n", link); 692 dev_dbg(&link->dev, "pcnet_release\n");
710 693
711 if (info->flags & USE_SHMEM) 694 if (info->flags & USE_SHMEM)
712 iounmap(info->base); 695 iounmap(info->base);
@@ -960,7 +943,7 @@ static void mii_phy_probe(struct net_device *dev)
960 phyid = tmp << 16; 943 phyid = tmp << 16;
961 phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2); 944 phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
962 phyid &= MII_PHYID_REV_MASK; 945 phyid &= MII_PHYID_REV_MASK;
963 DEBUG(0, "%s: MII at %d is 0x%08x\n", dev->name, i, phyid); 946 pr_debug("%s: MII at %d is 0x%08x\n", dev->name, i, phyid);
964 if (phyid == AM79C9XX_HOME_PHY) { 947 if (phyid == AM79C9XX_HOME_PHY) {
965 info->pna_phy = i; 948 info->pna_phy = i;
966 } else if (phyid != AM79C9XX_ETH_PHY) { 949 } else if (phyid != AM79C9XX_ETH_PHY) {
@@ -976,7 +959,7 @@ static int pcnet_open(struct net_device *dev)
976 struct pcmcia_device *link = info->p_dev; 959 struct pcmcia_device *link = info->p_dev;
977 unsigned int nic_base = dev->base_addr; 960 unsigned int nic_base = dev->base_addr;
978 961
979 DEBUG(2, "pcnet_open('%s')\n", dev->name); 962 dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name);
980 963
981 if (!pcmcia_dev_present(link)) 964 if (!pcmcia_dev_present(link))
982 return -ENODEV; 965 return -ENODEV;
@@ -1008,7 +991,7 @@ static int pcnet_close(struct net_device *dev)
1008 pcnet_dev_t *info = PRIV(dev); 991 pcnet_dev_t *info = PRIV(dev);
1009 struct pcmcia_device *link = info->p_dev; 992 struct pcmcia_device *link = info->p_dev;
1010 993
1011 DEBUG(2, "pcnet_close('%s')\n", dev->name); 994 dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name);
1012 995
1013 ei_close(dev); 996 ei_close(dev);
1014 free_irq(dev->irq, dev); 997 free_irq(dev->irq, dev);
@@ -1251,10 +1234,8 @@ static void dma_block_input(struct net_device *dev, int count,
1251 int xfer_count = count; 1234 int xfer_count = count;
1252 char *buf = skb->data; 1235 char *buf = skb->data;
1253 1236
1254#ifdef PCMCIA_DEBUG
1255 if ((ei_debug > 4) && (count != 4)) 1237 if ((ei_debug > 4) && (count != 4))
1256 printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4); 1238 pr_debug("%s: [bi=%d]\n", dev->name, count+4);
1257#endif
1258 if (ei_status.dmaing) { 1239 if (ei_status.dmaing) {
1259 printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." 1240 printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
1260 "[DMAstat:%1x][irqlock:%1x]\n", 1241 "[DMAstat:%1x][irqlock:%1x]\n",
@@ -1495,7 +1476,7 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1495 pcnet_dev_t *info = PRIV(dev); 1476 pcnet_dev_t *info = PRIV(dev);
1496 win_req_t req; 1477 win_req_t req;
1497 memreq_t mem; 1478 memreq_t mem;
1498 int i, window_size, offset, last_ret, last_fn; 1479 int i, window_size, offset, ret;
1499 1480
1500 window_size = (stop_pg - start_pg) << 8; 1481 window_size = (stop_pg - start_pg) << 8;
1501 if (window_size > 32 * 1024) 1482 if (window_size > 32 * 1024)
@@ -1509,13 +1490,17 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1509 req.Attributes |= WIN_USE_WAIT; 1490 req.Attributes |= WIN_USE_WAIT;
1510 req.Base = 0; req.Size = window_size; 1491 req.Base = 0; req.Size = window_size;
1511 req.AccessSpeed = mem_speed; 1492 req.AccessSpeed = mem_speed;
1512 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); 1493 ret = pcmcia_request_window(link, &req, &link->win);
1494 if (ret)
1495 goto failed;
1513 1496
1514 mem.CardOffset = (start_pg << 8) + cm_offset; 1497 mem.CardOffset = (start_pg << 8) + cm_offset;
1515 offset = mem.CardOffset % window_size; 1498 offset = mem.CardOffset % window_size;
1516 mem.CardOffset -= offset; 1499 mem.CardOffset -= offset;
1517 mem.Page = 0; 1500 mem.Page = 0;
1518 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 1501 ret = pcmcia_map_mem_page(link, link->win, &mem);
1502 if (ret)
1503 goto failed;
1519 1504
1520 /* Try scribbling on the buffer */ 1505 /* Try scribbling on the buffer */
1521 info->base = ioremap(req.Base, window_size); 1506 info->base = ioremap(req.Base, window_size);
@@ -1527,8 +1512,8 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1527 pcnet_reset_8390(dev); 1512 pcnet_reset_8390(dev);
1528 if (i != (TX_PAGES<<8)) { 1513 if (i != (TX_PAGES<<8)) {
1529 iounmap(info->base); 1514 iounmap(info->base);
1530 pcmcia_release_window(link->win); 1515 pcmcia_release_window(link, link->win);
1531 info->base = NULL; link->win = NULL; 1516 info->base = NULL; link->win = 0;
1532 goto failed; 1517 goto failed;
1533 } 1518 }
1534 1519
@@ -1549,8 +1534,6 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1549 info->flags |= USE_SHMEM; 1534 info->flags |= USE_SHMEM;
1550 return 0; 1535 return 0;
1551 1536
1552cs_failed:
1553 cs_error(link, last_fn, last_ret);
1554failed: 1537failed:
1555 return 1; 1538 return 1;
1556} 1539}
@@ -1795,7 +1778,6 @@ static int __init init_pcnet_cs(void)
1795 1778
1796static void __exit exit_pcnet_cs(void) 1779static void __exit exit_pcnet_cs(void)
1797{ 1780{
1798 DEBUG(0, "pcnet_cs: unloading\n");
1799 pcmcia_unregister_driver(&pcnet_driver); 1781 pcmcia_unregister_driver(&pcnet_driver);
1800} 1782}
1801 1783
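Across pcnet_cs, as in the other drivers in this merge, the private DEBUG(n, ...) macro, its pc_debug module parameter and the matching ethtool get/set_msglevel hooks are removed; messages move to pr_debug() and dev_dbg(), which are gated by DEBUG or the dynamic debug facility instead of a per-driver level. A brief before/after sketch of the idiom, with an illustrative message and a hypothetical helper relying on the driver's usual includes:

    /* Old style: compile-time PCMCIA_DEBUG plus a runtime pc_debug level:
     *     DEBUG(2, "pcnet_open('%s')\n", dev->name);
     * New style: dev_dbg() when a struct device is at hand, pr_debug()
     * otherwise; both compile away unless DEBUG or dynamic debug is set. */
    static void example_log(struct pcmcia_device *link, struct net_device *dev)
    {
            dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name);
            pr_debug("%s: illustrative debug message\n", dev->name);
    }
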
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 117b083a10cb..cc4853bc0253 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -79,14 +79,6 @@ MODULE_FIRMWARE(FIRMWARE_NAME);
79*/ 79*/
80INT_MODULE_PARM(if_port, 0); 80INT_MODULE_PARM(if_port, 0);
81 81
82#ifdef PCMCIA_DEBUG
83INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
84static const char *version =
85"smc91c92_cs.c 1.123 2006/11/09 Donald Becker, becker@scyld.com.\n";
86#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
87#else
88#define DEBUG(n, args...)
89#endif
90 82
91#define DRV_NAME "smc91c92_cs" 83#define DRV_NAME "smc91c92_cs"
92#define DRV_VERSION "1.123" 84#define DRV_VERSION "1.123"
@@ -126,12 +118,6 @@ struct smc_private {
126 int rx_ovrn; 118 int rx_ovrn;
127}; 119};
128 120
129struct smc_cfg_mem {
130 tuple_t tuple;
131 cisparse_t parse;
132 u_char buf[255];
133};
134
135/* Special definitions for Megahertz multifunction cards */ 121/* Special definitions for Megahertz multifunction cards */
136#define MEGAHERTZ_ISR 0x0380 122#define MEGAHERTZ_ISR 0x0380
137 123
@@ -329,7 +315,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
329 struct smc_private *smc; 315 struct smc_private *smc;
330 struct net_device *dev; 316 struct net_device *dev;
331 317
332 DEBUG(0, "smc91c92_attach()\n"); 318 dev_dbg(&link->dev, "smc91c92_attach()\n");
333 319
334 /* Create new ethernet device */ 320 /* Create new ethernet device */
335 dev = alloc_etherdev(sizeof(struct smc_private)); 321 dev = alloc_etherdev(sizeof(struct smc_private));
@@ -343,10 +329,8 @@ static int smc91c92_probe(struct pcmcia_device *link)
343 link->io.NumPorts1 = 16; 329 link->io.NumPorts1 = 16;
344 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 330 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
345 link->io.IOAddrLines = 4; 331 link->io.IOAddrLines = 4;
346 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 332 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
347 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
348 link->irq.Handler = &smc_interrupt; 333 link->irq.Handler = &smc_interrupt;
349 link->irq.Instance = dev;
350 link->conf.Attributes = CONF_ENABLE_IRQ; 334 link->conf.Attributes = CONF_ENABLE_IRQ;
351 link->conf.IntType = INT_MEMORY_AND_IO; 335 link->conf.IntType = INT_MEMORY_AND_IO;
352 336
@@ -377,7 +361,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
377{ 361{
378 struct net_device *dev = link->priv; 362 struct net_device *dev = link->priv;
379 363
380 DEBUG(0, "smc91c92_detach(0x%p)\n", link); 364 dev_dbg(&link->dev, "smc91c92_detach\n");
381 365
382 if (link->dev_node) 366 if (link->dev_node)
383 unregister_netdev(dev); 367 unregister_netdev(dev);
@@ -408,34 +392,7 @@ static int cvt_ascii_address(struct net_device *dev, char *s)
408 return 0; 392 return 0;
409} 393}
410 394
411/*====================================================================*/ 395/*====================================================================
412
413static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
414 cisparse_t *parse)
415{
416 int i;
417
418 i = pcmcia_get_first_tuple(handle, tuple);
419 if (i != 0)
420 return i;
421 i = pcmcia_get_tuple_data(handle, tuple);
422 if (i != 0)
423 return i;
424 return pcmcia_parse_tuple(tuple, parse);
425}
426
427static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
428 cisparse_t *parse)
429{
430 int i;
431
432 if ((i = pcmcia_get_next_tuple(handle, tuple)) != 0 ||
433 (i = pcmcia_get_tuple_data(handle, tuple)) != 0)
434 return i;
435 return pcmcia_parse_tuple(tuple, parse);
436}
437
438/*======================================================================
439 396
440 Configuration stuff for Megahertz cards 397 Configuration stuff for Megahertz cards
441 398
@@ -490,19 +447,14 @@ static int mhz_mfc_config(struct pcmcia_device *link)
490{ 447{
491 struct net_device *dev = link->priv; 448 struct net_device *dev = link->priv;
492 struct smc_private *smc = netdev_priv(dev); 449 struct smc_private *smc = netdev_priv(dev);
493 struct smc_cfg_mem *cfg_mem;
494 win_req_t req; 450 win_req_t req;
495 memreq_t mem; 451 memreq_t mem;
496 int i; 452 int i;
497 453
498 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL);
499 if (!cfg_mem)
500 return -ENOMEM;
501
502 link->conf.Attributes |= CONF_ENABLE_SPKR; 454 link->conf.Attributes |= CONF_ENABLE_SPKR;
503 link->conf.Status = CCSR_AUDIO_ENA; 455 link->conf.Status = CCSR_AUDIO_ENA;
504 link->irq.Attributes = 456 link->irq.Attributes =
505 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; 457 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
506 link->io.IOAddrLines = 16; 458 link->io.IOAddrLines = 16;
507 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 459 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
508 link->io.NumPorts2 = 8; 460 link->io.NumPorts2 = 8;
@@ -510,91 +462,80 @@ static int mhz_mfc_config(struct pcmcia_device *link)
510 /* The Megahertz combo cards have modem-like CIS entries, so 462 /* The Megahertz combo cards have modem-like CIS entries, so
511 we have to explicitly try a bunch of port combinations. */ 463 we have to explicitly try a bunch of port combinations. */
512 if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL)) 464 if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL))
513 goto free_cfg_mem; 465 return -ENODEV;
466
514 dev->base_addr = link->io.BasePort1; 467 dev->base_addr = link->io.BasePort1;
515 468
516 /* Allocate a memory window, for accessing the ISR */ 469 /* Allocate a memory window, for accessing the ISR */
517 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 470 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
518 req.Base = req.Size = 0; 471 req.Base = req.Size = 0;
519 req.AccessSpeed = 0; 472 req.AccessSpeed = 0;
520 i = pcmcia_request_window(&link, &req, &link->win); 473 i = pcmcia_request_window(link, &req, &link->win);
521 if (i != 0) 474 if (i != 0)
522 goto free_cfg_mem; 475 return -ENODEV;
476
523 smc->base = ioremap(req.Base, req.Size); 477 smc->base = ioremap(req.Base, req.Size);
524 mem.CardOffset = mem.Page = 0; 478 mem.CardOffset = mem.Page = 0;
525 if (smc->manfid == MANFID_MOTOROLA) 479 if (smc->manfid == MANFID_MOTOROLA)
526 mem.CardOffset = link->conf.ConfigBase; 480 mem.CardOffset = link->conf.ConfigBase;
527 i = pcmcia_map_mem_page(link->win, &mem); 481 i = pcmcia_map_mem_page(link, link->win, &mem);
528 482
529 if ((i == 0) && 483 if ((i == 0) &&
530 (smc->manfid == MANFID_MEGAHERTZ) && 484 (smc->manfid == MANFID_MEGAHERTZ) &&
531 (smc->cardid == PRODID_MEGAHERTZ_EM3288)) 485 (smc->cardid == PRODID_MEGAHERTZ_EM3288))
532 mhz_3288_power(link); 486 mhz_3288_power(link);
533 487
534free_cfg_mem: 488 return 0;
535 kfree(cfg_mem);
536 return -ENODEV;
537} 489}
538 490
539static int mhz_setup(struct pcmcia_device *link) 491static int pcmcia_get_versmac(struct pcmcia_device *p_dev,
492 tuple_t *tuple,
493 void *priv)
540{ 494{
541 struct net_device *dev = link->priv; 495 struct net_device *dev = priv;
542 struct smc_cfg_mem *cfg_mem; 496 cisparse_t parse;
543 tuple_t *tuple;
544 cisparse_t *parse;
545 u_char *buf, *station_addr;
546 int rc;
547 497
548 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); 498 if (pcmcia_parse_tuple(tuple, &parse))
549 if (!cfg_mem) 499 return -EINVAL;
550 return -1;
551 500
552 tuple = &cfg_mem->tuple; 501 if ((parse.version_1.ns > 3) &&
553 parse = &cfg_mem->parse; 502 (cvt_ascii_address(dev,
554 buf = cfg_mem->buf; 503 (parse.version_1.str + parse.version_1.ofs[3]))))
504 return 0;
555 505
556 tuple->Attributes = tuple->TupleOffset = 0; 506 return -EINVAL;
557 tuple->TupleData = (cisdata_t *)buf; 507};
558 tuple->TupleDataMax = 255; 508
509static int mhz_setup(struct pcmcia_device *link)
510{
511 struct net_device *dev = link->priv;
512 size_t len;
513 u8 *buf;
514 int rc;
559 515
560 /* Read the station address from the CIS. It is stored as the last 516 /* Read the station address from the CIS. It is stored as the last
561 (fourth) string in the Version 1 Version/ID tuple. */ 517 (fourth) string in the Version 1 Version/ID tuple. */
562 tuple->DesiredTuple = CISTPL_VERS_1; 518 if ((link->prod_id[3]) &&
563 if (first_tuple(link, tuple, parse) != 0) { 519 (cvt_ascii_address(dev, link->prod_id[3]) == 0))
564 rc = -1; 520 return 0;
565 goto free_cfg_mem; 521
566 } 522 /* Workarounds for broken cards start here. */
567 /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */ 523 /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
568 if (next_tuple(link, tuple, parse) != 0) 524 if (!pcmcia_loop_tuple(link, CISTPL_VERS_1, pcmcia_get_versmac, dev))
569 first_tuple(link, tuple, parse); 525 return 0;
570 if (parse->version_1.ns > 3) {
571 station_addr = parse->version_1.str + parse->version_1.ofs[3];
572 if (cvt_ascii_address(dev, station_addr) == 0) {
573 rc = 0;
574 goto free_cfg_mem;
575 }
576 }
577 526
578 /* Another possibility: for the EM3288, in a special tuple */ 527 /* Another possibility: for the EM3288, in a special tuple */
579 tuple->DesiredTuple = 0x81;
580 if (pcmcia_get_first_tuple(link, tuple) != 0) {
581 rc = -1;
582 goto free_cfg_mem;
583 }
584 if (pcmcia_get_tuple_data(link, tuple) != 0) {
585 rc = -1;
586 goto free_cfg_mem;
587 }
588 buf[12] = '\0';
589 if (cvt_ascii_address(dev, buf) == 0) {
590 rc = 0;
591 goto free_cfg_mem;
592 }
593 rc = -1; 528 rc = -1;
594free_cfg_mem: 529 len = pcmcia_get_tuple(link, 0x81, &buf);
595 kfree(cfg_mem); 530 if (buf && len >= 13) {
596 return rc; 531 buf[12] = '\0';
597} 532 if (cvt_ascii_address(dev, buf))
533 rc = 0;
534 }
535 kfree(buf);
536
537 return rc;
538};
598 539
599/*====================================================================== 540/*======================================================================
600 541
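For reference, the removed first_tuple()/next_tuple() helpers are superseded by two core services: pcmcia_get_tuple() for a one-shot read of a single tuple and pcmcia_loop_tuple() for walking every tuple of a given code. Below is a minimal sketch of the one-shot form, modeled on the new tail of mhz_setup() above; the function name and the 13-byte length check are illustrative, not part of the driver.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Sketch: read vendor tuple 0x81 (EM3288 station address, see above).
 * pcmcia_get_tuple() allocates the payload buffer and returns its
 * length; the caller must kfree() it even when the read is short. */
static int example_read_tuple(struct pcmcia_device *link)
{
	u8 *buf = NULL;
	size_t len;
	int rc = -ENODEV;

	len = pcmcia_get_tuple(link, 0x81, &buf);
	if (buf && len >= 13) {
		buf[12] = '\0';		/* payload carries an ASCII address */
		rc = 0;			/* parse buf here */
	}
	kfree(buf);
	return rc;
}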
@@ -684,58 +625,21 @@ static int smc_config(struct pcmcia_device *link)
684 return i; 625 return i;
685} 626}
686 627
628
687static int smc_setup(struct pcmcia_device *link) 629static int smc_setup(struct pcmcia_device *link)
688{ 630{
689 struct net_device *dev = link->priv; 631 struct net_device *dev = link->priv;
690 struct smc_cfg_mem *cfg_mem;
691 tuple_t *tuple;
692 cisparse_t *parse;
693 cistpl_lan_node_id_t *node_id;
694 u_char *buf, *station_addr;
695 int i, rc;
696
697 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL);
698 if (!cfg_mem)
699 return -ENOMEM;
700
701 tuple = &cfg_mem->tuple;
702 parse = &cfg_mem->parse;
703 buf = cfg_mem->buf;
704
705 tuple->Attributes = tuple->TupleOffset = 0;
706 tuple->TupleData = (cisdata_t *)buf;
707 tuple->TupleDataMax = 255;
708 632
709 /* Check for a LAN function extension tuple */ 633 /* Check for a LAN function extension tuple */
710 tuple->DesiredTuple = CISTPL_FUNCE; 634 if (!pcmcia_get_mac_from_cis(link, dev))
711 i = first_tuple(link, tuple, parse); 635 return 0;
712 while (i == 0) { 636
713 if (parse->funce.type == CISTPL_FUNCE_LAN_NODE_ID)
714 break;
715 i = next_tuple(link, tuple, parse);
716 }
717 if (i == 0) {
718 node_id = (cistpl_lan_node_id_t *)parse->funce.data;
719 if (node_id->nb == 6) {
720 for (i = 0; i < 6; i++)
721 dev->dev_addr[i] = node_id->id[i];
722 rc = 0;
723 goto free_cfg_mem;
724 }
725 }
726 /* Try the third string in the Version 1 Version/ID tuple. */ 637 /* Try the third string in the Version 1 Version/ID tuple. */
727 if (link->prod_id[2]) { 638 if (link->prod_id[2]) {
728 station_addr = link->prod_id[2]; 639 if (cvt_ascii_address(dev, link->prod_id[2]) == 0)
729 if (cvt_ascii_address(dev, station_addr) == 0) { 640 return 0;
730 rc = 0;
731 goto free_cfg_mem;
732 }
733 } 641 }
734 642 return -1;
735 rc = -1;
736free_cfg_mem:
737 kfree(cfg_mem);
738 return rc;
739} 643}
740 644
741/*====================================================================*/ 645/*====================================================================*/
@@ -749,7 +653,7 @@ static int osi_config(struct pcmcia_device *link)
749 link->conf.Attributes |= CONF_ENABLE_SPKR; 653 link->conf.Attributes |= CONF_ENABLE_SPKR;
750 link->conf.Status = CCSR_AUDIO_ENA; 654 link->conf.Status = CCSR_AUDIO_ENA;
751 link->irq.Attributes = 655 link->irq.Attributes =
752 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; 656 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
753 link->io.NumPorts1 = 64; 657 link->io.NumPorts1 = 64;
754 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 658 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
755 link->io.NumPorts2 = 8; 659 link->io.NumPorts2 = 8;
@@ -794,41 +698,31 @@ static int osi_load_firmware(struct pcmcia_device *link)
794 return err; 698 return err;
795} 699}
796 700
797static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid) 701static int pcmcia_osi_mac(struct pcmcia_device *p_dev,
702 tuple_t *tuple,
703 void *priv)
798{ 704{
799 struct net_device *dev = link->priv; 705 struct net_device *dev = priv;
800 struct smc_cfg_mem *cfg_mem; 706 int i;
801 tuple_t *tuple;
802 u_char *buf;
803 int i, rc;
804 707
805 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); 708 if (tuple->TupleDataLen < 8)
806 if (!cfg_mem) 709 return -EINVAL;
807 return -1; 710 if (tuple->TupleData[0] != 0x04)
711 return -EINVAL;
712 for (i = 0; i < 6; i++)
713 dev->dev_addr[i] = tuple->TupleData[i+2];
714 return 0;
715};
808 716
809 tuple = &cfg_mem->tuple;
810 buf = cfg_mem->buf;
811 717
812 tuple->Attributes = TUPLE_RETURN_COMMON; 718static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
813 tuple->TupleData = (cisdata_t *)buf; 719{
814 tuple->TupleDataMax = 255; 720 struct net_device *dev = link->priv;
815 tuple->TupleOffset = 0; 721 int rc;
816 722
817 /* Read the station address from tuple 0x90, subtuple 0x04 */ 723 /* Read the station address from tuple 0x90, subtuple 0x04 */
818 tuple->DesiredTuple = 0x90; 724 if (pcmcia_loop_tuple(link, 0x90, pcmcia_osi_mac, dev))
819 i = pcmcia_get_first_tuple(link, tuple); 725 return -1;
820 while (i == 0) {
821 i = pcmcia_get_tuple_data(link, tuple);
822 if ((i != 0) || (buf[0] == 0x04))
823 break;
824 i = pcmcia_get_next_tuple(link, tuple);
825 }
826 if (i != 0) {
827 rc = -1;
828 goto free_cfg_mem;
829 }
830 for (i = 0; i < 6; i++)
831 dev->dev_addr[i] = buf[i+2];
832 726
833 if (((manfid == MANFID_OSITECH) && 727 if (((manfid == MANFID_OSITECH) &&
834 (cardid == PRODID_OSITECH_SEVEN)) || 728 (cardid == PRODID_OSITECH_SEVEN)) ||
@@ -836,20 +730,17 @@ static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
836 (cardid == PRODID_PSION_NET100))) { 730 (cardid == PRODID_PSION_NET100))) {
837 rc = osi_load_firmware(link); 731 rc = osi_load_firmware(link);
838 if (rc) 732 if (rc)
839 goto free_cfg_mem; 733 return rc;
840 } else if (manfid == MANFID_OSITECH) { 734 } else if (manfid == MANFID_OSITECH) {
841 /* Make sure both functions are powered up */ 735 /* Make sure both functions are powered up */
842 set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR); 736 set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
843 /* Now, turn on the interrupt for both card functions */ 737 /* Now, turn on the interrupt for both card functions */
844 set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR); 738 set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR);
845 DEBUG(2, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n", 739 dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n",
846 inw(link->io.BasePort1 + OSITECH_AUI_PWR), 740 inw(link->io.BasePort1 + OSITECH_AUI_PWR),
847 inw(link->io.BasePort1 + OSITECH_RESET_ISR)); 741 inw(link->io.BasePort1 + OSITECH_RESET_ISR));
848 } 742 }
849 rc = 0; 743 return 0;
850free_cfg_mem:
851 kfree(cfg_mem);
852 return rc;
853} 744}
854 745
855static int smc91c92_suspend(struct pcmcia_device *link) 746static int smc91c92_suspend(struct pcmcia_device *link)
@@ -959,12 +850,6 @@ static int check_sig(struct pcmcia_device *link)
959 850
960======================================================================*/ 851======================================================================*/
961 852
962#define CS_EXIT_TEST(ret, svc, label) \
963if (ret != 0) { \
964 cs_error(link, svc, ret); \
965 goto label; \
966}
967
968static int smc91c92_config(struct pcmcia_device *link) 853static int smc91c92_config(struct pcmcia_device *link)
969{ 854{
970 struct net_device *dev = link->priv; 855 struct net_device *dev = link->priv;
@@ -974,7 +859,7 @@ static int smc91c92_config(struct pcmcia_device *link)
974 unsigned int ioaddr; 859 unsigned int ioaddr;
975 u_long mir; 860 u_long mir;
976 861
977 DEBUG(0, "smc91c92_config(0x%p)\n", link); 862 dev_dbg(&link->dev, "smc91c92_config\n");
978 863
979 smc->manfid = link->manf_id; 864 smc->manfid = link->manf_id;
980 smc->cardid = link->card_id; 865 smc->cardid = link->card_id;
@@ -990,12 +875,15 @@ static int smc91c92_config(struct pcmcia_device *link)
990 } else { 875 } else {
991 i = smc_config(link); 876 i = smc_config(link);
992 } 877 }
993 CS_EXIT_TEST(i, RequestIO, config_failed); 878 if (i)
879 goto config_failed;
994 880
995 i = pcmcia_request_irq(link, &link->irq); 881 i = pcmcia_request_irq(link, &link->irq);
996 CS_EXIT_TEST(i, RequestIRQ, config_failed); 882 if (i)
883 goto config_failed;
997 i = pcmcia_request_configuration(link, &link->conf); 884 i = pcmcia_request_configuration(link, &link->conf);
998 CS_EXIT_TEST(i, RequestConfiguration, config_failed); 885 if (i)
886 goto config_failed;
999 887
1000 if (smc->manfid == MANFID_MOTOROLA) 888 if (smc->manfid == MANFID_MOTOROLA)
1001 mot_config(link); 889 mot_config(link);
@@ -1074,7 +962,7 @@ static int smc91c92_config(struct pcmcia_device *link)
1074 } 962 }
1075 963
1076 link->dev_node = &smc->node; 964 link->dev_node = &smc->node;
1077 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 965 SET_NETDEV_DEV(dev, &link->dev);
1078 966
1079 if (register_netdev(dev) != 0) { 967 if (register_netdev(dev) != 0) {
1080 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); 968 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
@@ -1100,7 +988,7 @@ static int smc91c92_config(struct pcmcia_device *link)
1100 988
1101 if (smc->cfg & CFG_MII_SELECT) { 989 if (smc->cfg & CFG_MII_SELECT) {
1102 if (smc->mii_if.phy_id != -1) { 990 if (smc->mii_if.phy_id != -1) {
1103 DEBUG(0, " MII transceiver at index %d, status %x.\n", 991 dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n",
1104 smc->mii_if.phy_id, j); 992 smc->mii_if.phy_id, j);
1105 } else { 993 } else {
1106 printk(KERN_NOTICE " No MII transceivers found!\n"); 994 printk(KERN_NOTICE " No MII transceivers found!\n");
@@ -1110,7 +998,7 @@ static int smc91c92_config(struct pcmcia_device *link)
1110 998
1111config_undo: 999config_undo:
1112 unregister_netdev(dev); 1000 unregister_netdev(dev);
1113config_failed: /* CS_EXIT_TEST() calls jump to here... */ 1001config_failed:
1114 smc91c92_release(link); 1002 smc91c92_release(link);
1115 return -ENODEV; 1003 return -ENODEV;
1116} /* smc91c92_config */ 1004} /* smc91c92_config */
@@ -1125,7 +1013,7 @@ config_failed: /* CS_EXIT_TEST() calls jump to here... */
1125 1013
1126static void smc91c92_release(struct pcmcia_device *link) 1014static void smc91c92_release(struct pcmcia_device *link)
1127{ 1015{
1128 DEBUG(0, "smc91c92_release(0x%p)\n", link); 1016 dev_dbg(&link->dev, "smc91c92_release\n");
1129 if (link->win) { 1017 if (link->win) {
1130 struct net_device *dev = link->priv; 1018 struct net_device *dev = link->priv;
1131 struct smc_private *smc = netdev_priv(dev); 1019 struct smc_private *smc = netdev_priv(dev);
@@ -1222,10 +1110,10 @@ static int smc_open(struct net_device *dev)
1222 struct smc_private *smc = netdev_priv(dev); 1110 struct smc_private *smc = netdev_priv(dev);
1223 struct pcmcia_device *link = smc->p_dev; 1111 struct pcmcia_device *link = smc->p_dev;
1224 1112
1225#ifdef PCMCIA_DEBUG 1113 dev_dbg(&link->dev, "%s: smc_open(%p), ID/Window %4.4x.\n",
1226 DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n",
1227 dev->name, dev, inw(dev->base_addr + BANK_SELECT)); 1114 dev->name, dev, inw(dev->base_addr + BANK_SELECT));
1228 if (pc_debug > 1) smc_dump(dev); 1115#ifdef PCMCIA_DEBUG
1116 smc_dump(dev);
1229#endif 1117#endif
1230 1118
1231 /* Check that the PCMCIA card is still here. */ 1119 /* Check that the PCMCIA card is still here. */
@@ -1260,7 +1148,7 @@ static int smc_close(struct net_device *dev)
1260 struct pcmcia_device *link = smc->p_dev; 1148 struct pcmcia_device *link = smc->p_dev;
1261 unsigned int ioaddr = dev->base_addr; 1149 unsigned int ioaddr = dev->base_addr;
1262 1150
1263 DEBUG(0, "%s: smc_close(), status %4.4x.\n", 1151 dev_dbg(&link->dev, "%s: smc_close(), status %4.4x.\n",
1264 dev->name, inw(ioaddr + BANK_SELECT)); 1152 dev->name, inw(ioaddr + BANK_SELECT));
1265 1153
1266 netif_stop_queue(dev); 1154 netif_stop_queue(dev);
@@ -1327,7 +1215,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
1327 u_char *buf = skb->data; 1215 u_char *buf = skb->data;
1328 u_int length = skb->len; /* The chip will pad to ethernet min. */ 1216 u_int length = skb->len; /* The chip will pad to ethernet min. */
1329 1217
1330 DEBUG(2, "%s: Trying to xmit packet of length %d.\n", 1218 pr_debug("%s: Trying to xmit packet of length %d.\n",
1331 dev->name, length); 1219 dev->name, length);
1332 1220
1333 /* send the packet length: +6 for status word, length, and ctl */ 1221 /* send the packet length: +6 for status word, length, and ctl */
@@ -1382,7 +1270,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
1382 1270
1383 netif_stop_queue(dev); 1271 netif_stop_queue(dev);
1384 1272
1385 DEBUG(2, "%s: smc_start_xmit(length = %d) called," 1273 pr_debug("%s: smc_start_xmit(length = %d) called,"
1386 " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2)); 1274 " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2));
1387 1275
1388 if (smc->saved_skb) { 1276 if (smc->saved_skb) {
@@ -1429,7 +1317,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
1429 } 1317 }
1430 1318
1431 /* Otherwise defer until the Tx-space-allocated interrupt. */ 1319 /* Otherwise defer until the Tx-space-allocated interrupt. */
1432 DEBUG(2, "%s: memory allocation deferred.\n", dev->name); 1320 pr_debug("%s: memory allocation deferred.\n", dev->name);
1433 outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); 1321 outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
1434 spin_unlock_irqrestore(&smc->lock, flags); 1322 spin_unlock_irqrestore(&smc->lock, flags);
1435 1323
@@ -1494,7 +1382,7 @@ static void smc_eph_irq(struct net_device *dev)
1494 1382
1495 SMC_SELECT_BANK(0); 1383 SMC_SELECT_BANK(0);
1496 ephs = inw(ioaddr + EPH); 1384 ephs = inw(ioaddr + EPH);
1497 DEBUG(2, "%s: Ethernet protocol handler interrupt, status" 1385 pr_debug("%s: Ethernet protocol handler interrupt, status"
1498 " %4.4x.\n", dev->name, ephs); 1386 " %4.4x.\n", dev->name, ephs);
1499 /* Could be a counter roll-over warning: update stats. */ 1387 /* Could be a counter roll-over warning: update stats. */
1500 card_stats = inw(ioaddr + COUNTER); 1388 card_stats = inw(ioaddr + COUNTER);
@@ -1534,7 +1422,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1534 1422
1535 ioaddr = dev->base_addr; 1423 ioaddr = dev->base_addr;
1536 1424
1537 DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name, 1425 pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
1538 irq, ioaddr); 1426 irq, ioaddr);
1539 1427
1540 spin_lock(&smc->lock); 1428 spin_lock(&smc->lock);
@@ -1543,7 +1431,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1543 if ((saved_bank & 0xff00) != 0x3300) { 1431 if ((saved_bank & 0xff00) != 0x3300) {
1544 /* The device does not exist -- the card could be off-line, or 1432 /* The device does not exist -- the card could be off-line, or
1545 maybe it has been ejected. */ 1433 maybe it has been ejected. */
1546 DEBUG(1, "%s: SMC91c92 interrupt %d for non-existent" 1434 pr_debug("%s: SMC91c92 interrupt %d for non-existent"
1547 "/ejected device.\n", dev->name, irq); 1435 "/ejected device.\n", dev->name, irq);
1548 handled = 0; 1436 handled = 0;
1549 goto irq_done; 1437 goto irq_done;
@@ -1557,7 +1445,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1557 1445
1558 do { /* read the status flag, and mask it */ 1446 do { /* read the status flag, and mask it */
1559 status = inw(ioaddr + INTERRUPT) & 0xff; 1447 status = inw(ioaddr + INTERRUPT) & 0xff;
1560 DEBUG(3, "%s: Status is %#2.2x (mask %#2.2x).\n", dev->name, 1448 pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
1561 status, mask); 1449 status, mask);
1562 if ((status & mask) == 0) { 1450 if ((status & mask) == 0) {
1563 if (bogus_cnt == INTR_WORK) 1451 if (bogus_cnt == INTR_WORK)
@@ -1602,7 +1490,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1602 smc_eph_irq(dev); 1490 smc_eph_irq(dev);
1603 } while (--bogus_cnt); 1491 } while (--bogus_cnt);
1604 1492
1605 DEBUG(3, " Restoring saved registers mask %2.2x bank %4.4x" 1493 pr_debug(" Restoring saved registers mask %2.2x bank %4.4x"
1606 " pointer %4.4x.\n", mask, saved_bank, saved_pointer); 1494 " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
1607 1495
1608 /* restore state register */ 1496 /* restore state register */
@@ -1610,7 +1498,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1610 outw(saved_pointer, ioaddr + POINTER); 1498 outw(saved_pointer, ioaddr + POINTER);
1611 SMC_SELECT_BANK(saved_bank); 1499 SMC_SELECT_BANK(saved_bank);
1612 1500
1613 DEBUG(3, "%s: Exiting interrupt IRQ%d.\n", dev->name, irq); 1501 pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
1614 1502
1615irq_done: 1503irq_done:
1616 1504
@@ -1661,7 +1549,7 @@ static void smc_rx(struct net_device *dev)
1661 rx_status = inw(ioaddr + DATA_1); 1549 rx_status = inw(ioaddr + DATA_1);
1662 packet_length = inw(ioaddr + DATA_1) & 0x07ff; 1550 packet_length = inw(ioaddr + DATA_1) & 0x07ff;
1663 1551
1664 DEBUG(2, "%s: Receive status %4.4x length %d.\n", 1552 pr_debug("%s: Receive status %4.4x length %d.\n",
1665 dev->name, rx_status, packet_length); 1553 dev->name, rx_status, packet_length);
1666 1554
1667 if (!(rx_status & RS_ERRORS)) { 1555 if (!(rx_status & RS_ERRORS)) {
@@ -1672,7 +1560,7 @@ static void smc_rx(struct net_device *dev)
1672 skb = dev_alloc_skb(packet_length+2); 1560 skb = dev_alloc_skb(packet_length+2);
1673 1561
1674 if (skb == NULL) { 1562 if (skb == NULL) {
1675 DEBUG(1, "%s: Low memory, packet dropped.\n", dev->name); 1563 pr_debug("%s: Low memory, packet dropped.\n", dev->name);
1676 dev->stats.rx_dropped++; 1564 dev->stats.rx_dropped++;
1677 outw(MC_RELEASE, ioaddr + MMU_CMD); 1565 outw(MC_RELEASE, ioaddr + MMU_CMD);
1678 return; 1566 return;
@@ -1832,7 +1720,7 @@ static void smc_reset(struct net_device *dev)
1832 struct smc_private *smc = netdev_priv(dev); 1720 struct smc_private *smc = netdev_priv(dev);
1833 int i; 1721 int i;
1834 1722
1835 DEBUG(0, "%s: smc91c92 reset called.\n", dev->name); 1723 pr_debug("%s: smc91c92 reset called.\n", dev->name);
1836 1724
1837 /* The first interaction must be a write to bring the chip out 1725 /* The first interaction must be a write to bring the chip out
1838 of sleep mode. */ 1726 of sleep mode. */
@@ -2149,18 +2037,6 @@ static u32 smc_get_link(struct net_device *dev)
2149 return ret; 2037 return ret;
2150} 2038}
2151 2039
2152#ifdef PCMCIA_DEBUG
2153static u32 smc_get_msglevel(struct net_device *dev)
2154{
2155 return pc_debug;
2156}
2157
2158static void smc_set_msglevel(struct net_device *dev, u32 val)
2159{
2160 pc_debug = val;
2161}
2162#endif
2163
2164static int smc_nway_reset(struct net_device *dev) 2040static int smc_nway_reset(struct net_device *dev)
2165{ 2041{
2166 struct smc_private *smc = netdev_priv(dev); 2042 struct smc_private *smc = netdev_priv(dev);
@@ -2184,10 +2060,6 @@ static const struct ethtool_ops ethtool_ops = {
2184 .get_settings = smc_get_settings, 2060 .get_settings = smc_get_settings,
2185 .set_settings = smc_set_settings, 2061 .set_settings = smc_set_settings,
2186 .get_link = smc_get_link, 2062 .get_link = smc_get_link,
2187#ifdef PCMCIA_DEBUG
2188 .get_msglevel = smc_get_msglevel,
2189 .set_msglevel = smc_set_msglevel,
2190#endif
2191 .nway_reset = smc_nway_reset, 2063 .nway_reset = smc_nway_reset,
2192}; 2064};
2193 2065
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 187da21f720b..a2eda28f903e 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -211,20 +211,6 @@ enum xirc_cmd { /* Commands */
211 211
212static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; 212static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
213 213
214/****************
215 * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
216 * you do not define PCMCIA_DEBUG at all, all the debug code will be
217 * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
218 * be present but disabled -- but it can then be enabled for specific
219 * modules at load time with a 'pc_debug=#' option to insmod.
220 */
221#ifdef PCMCIA_DEBUG
222static int pc_debug = PCMCIA_DEBUG;
223module_param(pc_debug, int, 0);
224#define DEBUG(n, args...) if (pc_debug>(n)) printk(KDBG_XIRC args)
225#else
226#define DEBUG(n, args...)
227#endif
228 214
229#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: " 215#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
230#define KERR_XIRC KERN_ERR "xirc2ps_cs: " 216#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
@@ -359,7 +345,7 @@ static void xirc_tx_timeout(struct net_device *dev);
359static void xirc2ps_tx_timeout_task(struct work_struct *work); 345static void xirc2ps_tx_timeout_task(struct work_struct *work);
360static void set_addresses(struct net_device *dev); 346static void set_addresses(struct net_device *dev);
361static void set_multicast_list(struct net_device *dev); 347static void set_multicast_list(struct net_device *dev);
362static int set_card_type(struct pcmcia_device *link, const void *s); 348static int set_card_type(struct pcmcia_device *link);
363static int do_config(struct net_device *dev, struct ifmap *map); 349static int do_config(struct net_device *dev, struct ifmap *map);
364static int do_open(struct net_device *dev); 350static int do_open(struct net_device *dev);
365static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 351static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -371,28 +357,6 @@ static void do_powerdown(struct net_device *dev);
371static int do_stop(struct net_device *dev); 357static int do_stop(struct net_device *dev);
372 358
373/*=============== Helper functions =========================*/ 359/*=============== Helper functions =========================*/
374static int
375first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
376{
377 int err;
378
379 if ((err = pcmcia_get_first_tuple(handle, tuple)) == 0 &&
380 (err = pcmcia_get_tuple_data(handle, tuple)) == 0)
381 err = pcmcia_parse_tuple(tuple, parse);
382 return err;
383}
384
385static int
386next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
387{
388 int err;
389
390 if ((err = pcmcia_get_next_tuple(handle, tuple)) == 0 &&
391 (err = pcmcia_get_tuple_data(handle, tuple)) == 0)
392 err = pcmcia_parse_tuple(tuple, parse);
393 return err;
394}
395
396#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR) 360#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR)
397#define GetByte(reg) ((unsigned)inb(ioaddr + (reg))) 361#define GetByte(reg) ((unsigned)inb(ioaddr + (reg)))
398#define GetWord(reg) ((unsigned)inw(ioaddr + (reg))) 362#define GetWord(reg) ((unsigned)inw(ioaddr + (reg)))
@@ -400,7 +364,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
400#define PutWord(reg,value) outw((value), ioaddr+(reg)) 364#define PutWord(reg,value) outw((value), ioaddr+(reg))
401 365
402/*====== Functions used for debugging =================================*/ 366/*====== Functions used for debugging =================================*/
403#if defined(PCMCIA_DEBUG) && 0 /* reading regs may change system status */ 367#if 0 /* reading regs may change system status */
404static void 368static void
405PrintRegisters(struct net_device *dev) 369PrintRegisters(struct net_device *dev)
406{ 370{
@@ -432,7 +396,7 @@ PrintRegisters(struct net_device *dev)
432 } 396 }
433 } 397 }
434} 398}
435#endif /* PCMCIA_DEBUG */ 399#endif /* 0 */
436 400
437/*============== MII Management functions ===============*/ 401/*============== MII Management functions ===============*/
438 402
@@ -576,7 +540,7 @@ xirc2ps_probe(struct pcmcia_device *link)
576 struct net_device *dev; 540 struct net_device *dev;
577 local_info_t *local; 541 local_info_t *local;
578 542
579 DEBUG(0, "attach()\n"); 543 dev_dbg(&link->dev, "attach()\n");
580 544
581 /* Allocate the device structure */ 545 /* Allocate the device structure */
582 dev = alloc_etherdev(sizeof(local_info_t)); 546 dev = alloc_etherdev(sizeof(local_info_t));
@@ -592,7 +556,6 @@ xirc2ps_probe(struct pcmcia_device *link)
592 link->conf.IntType = INT_MEMORY_AND_IO; 556 link->conf.IntType = INT_MEMORY_AND_IO;
593 link->conf.ConfigIndex = 1; 557 link->conf.ConfigIndex = 1;
594 link->irq.Handler = xirc2ps_interrupt; 558 link->irq.Handler = xirc2ps_interrupt;
595 link->irq.Instance = dev;
596 559
597 /* Fill in card specific entries */ 560 /* Fill in card specific entries */
598 dev->netdev_ops = &netdev_ops; 561 dev->netdev_ops = &netdev_ops;
@@ -615,7 +578,7 @@ xirc2ps_detach(struct pcmcia_device *link)
615{ 578{
616 struct net_device *dev = link->priv; 579 struct net_device *dev = link->priv;
617 580
618 DEBUG(0, "detach(0x%p)\n", link); 581 dev_dbg(&link->dev, "detach\n");
619 582
620 if (link->dev_node) 583 if (link->dev_node)
621 unregister_netdev(dev); 584 unregister_netdev(dev);
@@ -644,17 +607,25 @@ xirc2ps_detach(struct pcmcia_device *link)
644 * 607 *
645 */ 608 */
646static int 609static int
647set_card_type(struct pcmcia_device *link, const void *s) 610set_card_type(struct pcmcia_device *link)
648{ 611{
649 struct net_device *dev = link->priv; 612 struct net_device *dev = link->priv;
650 local_info_t *local = netdev_priv(dev); 613 local_info_t *local = netdev_priv(dev);
651 #ifdef PCMCIA_DEBUG 614 u8 *buf;
652 unsigned cisrev = ((const unsigned char *)s)[2]; 615 unsigned int cisrev, mediaid, prodid;
653 #endif 616 size_t len;
654 unsigned mediaid= ((const unsigned char *)s)[3]; 617
655 unsigned prodid = ((const unsigned char *)s)[4]; 618 len = pcmcia_get_tuple(link, CISTPL_MANFID, &buf);
619 if (len < 5) {
620 dev_err(&link->dev, "invalid CIS -- sorry\n");
621 return 0;
622 }
656 623
657 DEBUG(0, "cisrev=%02x mediaid=%02x prodid=%02x\n", 624 cisrev = buf[2];
625 mediaid = buf[3];
626 prodid = buf[4];
627
628 dev_dbg(&link->dev, "cisrev=%02x mediaid=%02x prodid=%02x\n",
658 cisrev, mediaid, prodid); 629 cisrev, mediaid, prodid);
659 630
660 local->mohawk = 0; 631 local->mohawk = 0;
@@ -761,6 +732,26 @@ xirc2ps_config_check(struct pcmcia_device *p_dev,
761 732
762} 733}
763 734
735
736static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
737 tuple_t *tuple,
738 void *priv)
739{
740 struct net_device *dev = priv;
741 int i;
742
743 if (tuple->TupleDataLen != 13)
744 return -EINVAL;
745 if ((tuple->TupleData[0] != 2) || (tuple->TupleData[1] != 1) ||
746 (tuple->TupleData[2] != 6))
747 return -EINVAL;
748 /* another try (James Lehmer's CE2 version 4.1)*/
749 for (i = 2; i < 6; i++)
750 dev->dev_addr[i] = tuple->TupleData[i+2];
751 return 0;
752};
753
754
764/**************** 755/****************
765 * xirc2ps_config() is scheduled to run after a CARD_INSERTION event 756 * xirc2ps_config() is scheduled to run after a CARD_INSERTION event
766 * is received, to configure the PCMCIA socket, and to make the 757 * is received, to configure the PCMCIA socket, and to make the
@@ -772,33 +763,21 @@ xirc2ps_config(struct pcmcia_device * link)
772 struct net_device *dev = link->priv; 763 struct net_device *dev = link->priv;
773 local_info_t *local = netdev_priv(dev); 764 local_info_t *local = netdev_priv(dev);
774 unsigned int ioaddr; 765 unsigned int ioaddr;
775 tuple_t tuple; 766 int err;
776 cisparse_t parse; 767 u8 *buf;
777 int err, i; 768 size_t len;
778 u_char buf[64];
779 cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data;
780 769
781 local->dingo_ccr = NULL; 770 local->dingo_ccr = NULL;
782 771
783 DEBUG(0, "config(0x%p)\n", link); 772 dev_dbg(&link->dev, "config\n");
784
785 /*
786 * This reads the card's CONFIG tuple to find its configuration
787 * registers.
788 */
789 tuple.Attributes = 0;
790 tuple.TupleData = buf;
791 tuple.TupleDataMax = 64;
792 tuple.TupleOffset = 0;
793 773
794 /* Is this a valid card */ 774 /* Is this a valid card */
795 tuple.DesiredTuple = CISTPL_MANFID; 775 if (link->has_manf_id == 0) {
796 if ((err=first_tuple(link, &tuple, &parse))) {
797 printk(KNOT_XIRC "manfid not found in CIS\n"); 776 printk(KNOT_XIRC "manfid not found in CIS\n");
798 goto failure; 777 goto failure;
799 } 778 }
800 779
801 switch(parse.manfid.manf) { 780 switch (link->manf_id) {
802 case MANFID_XIRCOM: 781 case MANFID_XIRCOM:
803 local->manf_str = "Xircom"; 782 local->manf_str = "Xircom";
804 break; 783 break;
@@ -817,65 +796,44 @@ xirc2ps_config(struct pcmcia_device * link)
817 break; 796 break;
818 default: 797 default:
819 printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n", 798 printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n",
820 (unsigned)parse.manfid.manf); 799 (unsigned)link->manf_id);
821 goto failure; 800 goto failure;
822 } 801 }
823 DEBUG(0, "found %s card\n", local->manf_str); 802 dev_dbg(&link->dev, "found %s card\n", local->manf_str);
824 803
825 if (!set_card_type(link, buf)) { 804 if (!set_card_type(link)) {
826 printk(KNOT_XIRC "this card is not supported\n"); 805 printk(KNOT_XIRC "this card is not supported\n");
827 goto failure; 806 goto failure;
828 } 807 }
829 808
830 /* get the ethernet address from the CIS */ 809 /* get the ethernet address from the CIS */
831 tuple.DesiredTuple = CISTPL_FUNCE; 810 err = pcmcia_get_mac_from_cis(link, dev);
832 for (err = first_tuple(link, &tuple, &parse); !err; 811
833 err = next_tuple(link, &tuple, &parse)) { 812 /* not found: try to get the node-id from tuple 0x89 */
834 /* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries: 813 if (err) {
835 * the first one with a length of zero the second correct - 814 len = pcmcia_get_tuple(link, 0x89, &buf);
836 * so I skip all entries with length 0 */ 815 /* data layout looks like tuple 0x22 */
837 if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID && 816 if (buf && len == 8) {
838 ((cistpl_lan_node_id_t *)parse.funce.data)->nb) 817 if (*buf == CISTPL_FUNCE_LAN_NODE_ID) {
839 break; 818 int i;
840 } 819 for (i = 2; i < 6; i++)
841 if (err) { /* not found: try to get the node-id from tuple 0x89 */ 820 dev->dev_addr[i] = buf[i+2];
842 tuple.DesiredTuple = 0x89; /* data layout looks like tuple 0x22 */ 821 } else
843 if ((err = pcmcia_get_first_tuple(link, &tuple)) == 0 && 822 err = -1;
844 (err = pcmcia_get_tuple_data(link, &tuple)) == 0) {
845 if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID)
846 memcpy(&parse, buf, 8);
847 else
848 err = -1;
849 }
850 }
851 if (err) { /* another try (James Lehmer's CE2 version 4.1)*/
852 tuple.DesiredTuple = CISTPL_FUNCE;
853 for (err = first_tuple(link, &tuple, &parse); !err;
854 err = next_tuple(link, &tuple, &parse)) {
855 if (parse.funce.type == 0x02 && parse.funce.data[0] == 1 &&
856 parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) {
857 buf[1] = 4;
858 memcpy(&parse, buf+1, 8);
859 break;
860 } 823 }
861 } 824 kfree(buf);
862 } 825 }
826
827 if (err)
828 err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev);
829
863 if (err) { 830 if (err) {
864 printk(KNOT_XIRC "node-id not found in CIS\n"); 831 printk(KNOT_XIRC "node-id not found in CIS\n");
865 goto failure; 832 goto failure;
866 } 833 }
867 node_id = (cistpl_lan_node_id_t *)parse.funce.data;
868 if (node_id->nb != 6) {
869 printk(KNOT_XIRC "malformed node-id in CIS\n");
870 goto failure;
871 }
872 for (i=0; i < 6; i++)
873 dev->dev_addr[i] = node_id->id[i];
874 834
875 link->io.IOAddrLines =10; 835 link->io.IOAddrLines =10;
876 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 836 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
877 link->irq.Attributes = IRQ_HANDLE_PRESENT;
878 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
879 if (local->modem) { 837 if (local->modem) {
880 int pass; 838 int pass;
881 839
@@ -916,10 +874,8 @@ xirc2ps_config(struct pcmcia_device * link)
916 goto port_found; 874 goto port_found;
917 } 875 }
918 link->io.BasePort1 = 0; /* let CS decide */ 876 link->io.BasePort1 = 0; /* let CS decide */
919 if ((err=pcmcia_request_io(link, &link->io))) { 877 if ((err=pcmcia_request_io(link, &link->io)))
920 cs_error(link, RequestIO, err);
921 goto config_error; 878 goto config_error;
922 }
923 } 879 }
924 port_found: 880 port_found:
925 if (err) 881 if (err)
@@ -929,19 +885,15 @@ xirc2ps_config(struct pcmcia_device * link)
929 * Now allocate an interrupt line. Note that this does not 885 * Now allocate an interrupt line. Note that this does not
930 * actually assign a handler to the interrupt. 886 * actually assign a handler to the interrupt.
931 */ 887 */
932 if ((err=pcmcia_request_irq(link, &link->irq))) { 888 if ((err=pcmcia_request_irq(link, &link->irq)))
933 cs_error(link, RequestIRQ, err);
934 goto config_error; 889 goto config_error;
935 }
936 890
937 /**************** 891 /****************
938 * This actually configures the PCMCIA socket -- setting up 892 * This actually configures the PCMCIA socket -- setting up
939 * the I/O windows and the interrupt mapping. 893 * the I/O windows and the interrupt mapping.
940 */ 894 */
941 if ((err=pcmcia_request_configuration(link, &link->conf))) { 895 if ((err=pcmcia_request_configuration(link, &link->conf)))
942 cs_error(link, RequestConfiguration, err);
943 goto config_error; 896 goto config_error;
944 }
945 897
946 if (local->dingo) { 898 if (local->dingo) {
947 conf_reg_t reg; 899 conf_reg_t reg;
@@ -956,17 +908,13 @@ xirc2ps_config(struct pcmcia_device * link)
956 reg.Action = CS_WRITE; 908 reg.Action = CS_WRITE;
957 reg.Offset = CISREG_IOBASE_0; 909 reg.Offset = CISREG_IOBASE_0;
958 reg.Value = link->io.BasePort2 & 0xff; 910 reg.Value = link->io.BasePort2 & 0xff;
959 if ((err = pcmcia_access_configuration_register(link, &reg))) { 911 if ((err = pcmcia_access_configuration_register(link, &reg)))
960 cs_error(link, AccessConfigurationRegister, err);
961 goto config_error; 912 goto config_error;
962 }
963 reg.Action = CS_WRITE; 913 reg.Action = CS_WRITE;
964 reg.Offset = CISREG_IOBASE_1; 914 reg.Offset = CISREG_IOBASE_1;
965 reg.Value = (link->io.BasePort2 >> 8) & 0xff; 915 reg.Value = (link->io.BasePort2 >> 8) & 0xff;
966 if ((err = pcmcia_access_configuration_register(link, &reg))) { 916 if ((err = pcmcia_access_configuration_register(link, &reg)))
967 cs_error(link, AccessConfigurationRegister, err);
968 goto config_error; 917 goto config_error;
969 }
970 918
971 /* There is no config entry for the Ethernet part which 919 /* There is no config entry for the Ethernet part which
972 * is at 0x0800. So we allocate a window into the attribute 920 * is at 0x0800. So we allocate a window into the attribute
@@ -975,17 +923,14 @@ xirc2ps_config(struct pcmcia_device * link)
975 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 923 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
976 req.Base = req.Size = 0; 924 req.Base = req.Size = 0;
977 req.AccessSpeed = 0; 925 req.AccessSpeed = 0;
978 if ((err = pcmcia_request_window(&link, &req, &link->win))) { 926 if ((err = pcmcia_request_window(link, &req, &link->win)))
979 cs_error(link, RequestWindow, err);
980 goto config_error; 927 goto config_error;
981 } 928
982 local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800; 929 local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
983 mem.CardOffset = 0x0; 930 mem.CardOffset = 0x0;
984 mem.Page = 0; 931 mem.Page = 0;
985 if ((err = pcmcia_map_mem_page(link->win, &mem))) { 932 if ((err = pcmcia_map_mem_page(link, link->win, &mem)))
986 cs_error(link, MapMemPage, err);
987 goto config_error; 933 goto config_error;
988 }
989 934
990 /* Setup the CCRs; there are no infos in the CIS about the Ethernet 935 /* Setup the CCRs; there are no infos in the CIS about the Ethernet
991 * part. 936 * part.
@@ -1044,7 +989,7 @@ xirc2ps_config(struct pcmcia_device * link)
1044 do_reset(dev, 1); /* a kludge to make the cem56 work */ 989 do_reset(dev, 1); /* a kludge to make the cem56 work */
1045 990
1046 link->dev_node = &local->node; 991 link->dev_node = &local->node;
1047 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 992 SET_NETDEV_DEV(dev, &link->dev);
1048 993
1049 if ((err=register_netdev(dev))) { 994 if ((err=register_netdev(dev))) {
1050 printk(KNOT_XIRC "register_netdev() failed\n"); 995 printk(KNOT_XIRC "register_netdev() failed\n");
@@ -1077,7 +1022,7 @@ xirc2ps_config(struct pcmcia_device * link)
1077static void 1022static void
1078xirc2ps_release(struct pcmcia_device *link) 1023xirc2ps_release(struct pcmcia_device *link)
1079{ 1024{
1080 DEBUG(0, "release(0x%p)\n", link); 1025 dev_dbg(&link->dev, "release\n");
1081 1026
1082 if (link->win) { 1027 if (link->win) {
1083 struct net_device *dev = link->priv; 1028 struct net_device *dev = link->priv;
@@ -1144,7 +1089,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1144 PutByte(XIRCREG_CR, 0); 1089 PutByte(XIRCREG_CR, 0);
1145 } 1090 }
1146 1091
1147 DEBUG(6, "%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr); 1092 pr_debug("%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr);
1148 1093
1149 saved_page = GetByte(XIRCREG_PR); 1094 saved_page = GetByte(XIRCREG_PR);
1150 /* Read the ISR to see whats the cause for the interrupt. 1095 /* Read the ISR to see whats the cause for the interrupt.
@@ -1154,7 +1099,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1154 bytes_rcvd = 0; 1099 bytes_rcvd = 0;
1155 loop_entry: 1100 loop_entry:
1156 if (int_status == 0xff) { /* card may be ejected */ 1101 if (int_status == 0xff) { /* card may be ejected */
1157 DEBUG(3, "%s: interrupt %d for dead card\n", dev->name, irq); 1102 pr_debug("%s: interrupt %d for dead card\n", dev->name, irq);
1158 goto leave; 1103 goto leave;
1159 } 1104 }
1160 eth_status = GetByte(XIRCREG_ESR); 1105 eth_status = GetByte(XIRCREG_ESR);
@@ -1167,7 +1112,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1167 PutByte(XIRCREG40_TXST0, 0); 1112 PutByte(XIRCREG40_TXST0, 0);
1168 PutByte(XIRCREG40_TXST1, 0); 1113 PutByte(XIRCREG40_TXST1, 0);
1169 1114
1170 DEBUG(3, "%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n", 1115 pr_debug("%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n",
1171 dev->name, int_status, eth_status, rx_status, tx_status); 1116 dev->name, int_status, eth_status, rx_status, tx_status);
1172 1117
1173 /***** receive section ******/ 1118 /***** receive section ******/
@@ -1178,14 +1123,14 @@ xirc2ps_interrupt(int irq, void *dev_id)
1178 /* too many bytes received during this int, drop the rest of the 1123 /* too many bytes received during this int, drop the rest of the
1179 * packets */ 1124 * packets */
1180 dev->stats.rx_dropped++; 1125 dev->stats.rx_dropped++;
1181 DEBUG(2, "%s: RX drop, too much done\n", dev->name); 1126 pr_debug("%s: RX drop, too much done\n", dev->name);
1182 } else if (rsr & PktRxOk) { 1127 } else if (rsr & PktRxOk) {
1183 struct sk_buff *skb; 1128 struct sk_buff *skb;
1184 1129
1185 pktlen = GetWord(XIRCREG0_RBC); 1130 pktlen = GetWord(XIRCREG0_RBC);
1186 bytes_rcvd += pktlen; 1131 bytes_rcvd += pktlen;
1187 1132
1188 DEBUG(5, "rsr=%#02x packet_length=%u\n", rsr, pktlen); 1133 pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen);
1189 1134
1190 skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */ 1135 skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
1191 if (!skb) { 1136 if (!skb) {
@@ -1253,19 +1198,19 @@ xirc2ps_interrupt(int irq, void *dev_id)
1253 dev->stats.multicast++; 1198 dev->stats.multicast++;
1254 } 1199 }
1255 } else { /* bad packet */ 1200 } else { /* bad packet */
1256 DEBUG(5, "rsr=%#02x\n", rsr); 1201 pr_debug("rsr=%#02x\n", rsr);
1257 } 1202 }
1258 if (rsr & PktTooLong) { 1203 if (rsr & PktTooLong) {
1259 dev->stats.rx_frame_errors++; 1204 dev->stats.rx_frame_errors++;
1260 DEBUG(3, "%s: Packet too long\n", dev->name); 1205 pr_debug("%s: Packet too long\n", dev->name);
1261 } 1206 }
1262 if (rsr & CRCErr) { 1207 if (rsr & CRCErr) {
1263 dev->stats.rx_crc_errors++; 1208 dev->stats.rx_crc_errors++;
1264 DEBUG(3, "%s: CRC error\n", dev->name); 1209 pr_debug("%s: CRC error\n", dev->name);
1265 } 1210 }
1266 if (rsr & AlignErr) { 1211 if (rsr & AlignErr) {
1267 dev->stats.rx_fifo_errors++; /* okay ? */ 1212 dev->stats.rx_fifo_errors++; /* okay ? */
1268 DEBUG(3, "%s: Alignment error\n", dev->name); 1213 pr_debug("%s: Alignment error\n", dev->name);
1269 } 1214 }
1270 1215
1271 /* clear the received/dropped/error packet */ 1216 /* clear the received/dropped/error packet */
@@ -1277,7 +1222,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1277 if (rx_status & 0x10) { /* Receive overrun */ 1222 if (rx_status & 0x10) { /* Receive overrun */
1278 dev->stats.rx_over_errors++; 1223 dev->stats.rx_over_errors++;
1279 PutByte(XIRCREG_CR, ClearRxOvrun); 1224 PutByte(XIRCREG_CR, ClearRxOvrun);
1280 DEBUG(3, "receive overrun cleared\n"); 1225 pr_debug("receive overrun cleared\n");
1281 } 1226 }
1282 1227
1283 /***** transmit section ******/ 1228 /***** transmit section ******/
@@ -1290,13 +1235,13 @@ xirc2ps_interrupt(int irq, void *dev_id)
1290 if (nn < n) /* rollover */ 1235 if (nn < n) /* rollover */
1291 dev->stats.tx_packets += 256 - n; 1236 dev->stats.tx_packets += 256 - n;
1292 else if (n == nn) { /* happens sometimes - don't know why */ 1237 else if (n == nn) { /* happens sometimes - don't know why */
1293 DEBUG(0, "PTR not changed?\n"); 1238 pr_debug("PTR not changed?\n");
1294 } else 1239 } else
1295 dev->stats.tx_packets += lp->last_ptr_value - n; 1240 dev->stats.tx_packets += lp->last_ptr_value - n;
1296 netif_wake_queue(dev); 1241 netif_wake_queue(dev);
1297 } 1242 }
1298 if (tx_status & 0x0002) { /* Execessive collissions */ 1243 if (tx_status & 0x0002) { /* Execessive collissions */
1299 DEBUG(0, "tx restarted due to execssive collissions\n"); 1244 pr_debug("tx restarted due to execssive collissions\n");
1300 PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ 1245 PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
1301 } 1246 }
1302 if (tx_status & 0x0040) 1247 if (tx_status & 0x0040)
@@ -1315,14 +1260,14 @@ xirc2ps_interrupt(int irq, void *dev_id)
1315 maxrx_bytes = 2000; 1260 maxrx_bytes = 2000;
1316 else if (maxrx_bytes > 22000) 1261 else if (maxrx_bytes > 22000)
1317 maxrx_bytes = 22000; 1262 maxrx_bytes = 22000;
1318 DEBUG(1, "set maxrx=%u (rcvd=%u ticks=%lu)\n", 1263 pr_debug("set maxrx=%u (rcvd=%u ticks=%lu)\n",
1319 maxrx_bytes, bytes_rcvd, duration); 1264 maxrx_bytes, bytes_rcvd, duration);
1320 } else if (!duration && maxrx_bytes < 22000) { 1265 } else if (!duration && maxrx_bytes < 22000) {
1321 /* now much faster */ 1266 /* now much faster */
1322 maxrx_bytes += 2000; 1267 maxrx_bytes += 2000;
1323 if (maxrx_bytes > 22000) 1268 if (maxrx_bytes > 22000)
1324 maxrx_bytes = 22000; 1269 maxrx_bytes = 22000;
1325 DEBUG(1, "set maxrx=%u\n", maxrx_bytes); 1270 pr_debug("set maxrx=%u\n", maxrx_bytes);
1326 } 1271 }
1327 } 1272 }
1328 1273
@@ -1372,7 +1317,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1372 unsigned freespace; 1317 unsigned freespace;
1373 unsigned pktlen = skb->len; 1318 unsigned pktlen = skb->len;
1374 1319
1375 DEBUG(1, "do_start_xmit(skb=%p, dev=%p) len=%u\n", 1320 pr_debug("do_start_xmit(skb=%p, dev=%p) len=%u\n",
1376 skb, dev, pktlen); 1321 skb, dev, pktlen);
1377 1322
1378 1323
@@ -1398,7 +1343,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1398 freespace &= 0x7fff; 1343 freespace &= 0x7fff;
1399 /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */ 1344 /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
1400 okay = pktlen +2 < freespace; 1345 okay = pktlen +2 < freespace;
1401 DEBUG(2 + (okay ? 2 : 0), "%s: avail. tx space=%u%s\n", 1346 pr_debug("%s: avail. tx space=%u%s\n",
1402 dev->name, freespace, okay ? " (okay)":" (not enough)"); 1347 dev->name, freespace, okay ? " (okay)":" (not enough)");
1403 if (!okay) { /* not enough space */ 1348 if (!okay) { /* not enough space */
1404 return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */ 1349 return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */
@@ -1500,7 +1445,7 @@ do_config(struct net_device *dev, struct ifmap *map)
1500{ 1445{
1501 local_info_t *local = netdev_priv(dev); 1446 local_info_t *local = netdev_priv(dev);
1502 1447
1503 DEBUG(0, "do_config(%p)\n", dev); 1448 pr_debug("do_config(%p)\n", dev);
1504 if (map->port != 255 && map->port != dev->if_port) { 1449 if (map->port != 255 && map->port != dev->if_port) {
1505 if (map->port > 4) 1450 if (map->port > 4)
1506 return -EINVAL; 1451 return -EINVAL;
@@ -1527,7 +1472,7 @@ do_open(struct net_device *dev)
1527 local_info_t *lp = netdev_priv(dev); 1472 local_info_t *lp = netdev_priv(dev);
1528 struct pcmcia_device *link = lp->p_dev; 1473 struct pcmcia_device *link = lp->p_dev;
1529 1474
1530 DEBUG(0, "do_open(%p)\n", dev); 1475 dev_dbg(&link->dev, "do_open(%p)\n", dev);
1531 1476
1532 /* Check that the PCMCIA card is still here. */ 1477 /* Check that the PCMCIA card is still here. */
1533 /* Physical device present signature. */ 1478 /* Physical device present signature. */
@@ -1561,7 +1506,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1561 unsigned int ioaddr = dev->base_addr; 1506 unsigned int ioaddr = dev->base_addr;
1562 struct mii_ioctl_data *data = if_mii(rq); 1507 struct mii_ioctl_data *data = if_mii(rq);
1563 1508
1564 DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", 1509 pr_debug("%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n",
1565 dev->name, rq->ifr_ifrn.ifrn_name, cmd, 1510 dev->name, rq->ifr_ifrn.ifrn_name, cmd,
1566 data->phy_id, data->reg_num, data->val_in, data->val_out); 1511 data->phy_id, data->reg_num, data->val_in, data->val_out);
1567 1512
@@ -1610,7 +1555,7 @@ do_reset(struct net_device *dev, int full)
1610 unsigned int ioaddr = dev->base_addr; 1555 unsigned int ioaddr = dev->base_addr;
1611 unsigned value; 1556 unsigned value;
1612 1557
1613 DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full); 1558 pr_debug("%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
1614 1559
1615 hardreset(dev); 1560 hardreset(dev);
1616 PutByte(XIRCREG_CR, SoftReset); /* set */ 1561 PutByte(XIRCREG_CR, SoftReset); /* set */
@@ -1648,8 +1593,8 @@ do_reset(struct net_device *dev, int full)
1648 } 1593 }
1649 msleep(40); /* wait 40 msec to let it complete */ 1594 msleep(40); /* wait 40 msec to let it complete */
1650 1595
1651 #ifdef PCMCIA_DEBUG 1596 #if 0
1652 if (pc_debug) { 1597 {
1653 SelectPage(0); 1598 SelectPage(0);
1654 value = GetByte(XIRCREG_ESR); /* read the ESR */ 1599 value = GetByte(XIRCREG_ESR); /* read the ESR */
1655 printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value); 1600 printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value);
@@ -1666,7 +1611,7 @@ do_reset(struct net_device *dev, int full)
1666 value |= DisableLinkPulse; 1611 value |= DisableLinkPulse;
1667 PutByte(XIRCREG1_ECR, value); 1612 PutByte(XIRCREG1_ECR, value);
1668 #endif 1613 #endif
1669 DEBUG(0, "%s: ECR is: %#02x\n", dev->name, value); 1614 pr_debug("%s: ECR is: %#02x\n", dev->name, value);
1670 1615
1671 SelectPage(0x42); 1616 SelectPage(0x42);
1672 PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */ 1617 PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */
@@ -1844,7 +1789,7 @@ do_powerdown(struct net_device *dev)
1844 1789
1845 unsigned int ioaddr = dev->base_addr; 1790 unsigned int ioaddr = dev->base_addr;
1846 1791
1847 DEBUG(0, "do_powerdown(%p)\n", dev); 1792 pr_debug("do_powerdown(%p)\n", dev);
1848 1793
1849 SelectPage(4); 1794 SelectPage(4);
1850 PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ 1795 PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
@@ -1858,7 +1803,7 @@ do_stop(struct net_device *dev)
1858 local_info_t *lp = netdev_priv(dev); 1803 local_info_t *lp = netdev_priv(dev);
1859 struct pcmcia_device *link = lp->p_dev; 1804 struct pcmcia_device *link = lp->p_dev;
1860 1805
1861 DEBUG(0, "do_stop(%p)\n", dev); 1806 dev_dbg(&link->dev, "do_stop(%p)\n", dev);
1862 1807
1863 if (!link) 1808 if (!link)
1864 return -ENODEV; 1809 return -ENODEV;
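Both conversions above lean on pcmcia_get_mac_from_cis() for the common CISTPL_FUNCE/LAN_NODE_ID case and keep only the card-specific fallbacks in the driver. The fragment below is a rough sketch of that call order, with the 0x89 vendor-tuple fallback modeled on xirc2ps_config(); the helper name and the copy loop are illustrative, not the driver's exact code.

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Sketch: standard CIS lookup first, then a vendor tuple whose payload
 * is laid out like CISTPL_FUNCE (code 0x89 on some Xircom cards). */
static int example_get_mac(struct pcmcia_device *link, struct net_device *dev)
{
	u8 *buf = NULL;
	size_t len;
	int i, err;

	err = pcmcia_get_mac_from_cis(link, dev);
	if (!err)
		return 0;

	len = pcmcia_get_tuple(link, 0x89, &buf);
	if (buf && len == 8 && buf[0] == CISTPL_FUNCE_LAN_NODE_ID) {
		for (i = 0; i < 6; i++)	/* 6-byte node id starts at offset 2 */
			dev->dev_addr[i] = buf[i + 2];
		err = 0;
	}
	kfree(buf);
	return err;
}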
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index d0593ed9170e..f6036fb42319 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -43,21 +43,6 @@
43 43
44#include "airo.h" 44#include "airo.h"
45 45
46/*
47 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
48 you do not define PCMCIA_DEBUG at all, all the debug code will be
49 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
50 be present but disabled -- but it can then be enabled for specific
51 modules at load time with a 'pc_debug=#' option to insmod.
52*/
53#ifdef PCMCIA_DEBUG
54static int pc_debug = PCMCIA_DEBUG;
55module_param(pc_debug, int, 0);
56static char *version = "$Revision: 1.2 $";
57#define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args);
58#else
59#define DEBUG(n, args...)
60#endif
61 46
62/*====================================================================*/ 47/*====================================================================*/
63 48
@@ -145,11 +130,10 @@ static int airo_probe(struct pcmcia_device *p_dev)
145{ 130{
146 local_info_t *local; 131 local_info_t *local;
147 132
148 DEBUG(0, "airo_attach()\n"); 133 dev_dbg(&p_dev->dev, "airo_attach()\n");
149 134
150 /* Interrupt setup */ 135 /* Interrupt setup */
151 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 136 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
152 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
153 p_dev->irq.Handler = NULL; 137 p_dev->irq.Handler = NULL;
154 138
155 /* 139 /*
@@ -184,7 +168,7 @@ static int airo_probe(struct pcmcia_device *p_dev)
184 168
185static void airo_detach(struct pcmcia_device *link) 169static void airo_detach(struct pcmcia_device *link)
186{ 170{
187 DEBUG(0, "airo_detach(0x%p)\n", link); 171 dev_dbg(&link->dev, "airo_detach\n");
188 172
189 airo_release(link); 173 airo_release(link);
190 174
@@ -204,9 +188,6 @@ static void airo_detach(struct pcmcia_device *link)
204 188
205 ======================================================================*/ 189 ======================================================================*/
206 190
207#define CS_CHECK(fn, ret) \
208do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
209
210static int airo_cs_config_check(struct pcmcia_device *p_dev, 191static int airo_cs_config_check(struct pcmcia_device *p_dev,
211 cistpl_cftable_entry_t *cfg, 192 cistpl_cftable_entry_t *cfg,
212 cistpl_cftable_entry_t *dflt, 193 cistpl_cftable_entry_t *dflt,
@@ -275,11 +256,11 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
275 req->Base = mem->win[0].host_addr; 256 req->Base = mem->win[0].host_addr;
276 req->Size = mem->win[0].len; 257 req->Size = mem->win[0].len;
277 req->AccessSpeed = 0; 258 req->AccessSpeed = 0;
278 if (pcmcia_request_window(&p_dev, req, &p_dev->win) != 0) 259 if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0)
279 return -ENODEV; 260 return -ENODEV;
280 map.Page = 0; 261 map.Page = 0;
281 map.CardOffset = mem->win[0].card_addr; 262 map.CardOffset = mem->win[0].card_addr;
282 if (pcmcia_map_mem_page(p_dev->win, &map) != 0) 263 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
283 return -ENODEV; 264 return -ENODEV;
284 } 265 }
285 /* If we got this far, we're cool! */ 266 /* If we got this far, we're cool! */
@@ -291,11 +272,11 @@ static int airo_config(struct pcmcia_device *link)
291{ 272{
292 local_info_t *dev; 273 local_info_t *dev;
293 win_req_t *req; 274 win_req_t *req;
294 int last_fn, last_ret; 275 int ret;
295 276
296 dev = link->priv; 277 dev = link->priv;
297 278
298 DEBUG(0, "airo_config(0x%p)\n", link); 279 dev_dbg(&link->dev, "airo_config\n");
299 280
300 req = kzalloc(sizeof(win_req_t), GFP_KERNEL); 281 req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
301 if (!req) 282 if (!req)
@@ -315,8 +296,8 @@ static int airo_config(struct pcmcia_device *link)
315 * and most client drivers will only use the CIS to fill in 296 * and most client drivers will only use the CIS to fill in
316 * implementation-defined details. 297 * implementation-defined details.
317 */ 298 */
318 last_ret = pcmcia_loop_config(link, airo_cs_config_check, req); 299 ret = pcmcia_loop_config(link, airo_cs_config_check, req);
319 if (last_ret) 300 if (ret)
320 goto failed; 301 goto failed;
321 302
322 /* 303 /*
@@ -324,21 +305,25 @@ static int airo_config(struct pcmcia_device *link)
324 handler to the interrupt, unless the 'Handler' member of the 305 handler to the interrupt, unless the 'Handler' member of the
325 irq structure is initialized. 306 irq structure is initialized.
326 */ 307 */
327 if (link->conf.Attributes & CONF_ENABLE_IRQ) 308 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
328 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 309 ret = pcmcia_request_irq(link, &link->irq);
310 if (ret)
311 goto failed;
312 }
329 313
330 /* 314 /*
331 This actually configures the PCMCIA socket -- setting up 315 This actually configures the PCMCIA socket -- setting up
332 the I/O windows and the interrupt mapping, and putting the 316 the I/O windows and the interrupt mapping, and putting the
333 card and host interface into "Memory and IO" mode. 317 card and host interface into "Memory and IO" mode.
334 */ 318 */
335 CS_CHECK(RequestConfiguration, 319 ret = pcmcia_request_configuration(link, &link->conf);
336 pcmcia_request_configuration(link, &link->conf)); 320 if (ret)
321 goto failed;
337 ((local_info_t *)link->priv)->eth_dev = 322 ((local_info_t *)link->priv)->eth_dev =
338 init_airo_card(link->irq.AssignedIRQ, 323 init_airo_card(link->irq.AssignedIRQ,
339 link->io.BasePort1, 1, &handle_to_dev(link)); 324 link->io.BasePort1, 1, &link->dev);
340 if (!((local_info_t *)link->priv)->eth_dev) 325 if (!((local_info_t *)link->priv)->eth_dev)
341 goto cs_failed; 326 goto failed;
342 327
343 /* 328 /*
344 At this point, the dev_node_t structure(s) need to be 329 At this point, the dev_node_t structure(s) need to be
@@ -368,8 +353,6 @@ static int airo_config(struct pcmcia_device *link)
368 kfree(req); 353 kfree(req);
369 return 0; 354 return 0;
370 355
371 cs_failed:
372 cs_error(link, last_fn, last_ret);
373 failed: 356 failed:
374 airo_release(link); 357 airo_release(link);
375 kfree(req); 358 kfree(req);
@@ -386,7 +369,7 @@ static int airo_config(struct pcmcia_device *link)
386 369
387static void airo_release(struct pcmcia_device *link) 370static void airo_release(struct pcmcia_device *link)
388{ 371{
389 DEBUG(0, "airo_release(0x%p)\n", link); 372 dev_dbg(&link->dev, "airo_release\n");
390 pcmcia_disable_device(link); 373 pcmcia_disable_device(link);
391} 374}
392 375
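The hunks above show the conversion this whole series repeats: the driver-local CS_CHECK() macro, which stashed last_fn/last_ret and jumped to cs_failed so that cs_error() could print a Card Services error, is replaced by an ordinary return-value check that reuses the driver's existing failed label, and the cs_failed/cs_error pair disappears along with the last_fn/last_ret locals. A minimal sketch of the resulting shape — foo_config(), foo_config_check() and foo_release() are hypothetical stand-ins, but the calls and the error path mirror the airo hunks above:

static int foo_config(struct pcmcia_device *link)
{
	int ret;

	/* pick a usable configuration table entry from the CIS */
	ret = pcmcia_loop_config(link, foo_config_check, NULL);
	if (ret)
		goto failed;

	/* allocate an interrupt line; no handler is bound here */
	if (link->conf.Attributes & CONF_ENABLE_IRQ) {
		ret = pcmcia_request_irq(link, &link->irq);
		if (ret)
			goto failed;
	}

	/* configure the socket: I/O windows, IRQ routing, "Memory and IO" mode */
	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;

	return 0;

failed:
	foo_release(link);	/* ends in pcmcia_disable_device(), as airo_release() does */
	return -ENODEV;
}

The callers lose nothing but the cs_error() message; the error codes are still propagated, just without the Card Services reporting layer.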
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index ddaa859c3491..32407911842f 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -55,22 +55,6 @@
55 55
56#include "atmel.h" 56#include "atmel.h"
57 57
58/*
59 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
60 you do not define PCMCIA_DEBUG at all, all the debug code will be
61 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
62 be present but disabled -- but it can then be enabled for specific
63 modules at load time with a 'pc_debug=#' option to insmod.
64*/
65
66#ifdef PCMCIA_DEBUG
67static int pc_debug = PCMCIA_DEBUG;
68module_param(pc_debug, int, 0);
69static char *version = "$Revision: 1.2 $";
70#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
71#else
72#define DEBUG(n, args...)
73#endif
74 58
75/*====================================================================*/ 59/*====================================================================*/
76 60
@@ -155,11 +139,10 @@ static int atmel_probe(struct pcmcia_device *p_dev)
155{ 139{
156 local_info_t *local; 140 local_info_t *local;
157 141
158 DEBUG(0, "atmel_attach()\n"); 142 dev_dbg(&p_dev->dev, "atmel_attach()\n");
159 143
160 /* Interrupt setup */ 144 /* Interrupt setup */
161 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 145 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
162 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
163 p_dev->irq.Handler = NULL; 146 p_dev->irq.Handler = NULL;
164 147
165 /* 148 /*
@@ -194,7 +177,7 @@ static int atmel_probe(struct pcmcia_device *p_dev)
194 177
195static void atmel_detach(struct pcmcia_device *link) 178static void atmel_detach(struct pcmcia_device *link)
196{ 179{
197 DEBUG(0, "atmel_detach(0x%p)\n", link); 180 dev_dbg(&link->dev, "atmel_detach\n");
198 181
199 atmel_release(link); 182 atmel_release(link);
200 183
@@ -209,9 +192,6 @@ static void atmel_detach(struct pcmcia_device *link)
209 192
210 ======================================================================*/ 193 ======================================================================*/
211 194
212#define CS_CHECK(fn, ret) \
213do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
214
215/* Call-back function to interrogate PCMCIA-specific information 195/* Call-back function to interrogate PCMCIA-specific information
216 about the current existance of the card */ 196 about the current existance of the card */
217static int card_present(void *arg) 197static int card_present(void *arg)
@@ -275,13 +255,13 @@ static int atmel_config_check(struct pcmcia_device *p_dev,
275static int atmel_config(struct pcmcia_device *link) 255static int atmel_config(struct pcmcia_device *link)
276{ 256{
277 local_info_t *dev; 257 local_info_t *dev;
278 int last_fn, last_ret; 258 int ret;
279 struct pcmcia_device_id *did; 259 struct pcmcia_device_id *did;
280 260
281 dev = link->priv; 261 dev = link->priv;
282 did = dev_get_drvdata(&handle_to_dev(link)); 262 did = dev_get_drvdata(&link->dev);
283 263
284 DEBUG(0, "atmel_config(0x%p)\n", link); 264 dev_dbg(&link->dev, "atmel_config\n");
285 265
286 /* 266 /*
287 In this loop, we scan the CIS for configuration table entries, 267 In this loop, we scan the CIS for configuration table entries,
@@ -303,31 +283,36 @@ static int atmel_config(struct pcmcia_device *link)
303 handler to the interrupt, unless the 'Handler' member of the 283 handler to the interrupt, unless the 'Handler' member of the
304 irq structure is initialized. 284 irq structure is initialized.
305 */ 285 */
306 if (link->conf.Attributes & CONF_ENABLE_IRQ) 286 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
307 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 287 ret = pcmcia_request_irq(link, &link->irq);
288 if (ret)
289 goto failed;
290 }
308 291
309 /* 292 /*
310 This actually configures the PCMCIA socket -- setting up 293 This actually configures the PCMCIA socket -- setting up
311 the I/O windows and the interrupt mapping, and putting the 294 the I/O windows and the interrupt mapping, and putting the
312 card and host interface into "Memory and IO" mode. 295 card and host interface into "Memory and IO" mode.
313 */ 296 */
314 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 297 ret = pcmcia_request_configuration(link, &link->conf);
298 if (ret)
299 goto failed;
315 300
316 if (link->irq.AssignedIRQ == 0) { 301 if (link->irq.AssignedIRQ == 0) {
317 printk(KERN_ALERT 302 printk(KERN_ALERT
318 "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config."); 303 "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
319 goto cs_failed; 304 goto failed;
320 } 305 }
321 306
322 ((local_info_t*)link->priv)->eth_dev = 307 ((local_info_t*)link->priv)->eth_dev =
323 init_atmel_card(link->irq.AssignedIRQ, 308 init_atmel_card(link->irq.AssignedIRQ,
324 link->io.BasePort1, 309 link->io.BasePort1,
325 did ? did->driver_info : ATMEL_FW_TYPE_NONE, 310 did ? did->driver_info : ATMEL_FW_TYPE_NONE,
326 &handle_to_dev(link), 311 &link->dev,
327 card_present, 312 card_present,
328 link); 313 link);
329 if (!((local_info_t*)link->priv)->eth_dev) 314 if (!((local_info_t*)link->priv)->eth_dev)
330 goto cs_failed; 315 goto failed;
331 316
332 317
333 /* 318 /*
@@ -340,8 +325,6 @@ static int atmel_config(struct pcmcia_device *link)
340 325
341 return 0; 326 return 0;
342 327
343 cs_failed:
344 cs_error(link, last_fn, last_ret);
345 failed: 328 failed:
346 atmel_release(link); 329 atmel_release(link);
347 return -ENODEV; 330 return -ENODEV;
@@ -359,7 +342,7 @@ static void atmel_release(struct pcmcia_device *link)
359{ 342{
360 struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; 343 struct net_device *dev = ((local_info_t*)link->priv)->eth_dev;
361 344
362 DEBUG(0, "atmel_release(0x%p)\n", link); 345 dev_dbg(&link->dev, "atmel_release\n");
363 346
364 if (dev) 347 if (dev)
365 stop_atmel_card(dev); 348 stop_atmel_card(dev);
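The block removed at the top of this file (and again in ray_cs further down) is the per-driver PCMCIA_DEBUG/pc_debug machinery; it duplicated what the driver core already offers. dev_dbg() only produces output when DEBUG or CONFIG_DYNAMIC_DEBUG is in effect, and it prefixes each message with the driver and device name, which is why the "(0x%p)" pointer the old macro printed can simply be dropped. The conversion is mechanical; as a sketch:

/* before: driver-private macro, gated by the pc_debug module parameter */
DEBUG(0, "atmel_config(0x%p)\n", link);

/* after: generic device-keyed debug output, no driver-private knob */
dev_dbg(&link->dev, "atmel_config\n");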
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 6c3a74964ab8..984174bc7b0f 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -65,35 +65,15 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
65 struct ssb_bus *ssb; 65 struct ssb_bus *ssb;
66 win_req_t win; 66 win_req_t win;
67 memreq_t mem; 67 memreq_t mem;
68 tuple_t tuple;
69 cisparse_t parse;
70 int err = -ENOMEM; 68 int err = -ENOMEM;
71 int res = 0; 69 int res = 0;
72 unsigned char buf[64];
73 70
74 ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); 71 ssb = kzalloc(sizeof(*ssb), GFP_KERNEL);
75 if (!ssb) 72 if (!ssb)
76 goto out_error; 73 goto out_error;
77 74
78 err = -ENODEV; 75 err = -ENODEV;
79 tuple.DesiredTuple = CISTPL_CONFIG;
80 tuple.Attributes = 0;
81 tuple.TupleData = buf;
82 tuple.TupleDataMax = sizeof(buf);
83 tuple.TupleOffset = 0;
84 76
85 res = pcmcia_get_first_tuple(dev, &tuple);
86 if (res != 0)
87 goto err_kfree_ssb;
88 res = pcmcia_get_tuple_data(dev, &tuple);
89 if (res != 0)
90 goto err_kfree_ssb;
91 res = pcmcia_parse_tuple(&tuple, &parse);
92 if (res != 0)
93 goto err_kfree_ssb;
94
95 dev->conf.ConfigBase = parse.config.base;
96 dev->conf.Present = parse.config.rmask[0];
97 dev->conf.Attributes = CONF_ENABLE_IRQ; 77 dev->conf.Attributes = CONF_ENABLE_IRQ;
98 dev->conf.IntType = INT_MEMORY_AND_IO; 78 dev->conf.IntType = INT_MEMORY_AND_IO;
99 79
@@ -107,20 +87,18 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
107 win.Base = 0; 87 win.Base = 0;
108 win.Size = SSB_CORE_SIZE; 88 win.Size = SSB_CORE_SIZE;
109 win.AccessSpeed = 250; 89 win.AccessSpeed = 250;
110 res = pcmcia_request_window(&dev, &win, &dev->win); 90 res = pcmcia_request_window(dev, &win, &dev->win);
111 if (res != 0) 91 if (res != 0)
112 goto err_kfree_ssb; 92 goto err_kfree_ssb;
113 93
114 mem.CardOffset = 0; 94 mem.CardOffset = 0;
115 mem.Page = 0; 95 mem.Page = 0;
116 res = pcmcia_map_mem_page(dev->win, &mem); 96 res = pcmcia_map_mem_page(dev, dev->win, &mem);
117 if (res != 0) 97 if (res != 0)
118 goto err_disable; 98 goto err_disable;
119 99
120 dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 100 dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
121 dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
122 dev->irq.Handler = NULL; /* The handler is registered later. */ 101 dev->irq.Handler = NULL; /* The handler is registered later. */
123 dev->irq.Instance = NULL;
124 res = pcmcia_request_irq(dev, &dev->irq); 102 res = pcmcia_request_irq(dev, &dev->irq);
125 if (res != 0) 103 if (res != 0)
126 goto err_disable; 104 goto err_disable;
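Two independent simplifications meet in the b43 hunks: the hand-rolled CISTPL_CONFIG walk is gone because the PCMCIA core now parses that tuple and fills in conf.ConfigBase and conf.Present before the probe routine runs, and pcmcia_request_window()/pcmcia_map_mem_page() now take the struct pcmcia_device itself rather than a pointer-to-pointer and a bare window handle. A condensed sketch of what survives (field values, labels and window attributes as in the hunks above):

	win_req_t win;
	memreq_t mem;
	int res;

	/* CISTPL_CONFIG is handled by the core; only the driver's own choices remain */
	dev->conf.Attributes = CONF_ENABLE_IRQ;
	dev->conf.IntType = INT_MEMORY_AND_IO;

	win.Base = 0;
	win.Size = SSB_CORE_SIZE;
	win.AccessSpeed = 250;
	res = pcmcia_request_window(dev, &win, &dev->win);	/* was: pcmcia_request_window(&dev, ...) */
	if (res != 0)
		goto err_kfree_ssb;

	mem.CardOffset = 0;
	mem.Page = 0;
	res = pcmcia_map_mem_page(dev, dev->win, &mem);		/* was: pcmcia_map_mem_page(dev->win, &mem) */
	if (res != 0)
		goto err_disable;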
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index ad8eab4a639b..c9640a3e02c9 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -274,9 +274,6 @@ static int sandisk_enable_wireless(struct net_device *dev)
274 conf_reg_t reg; 274 conf_reg_t reg;
275 struct hostap_interface *iface = netdev_priv(dev); 275 struct hostap_interface *iface = netdev_priv(dev);
276 local_info_t *local = iface->local; 276 local_info_t *local = iface->local;
277 tuple_t tuple;
278 cisparse_t *parse = NULL;
279 u_char buf[64];
280 struct hostap_cs_priv *hw_priv = local->hw_priv; 277 struct hostap_cs_priv *hw_priv = local->hw_priv;
281 278
282 if (hw_priv->link->io.NumPorts1 < 0x42) { 279 if (hw_priv->link->io.NumPorts1 < 0x42) {
@@ -285,28 +282,13 @@ static int sandisk_enable_wireless(struct net_device *dev)
285 goto done; 282 goto done;
286 } 283 }
287 284
288 parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
289 if (parse == NULL) {
290 ret = -ENOMEM;
291 goto done;
292 }
293
294 tuple.Attributes = TUPLE_RETURN_COMMON;
295 tuple.TupleData = buf;
296 tuple.TupleDataMax = sizeof(buf);
297 tuple.TupleOffset = 0;
298
299 if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) { 285 if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) {
300 /* No SanDisk manfid found */ 286 /* No SanDisk manfid found */
301 ret = -ENODEV; 287 ret = -ENODEV;
302 goto done; 288 goto done;
303 } 289 }
304 290
305 tuple.DesiredTuple = CISTPL_LONGLINK_MFC; 291 if (hw_priv->link->socket->functions < 2) {
306 if (pcmcia_get_first_tuple(hw_priv->link, &tuple) ||
307 pcmcia_get_tuple_data(hw_priv->link, &tuple) ||
308 pcmcia_parse_tuple(&tuple, parse) ||
309 parse->longlink_mfc.nfn < 2) {
310 /* No multi-function links found */ 292 /* No multi-function links found */
311 ret = -ENODEV; 293 ret = -ENODEV;
312 goto done; 294 goto done;
@@ -354,7 +336,6 @@ static int sandisk_enable_wireless(struct net_device *dev)
354 udelay(10); 336 udelay(10);
355 337
356done: 338done:
357 kfree(parse);
358 return ret; 339 return ret;
359} 340}
360 341
@@ -529,10 +510,6 @@ static void prism2_detach(struct pcmcia_device *link)
529} 510}
530 511
531 512
532#define CS_CHECK(fn, ret) \
533do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
534
535
536/* run after a CARD_INSERTION event is received to configure the PCMCIA 513/* run after a CARD_INSERTION event is received to configure the PCMCIA
537 * socket and make the device available to the system */ 514 * socket and make the device available to the system */
538 515
@@ -624,7 +601,6 @@ static int prism2_config(struct pcmcia_device *link)
624 struct hostap_interface *iface; 601 struct hostap_interface *iface;
625 local_info_t *local; 602 local_info_t *local;
626 int ret = 1; 603 int ret = 1;
627 int last_fn, last_ret;
628 struct hostap_cs_priv *hw_priv; 604 struct hostap_cs_priv *hw_priv;
629 605
630 PDEBUG(DEBUG_FLOW, "prism2_config()\n"); 606 PDEBUG(DEBUG_FLOW, "prism2_config()\n");
@@ -636,19 +612,18 @@ static int prism2_config(struct pcmcia_device *link)
636 } 612 }
637 613
638 /* Look for an appropriate configuration table entry in the CIS */ 614 /* Look for an appropriate configuration table entry in the CIS */
639 last_ret = pcmcia_loop_config(link, prism2_config_check, NULL); 615 ret = pcmcia_loop_config(link, prism2_config_check, NULL);
640 if (last_ret) { 616 if (ret) {
641 if (!ignore_cis_vcc) 617 if (!ignore_cis_vcc)
642 printk(KERN_ERR "GetNextTuple(): No matching " 618 printk(KERN_ERR "GetNextTuple(): No matching "
643 "CIS configuration. Maybe you need the " 619 "CIS configuration. Maybe you need the "
644 "ignore_cis_vcc=1 parameter.\n"); 620 "ignore_cis_vcc=1 parameter.\n");
645 cs_error(link, RequestIO, last_ret);
646 goto failed; 621 goto failed;
647 } 622 }
648 623
649 /* Need to allocate net_device before requesting IRQ handler */ 624 /* Need to allocate net_device before requesting IRQ handler */
650 dev = prism2_init_local_data(&prism2_pccard_funcs, 0, 625 dev = prism2_init_local_data(&prism2_pccard_funcs, 0,
651 &handle_to_dev(link)); 626 &link->dev);
652 if (dev == NULL) 627 if (dev == NULL)
653 goto failed; 628 goto failed;
654 link->priv = dev; 629 link->priv = dev;
@@ -666,13 +641,11 @@ static int prism2_config(struct pcmcia_device *link)
666 * irq structure is initialized. 641 * irq structure is initialized.
667 */ 642 */
668 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 643 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
669 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | 644 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
670 IRQ_HANDLE_PRESENT;
671 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
672 link->irq.Handler = prism2_interrupt; 645 link->irq.Handler = prism2_interrupt;
673 link->irq.Instance = dev; 646 ret = pcmcia_request_irq(link, &link->irq);
674 CS_CHECK(RequestIRQ, 647 if (ret)
675 pcmcia_request_irq(link, &link->irq)); 648 goto failed;
676 } 649 }
677 650
678 /* 651 /*
@@ -680,8 +653,9 @@ static int prism2_config(struct pcmcia_device *link)
680 * the I/O windows and the interrupt mapping, and putting the 653 * the I/O windows and the interrupt mapping, and putting the
681 * card and host interface into "Memory and IO" mode. 654 * card and host interface into "Memory and IO" mode.
682 */ 655 */
683 CS_CHECK(RequestConfiguration, 656 ret = pcmcia_request_configuration(link, &link->conf);
684 pcmcia_request_configuration(link, &link->conf)); 657 if (ret)
658 goto failed;
685 659
686 dev->irq = link->irq.AssignedIRQ; 660 dev->irq = link->irq.AssignedIRQ;
687 dev->base_addr = link->io.BasePort1; 661 dev->base_addr = link->io.BasePort1;
@@ -714,9 +688,6 @@ static int prism2_config(struct pcmcia_device *link)
714 } 688 }
715 return ret; 689 return ret;
716 690
717 cs_failed:
718 cs_error(link, last_fn, last_ret);
719
720 failed: 691 failed:
721 kfree(hw_priv); 692 kfree(hw_priv);
722 prism2_release((u_long)link); 693 prism2_release((u_long)link);
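Two more pieces of duplicated bookkeeping go away in hostap_cs: the SanDisk quirk no longer reads CISTPL_LONGLINK_MFC just to count card functions, since the socket structure already carries that number, and requesting an IRQ with a handler no longer needs IRQ_HANDLE_PRESENT, IRQInfo1 or the Instance back-pointer — setting .Handler is enough. In shorthand (identifiers as in the hunks above):

	/* the function count comes straight from the socket, no CIS walk needed */
	if (hw_priv->link->socket->functions < 2) {
		ret = -ENODEV;		/* not a multi-function card */
		goto done;
	}

	/* IRQ setup: no IRQ_HANDLE_PRESENT, no IRQInfo1, no Instance */
	link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
	link->irq.Handler = prism2_interrupt;
	ret = pcmcia_request_irq(link, &link->irq);
	if (ret)
		goto failed;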
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 875516db319c..1f6cb58dd66c 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -591,7 +591,7 @@ static int if_cs_prog_helper(struct if_cs_card *card)
591 591
592 /* TODO: make firmware file configurable */ 592 /* TODO: make firmware file configurable */
593 ret = request_firmware(&fw, "libertas_cs_helper.fw", 593 ret = request_firmware(&fw, "libertas_cs_helper.fw",
594 &handle_to_dev(card->p_dev)); 594 &card->p_dev->dev);
595 if (ret) { 595 if (ret) {
596 lbs_pr_err("can't load helper firmware\n"); 596 lbs_pr_err("can't load helper firmware\n");
597 ret = -ENODEV; 597 ret = -ENODEV;
@@ -664,7 +664,7 @@ static int if_cs_prog_real(struct if_cs_card *card)
664 664
665 /* TODO: make firmware file configurable */ 665 /* TODO: make firmware file configurable */
666 ret = request_firmware(&fw, "libertas_cs.fw", 666 ret = request_firmware(&fw, "libertas_cs.fw",
667 &handle_to_dev(card->p_dev)); 667 &card->p_dev->dev);
668 if (ret) { 668 if (ret) {
669 lbs_pr_err("can't load firmware\n"); 669 lbs_pr_err("can't load firmware\n");
670 ret = -ENODEV; 670 ret = -ENODEV;
@@ -794,18 +794,37 @@ static void if_cs_release(struct pcmcia_device *p_dev)
794 * configure the card at this point -- we wait until we receive a card 794 * configure the card at this point -- we wait until we receive a card
795 * insertion event. 795 * insertion event.
796 */ 796 */
797
798static int if_cs_ioprobe(struct pcmcia_device *p_dev,
799 cistpl_cftable_entry_t *cfg,
800 cistpl_cftable_entry_t *dflt,
801 unsigned int vcc,
802 void *priv_data)
803{
804 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
805 p_dev->io.BasePort1 = cfg->io.win[0].base;
806 p_dev->io.NumPorts1 = cfg->io.win[0].len;
807
808 /* Do we need to allocate an interrupt? */
809 if (cfg->irq.IRQInfo1)
810 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
811
812 /* IO window settings */
813 if (cfg->io.nwin != 1) {
814 lbs_pr_err("wrong CIS (check number of IO windows)\n");
815 return -ENODEV;
816 }
817
818 /* This reserves IO space but doesn't actually enable it */
819 return pcmcia_request_io(p_dev, &p_dev->io);
820}
821
797static int if_cs_probe(struct pcmcia_device *p_dev) 822static int if_cs_probe(struct pcmcia_device *p_dev)
798{ 823{
799 int ret = -ENOMEM; 824 int ret = -ENOMEM;
800 unsigned int prod_id; 825 unsigned int prod_id;
801 struct lbs_private *priv; 826 struct lbs_private *priv;
802 struct if_cs_card *card; 827 struct if_cs_card *card;
803 /* CIS parsing */
804 tuple_t tuple;
805 cisparse_t parse;
806 cistpl_cftable_entry_t *cfg = &parse.cftable_entry;
807 cistpl_io_t *io = &cfg->io;
808 u_char buf[64];
809 828
810 lbs_deb_enter(LBS_DEB_CS); 829 lbs_deb_enter(LBS_DEB_CS);
811 830
@@ -819,48 +838,15 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
819 838
820 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 839 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
821 p_dev->irq.Handler = NULL; 840 p_dev->irq.Handler = NULL;
822 p_dev->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
823 841
824 p_dev->conf.Attributes = 0; 842 p_dev->conf.Attributes = 0;
825 p_dev->conf.IntType = INT_MEMORY_AND_IO; 843 p_dev->conf.IntType = INT_MEMORY_AND_IO;
826 844
827 tuple.Attributes = 0; 845 if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) {
828 tuple.TupleData = buf; 846 lbs_pr_err("error in pcmcia_loop_config\n");
829 tuple.TupleDataMax = sizeof(buf);
830 tuple.TupleOffset = 0;
831
832 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
833 if ((ret = pcmcia_get_first_tuple(p_dev, &tuple)) != 0 ||
834 (ret = pcmcia_get_tuple_data(p_dev, &tuple)) != 0 ||
835 (ret = pcmcia_parse_tuple(&tuple, &parse)) != 0)
836 {
837 lbs_pr_err("error in pcmcia_get_first_tuple etc\n");
838 goto out1;
839 }
840
841 p_dev->conf.ConfigIndex = cfg->index;
842
843 /* Do we need to allocate an interrupt? */
844 if (cfg->irq.IRQInfo1) {
845 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
846 }
847
848 /* IO window settings */
849 if (cfg->io.nwin != 1) {
850 lbs_pr_err("wrong CIS (check number of IO windows)\n");
851 ret = -ENODEV;
852 goto out1; 847 goto out1;
853 } 848 }
854 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
855 p_dev->io.BasePort1 = io->win[0].base;
856 p_dev->io.NumPorts1 = io->win[0].len;
857 849
858 /* This reserves IO space but doesn't actually enable it */
859 ret = pcmcia_request_io(p_dev, &p_dev->io);
860 if (ret) {
861 lbs_pr_err("error in pcmcia_request_io\n");
862 goto out1;
863 }
864 850
865 /* 851 /*
866 * Allocate an interrupt line. Note that this does not assign 852 * Allocate an interrupt line. Note that this does not assign
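The libertas change is the most instructive of the series: rather than fetching, reading and parsing the first CISTPL_CFTABLE_ENTRY by hand, the probe routine now hands a callback to pcmcia_loop_config(), and the core invokes it once per candidate configuration entry; returning 0 accepts that entry. A sketch of the shape, with a hypothetical foo_ioprobe() mirroring if_cs_ioprobe() above:

static int foo_ioprobe(struct pcmcia_device *p_dev,
		       cistpl_cftable_entry_t *cfg,
		       cistpl_cftable_entry_t *dflt,
		       unsigned int vcc, void *priv_data)
{
	/* this hardware expects exactly one I/O window */
	if (cfg->io.nwin != 1)
		return -ENODEV;

	p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
	p_dev->io.BasePort1 = cfg->io.win[0].base;
	p_dev->io.NumPorts1 = cfg->io.win[0].len;

	if (cfg->irq.IRQInfo1)
		p_dev->conf.Attributes |= CONF_ENABLE_IRQ;

	/* reserves the I/O range; it is enabled later by pcmcia_request_configuration() */
	return pcmcia_request_io(p_dev, &p_dev->io);
}

In the probe path the whole tuple_t/cisparse_t block then collapses to a single call: if (pcmcia_loop_config(p_dev, foo_ioprobe, NULL)) goto out1;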
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 38c1c9d2abb8..f27bb8367c98 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -109,7 +109,7 @@ orinoco_cs_probe(struct pcmcia_device *link)
109 struct orinoco_private *priv; 109 struct orinoco_private *priv;
110 struct orinoco_pccard *card; 110 struct orinoco_pccard *card;
111 111
112 priv = alloc_orinocodev(sizeof(*card), &handle_to_dev(link), 112 priv = alloc_orinocodev(sizeof(*card), &link->dev,
113 orinoco_cs_hard_reset, NULL); 113 orinoco_cs_hard_reset, NULL);
114 if (!priv) 114 if (!priv)
115 return -ENOMEM; 115 return -ENOMEM;
@@ -120,10 +120,8 @@ orinoco_cs_probe(struct pcmcia_device *link)
120 link->priv = priv; 120 link->priv = priv;
121 121
122 /* Interrupt setup */ 122 /* Interrupt setup */
123 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 123 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
124 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
125 link->irq.Handler = orinoco_interrupt; 124 link->irq.Handler = orinoco_interrupt;
126 link->irq.Instance = priv;
127 125
128 /* General socket configuration defaults can go here. In this 126 /* General socket configuration defaults can go here. In this
129 * client, we assume very little, and rely on the CIS for 127 * client, we assume very little, and rely on the CIS for
@@ -160,12 +158,6 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
160 * device available to the system. 158 * device available to the system.
161 */ 159 */
162 160
163#define CS_CHECK(fn, ret) do { \
164 last_fn = (fn); \
165 if ((last_ret = (ret)) != 0) \
166 goto cs_failed; \
167} while (0)
168
169static int orinoco_cs_config_check(struct pcmcia_device *p_dev, 161static int orinoco_cs_config_check(struct pcmcia_device *p_dev,
170 cistpl_cftable_entry_t *cfg, 162 cistpl_cftable_entry_t *cfg,
171 cistpl_cftable_entry_t *dflt, 163 cistpl_cftable_entry_t *dflt,
@@ -240,7 +232,7 @@ orinoco_cs_config(struct pcmcia_device *link)
240 struct orinoco_private *priv = link->priv; 232 struct orinoco_private *priv = link->priv;
241 struct orinoco_pccard *card = priv->card; 233 struct orinoco_pccard *card = priv->card;
242 hermes_t *hw = &priv->hw; 234 hermes_t *hw = &priv->hw;
243 int last_fn, last_ret; 235 int ret;
244 void __iomem *mem; 236 void __iomem *mem;
245 237
246 /* 238 /*
@@ -257,13 +249,12 @@ orinoco_cs_config(struct pcmcia_device *link)
257 * and most client drivers will only use the CIS to fill in 249 * and most client drivers will only use the CIS to fill in
258 * implementation-defined details. 250 * implementation-defined details.
259 */ 251 */
260 last_ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL); 252 ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL);
261 if (last_ret) { 253 if (ret) {
262 if (!ignore_cis_vcc) 254 if (!ignore_cis_vcc)
263 printk(KERN_ERR PFX "GetNextTuple(): No matching " 255 printk(KERN_ERR PFX "GetNextTuple(): No matching "
264 "CIS configuration. Maybe you need the " 256 "CIS configuration. Maybe you need the "
265 "ignore_cis_vcc=1 parameter.\n"); 257 "ignore_cis_vcc=1 parameter.\n");
266 cs_error(link, RequestIO, last_ret);
267 goto failed; 258 goto failed;
268 } 259 }
269 260
@@ -272,14 +263,16 @@ orinoco_cs_config(struct pcmcia_device *link)
272 * a handler to the interrupt, unless the 'Handler' member of 263 * a handler to the interrupt, unless the 'Handler' member of
273 * the irq structure is initialized. 264 * the irq structure is initialized.
274 */ 265 */
275 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 266 ret = pcmcia_request_irq(link, &link->irq);
267 if (ret)
268 goto failed;
276 269
277 /* We initialize the hermes structure before completing PCMCIA 270 /* We initialize the hermes structure before completing PCMCIA
278 * configuration just in case the interrupt handler gets 271 * configuration just in case the interrupt handler gets
279 * called. */ 272 * called. */
280 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); 273 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
281 if (!mem) 274 if (!mem)
282 goto cs_failed; 275 goto failed;
283 276
284 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 277 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
285 278
@@ -288,8 +281,9 @@ orinoco_cs_config(struct pcmcia_device *link)
288 * the I/O windows and the interrupt mapping, and putting the 281 * the I/O windows and the interrupt mapping, and putting the
289 * card and host interface into "Memory and IO" mode. 282 * card and host interface into "Memory and IO" mode.
290 */ 283 */
291 CS_CHECK(RequestConfiguration, 284 ret = pcmcia_request_configuration(link, &link->conf);
292 pcmcia_request_configuration(link, &link->conf)); 285 if (ret)
286 goto failed;
293 287
294 /* Ok, we have the configuration, prepare to register the netdev */ 288 /* Ok, we have the configuration, prepare to register the netdev */
295 card->node.major = card->node.minor = 0; 289 card->node.major = card->node.minor = 0;
@@ -315,9 +309,6 @@ orinoco_cs_config(struct pcmcia_device *link)
315 * net_device has been registered */ 309 * net_device has been registered */
316 return 0; 310 return 0;
317 311
318 cs_failed:
319 cs_error(link, last_fn, last_ret);
320
321 failed: 312 failed:
322 orinoco_cs_release(link); 313 orinoco_cs_release(link);
323 return -ENODEV; 314 return -ENODEV;
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index c361310b885d..59bda240fdc2 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -73,9 +73,6 @@ static void spectrum_cs_release(struct pcmcia_device *link);
73#define HCR_MEM16 0x10 /* memory width bit, should be preserved */ 73#define HCR_MEM16 0x10 /* memory width bit, should be preserved */
74 74
75 75
76#define CS_CHECK(fn, ret) \
77 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
78
79/* 76/*
80 * Reset the card using configuration registers COR and CCSR. 77 * Reset the card using configuration registers COR and CCSR.
81 * If IDLE is 1, stop the firmware, so that it can be safely rewritten. 78 * If IDLE is 1, stop the firmware, so that it can be safely rewritten.
@@ -83,7 +80,7 @@ static void spectrum_cs_release(struct pcmcia_device *link);
83static int 80static int
84spectrum_reset(struct pcmcia_device *link, int idle) 81spectrum_reset(struct pcmcia_device *link, int idle)
85{ 82{
86 int last_ret, last_fn; 83 int ret;
87 conf_reg_t reg; 84 conf_reg_t reg;
88 u_int save_cor; 85 u_int save_cor;
89 86
@@ -95,23 +92,26 @@ spectrum_reset(struct pcmcia_device *link, int idle)
95 reg.Function = 0; 92 reg.Function = 0;
96 reg.Action = CS_READ; 93 reg.Action = CS_READ;
97 reg.Offset = CISREG_COR; 94 reg.Offset = CISREG_COR;
98 CS_CHECK(AccessConfigurationRegister, 95 ret = pcmcia_access_configuration_register(link, &reg);
99 pcmcia_access_configuration_register(link, &reg)); 96 if (ret)
97 goto failed;
100 save_cor = reg.Value; 98 save_cor = reg.Value;
101 99
102 /* Soft-Reset card */ 100 /* Soft-Reset card */
103 reg.Action = CS_WRITE; 101 reg.Action = CS_WRITE;
104 reg.Offset = CISREG_COR; 102 reg.Offset = CISREG_COR;
105 reg.Value = (save_cor | COR_SOFT_RESET); 103 reg.Value = (save_cor | COR_SOFT_RESET);
106 CS_CHECK(AccessConfigurationRegister, 104 ret = pcmcia_access_configuration_register(link, &reg);
107 pcmcia_access_configuration_register(link, &reg)); 105 if (ret)
106 goto failed;
108 udelay(1000); 107 udelay(1000);
109 108
110 /* Read CCSR */ 109 /* Read CCSR */
111 reg.Action = CS_READ; 110 reg.Action = CS_READ;
112 reg.Offset = CISREG_CCSR; 111 reg.Offset = CISREG_CCSR;
113 CS_CHECK(AccessConfigurationRegister, 112 ret = pcmcia_access_configuration_register(link, &reg);
114 pcmcia_access_configuration_register(link, &reg)); 113 if (ret)
114 goto failed;
115 115
116 /* 116 /*
117 * Start or stop the firmware. Memory width bit should be 117 * Start or stop the firmware. Memory width bit should be
@@ -120,21 +120,22 @@ spectrum_reset(struct pcmcia_device *link, int idle)
120 reg.Action = CS_WRITE; 120 reg.Action = CS_WRITE;
121 reg.Offset = CISREG_CCSR; 121 reg.Offset = CISREG_CCSR;
122 reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16); 122 reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16);
123 CS_CHECK(AccessConfigurationRegister, 123 ret = pcmcia_access_configuration_register(link, &reg);
124 pcmcia_access_configuration_register(link, &reg)); 124 if (ret)
125 goto failed;
125 udelay(1000); 126 udelay(1000);
126 127
127 /* Restore original COR configuration index */ 128 /* Restore original COR configuration index */
128 reg.Action = CS_WRITE; 129 reg.Action = CS_WRITE;
129 reg.Offset = CISREG_COR; 130 reg.Offset = CISREG_COR;
130 reg.Value = (save_cor & ~COR_SOFT_RESET); 131 reg.Value = (save_cor & ~COR_SOFT_RESET);
131 CS_CHECK(AccessConfigurationRegister, 132 ret = pcmcia_access_configuration_register(link, &reg);
132 pcmcia_access_configuration_register(link, &reg)); 133 if (ret)
134 goto failed;
133 udelay(1000); 135 udelay(1000);
134 return 0; 136 return 0;
135 137
136cs_failed: 138failed:
137 cs_error(link, last_fn, last_ret);
138 return -ENODEV; 139 return -ENODEV;
139} 140}
140 141
@@ -181,7 +182,7 @@ spectrum_cs_probe(struct pcmcia_device *link)
181 struct orinoco_private *priv; 182 struct orinoco_private *priv;
182 struct orinoco_pccard *card; 183 struct orinoco_pccard *card;
183 184
184 priv = alloc_orinocodev(sizeof(*card), &handle_to_dev(link), 185 priv = alloc_orinocodev(sizeof(*card), &link->dev,
185 spectrum_cs_hard_reset, 186 spectrum_cs_hard_reset,
186 spectrum_cs_stop_firmware); 187 spectrum_cs_stop_firmware);
187 if (!priv) 188 if (!priv)
@@ -193,10 +194,8 @@ spectrum_cs_probe(struct pcmcia_device *link)
193 link->priv = priv; 194 link->priv = priv;
194 195
195 /* Interrupt setup */ 196 /* Interrupt setup */
196 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 197 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
197 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
198 link->irq.Handler = orinoco_interrupt; 198 link->irq.Handler = orinoco_interrupt;
199 link->irq.Instance = priv;
200 199
201 /* General socket configuration defaults can go here. In this 200 /* General socket configuration defaults can go here. In this
202 * client, we assume very little, and rely on the CIS for 201 * client, we assume very little, and rely on the CIS for
@@ -307,7 +306,7 @@ spectrum_cs_config(struct pcmcia_device *link)
307 struct orinoco_private *priv = link->priv; 306 struct orinoco_private *priv = link->priv;
308 struct orinoco_pccard *card = priv->card; 307 struct orinoco_pccard *card = priv->card;
309 hermes_t *hw = &priv->hw; 308 hermes_t *hw = &priv->hw;
310 int last_fn, last_ret; 309 int ret;
311 void __iomem *mem; 310 void __iomem *mem;
312 311
313 /* 312 /*
@@ -324,13 +323,12 @@ spectrum_cs_config(struct pcmcia_device *link)
324 * and most client drivers will only use the CIS to fill in 323 * and most client drivers will only use the CIS to fill in
325 * implementation-defined details. 324 * implementation-defined details.
326 */ 325 */
327 last_ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL); 326 ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL);
328 if (last_ret) { 327 if (ret) {
329 if (!ignore_cis_vcc) 328 if (!ignore_cis_vcc)
330 printk(KERN_ERR PFX "GetNextTuple(): No matching " 329 printk(KERN_ERR PFX "GetNextTuple(): No matching "
331 "CIS configuration. Maybe you need the " 330 "CIS configuration. Maybe you need the "
332 "ignore_cis_vcc=1 parameter.\n"); 331 "ignore_cis_vcc=1 parameter.\n");
333 cs_error(link, RequestIO, last_ret);
334 goto failed; 332 goto failed;
335 } 333 }
336 334
@@ -339,14 +337,16 @@ spectrum_cs_config(struct pcmcia_device *link)
339 * a handler to the interrupt, unless the 'Handler' member of 337 * a handler to the interrupt, unless the 'Handler' member of
340 * the irq structure is initialized. 338 * the irq structure is initialized.
341 */ 339 */
342 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 340 ret = pcmcia_request_irq(link, &link->irq);
341 if (ret)
342 goto failed;
343 343
344 /* We initialize the hermes structure before completing PCMCIA 344 /* We initialize the hermes structure before completing PCMCIA
345 * configuration just in case the interrupt handler gets 345 * configuration just in case the interrupt handler gets
346 * called. */ 346 * called. */
347 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); 347 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
348 if (!mem) 348 if (!mem)
349 goto cs_failed; 349 goto failed;
350 350
351 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 351 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
352 352
@@ -355,8 +355,9 @@ spectrum_cs_config(struct pcmcia_device *link)
355 * the I/O windows and the interrupt mapping, and putting the 355 * the I/O windows and the interrupt mapping, and putting the
356 * card and host interface into "Memory and IO" mode. 356 * card and host interface into "Memory and IO" mode.
357 */ 357 */
358 CS_CHECK(RequestConfiguration, 358 ret = pcmcia_request_configuration(link, &link->conf);
359 pcmcia_request_configuration(link, &link->conf)); 359 if (ret)
360 goto failed;
360 361
361 /* Ok, we have the configuration, prepare to register the netdev */ 362 /* Ok, we have the configuration, prepare to register the netdev */
362 card->node.major = card->node.minor = 0; 363 card->node.major = card->node.minor = 0;
@@ -386,9 +387,6 @@ spectrum_cs_config(struct pcmcia_device *link)
386 * net_device has been registered */ 387 * net_device has been registered */
387 return 0; 388 return 0;
388 389
389 cs_failed:
390 cs_error(link, last_fn, last_ret);
391
392 failed: 390 failed:
393 spectrum_cs_release(link); 391 spectrum_cs_release(link);
394 return -ENODEV; 392 return -ENODEV;
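spectrum_reset() gets the same mechanical treatment; the point worth noting is that each configuration-register access is now checked individually, while the conf_reg_t read-modify-write sequence itself — save COR, assert COR_SOFT_RESET, poke CCSR to start or stop the firmware, restore COR — is untouched. One step, as converted (a fragment of spectrum_reset(), variables as in the hunks above):

	conf_reg_t reg;
	u_int save_cor;
	int ret;

	reg.Function = 0;
	reg.Action = CS_READ;
	reg.Offset = CISREG_COR;
	ret = pcmcia_access_configuration_register(link, &reg);
	if (ret)
		goto failed;
	save_cor = reg.Value;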
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0366f5aeb914..88e1e4e32b22 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -71,25 +71,7 @@ typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */
71#include "rayctl.h" 71#include "rayctl.h"
72#include "ray_cs.h" 72#include "ray_cs.h"
73 73
74/* All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
75 you do not define PCMCIA_DEBUG at all, all the debug code will be
76 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
77 be present but disabled -- but it can then be enabled for specific
78 modules at load time with a 'pc_debug=#' option to insmod.
79*/
80 74
81#ifdef RAYLINK_DEBUG
82#define PCMCIA_DEBUG RAYLINK_DEBUG
83#endif
84#ifdef PCMCIA_DEBUG
85static int ray_debug;
86static int pc_debug = PCMCIA_DEBUG;
87module_param(pc_debug, int, 0);
88/* #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); */
89#define DEBUG(n, args...) if (pc_debug > (n)) printk(args);
90#else
91#define DEBUG(n, args...)
92#endif
93/** Prototypes based on PCMCIA skeleton driver *******************************/ 75/** Prototypes based on PCMCIA skeleton driver *******************************/
94static int ray_config(struct pcmcia_device *link); 76static int ray_config(struct pcmcia_device *link);
95static void ray_release(struct pcmcia_device *link); 77static void ray_release(struct pcmcia_device *link);
@@ -325,7 +307,7 @@ static int ray_probe(struct pcmcia_device *p_dev)
325 ray_dev_t *local; 307 ray_dev_t *local;
326 struct net_device *dev; 308 struct net_device *dev;
327 309
328 DEBUG(1, "ray_attach()\n"); 310 dev_dbg(&p_dev->dev, "ray_attach()\n");
329 311
330 /* Allocate space for private device-specific data */ 312 /* Allocate space for private device-specific data */
331 dev = alloc_etherdev(sizeof(ray_dev_t)); 313 dev = alloc_etherdev(sizeof(ray_dev_t));
@@ -341,8 +323,7 @@ static int ray_probe(struct pcmcia_device *p_dev)
341 p_dev->io.IOAddrLines = 5; 323 p_dev->io.IOAddrLines = 5;
342 324
343 /* Interrupt setup. For PCMCIA, driver takes what's given */ 325 /* Interrupt setup. For PCMCIA, driver takes what's given */
344 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 326 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
345 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
346 p_dev->irq.Handler = &ray_interrupt; 327 p_dev->irq.Handler = &ray_interrupt;
347 328
348 /* General socket configuration */ 329 /* General socket configuration */
@@ -351,13 +332,12 @@ static int ray_probe(struct pcmcia_device *p_dev)
351 p_dev->conf.ConfigIndex = 1; 332 p_dev->conf.ConfigIndex = 1;
352 333
353 p_dev->priv = dev; 334 p_dev->priv = dev;
354 p_dev->irq.Instance = dev;
355 335
356 local->finder = p_dev; 336 local->finder = p_dev;
357 local->card_status = CARD_INSERTED; 337 local->card_status = CARD_INSERTED;
358 local->authentication_state = UNAUTHENTICATED; 338 local->authentication_state = UNAUTHENTICATED;
359 local->num_multi = 0; 339 local->num_multi = 0;
360 DEBUG(2, "ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n", 340 dev_dbg(&p_dev->dev, "ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n",
361 p_dev, dev, local, &ray_interrupt); 341 p_dev, dev, local, &ray_interrupt);
362 342
363 /* Raylink entries in the device structure */ 343 /* Raylink entries in the device structure */
@@ -370,7 +350,7 @@ static int ray_probe(struct pcmcia_device *p_dev)
370#endif /* WIRELESS_SPY */ 350#endif /* WIRELESS_SPY */
371 351
372 352
373 DEBUG(2, "ray_cs ray_attach calling ether_setup.)\n"); 353 dev_dbg(&p_dev->dev, "ray_cs ray_attach calling ether_setup.)\n");
374 netif_stop_queue(dev); 354 netif_stop_queue(dev);
375 355
376 init_timer(&local->timer); 356 init_timer(&local->timer);
@@ -393,7 +373,7 @@ static void ray_detach(struct pcmcia_device *link)
393 struct net_device *dev; 373 struct net_device *dev;
394 ray_dev_t *local; 374 ray_dev_t *local;
395 375
396 DEBUG(1, "ray_detach(0x%p)\n", link); 376 dev_dbg(&link->dev, "ray_detach\n");
397 377
398 this_device = NULL; 378 this_device = NULL;
399 dev = link->priv; 379 dev = link->priv;
@@ -408,7 +388,7 @@ static void ray_detach(struct pcmcia_device *link)
408 unregister_netdev(dev); 388 unregister_netdev(dev);
409 free_netdev(dev); 389 free_netdev(dev);
410 } 390 }
411 DEBUG(2, "ray_cs ray_detach ending\n"); 391 dev_dbg(&link->dev, "ray_cs ray_detach ending\n");
412} /* ray_detach */ 392} /* ray_detach */
413 393
414/*============================================================================= 394/*=============================================================================
@@ -416,19 +396,17 @@ static void ray_detach(struct pcmcia_device *link)
416 is received, to configure the PCMCIA socket, and to make the 396 is received, to configure the PCMCIA socket, and to make the
417 ethernet device available to the system. 397 ethernet device available to the system.
418=============================================================================*/ 398=============================================================================*/
419#define CS_CHECK(fn, ret) \
420do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
421#define MAX_TUPLE_SIZE 128 399#define MAX_TUPLE_SIZE 128
422static int ray_config(struct pcmcia_device *link) 400static int ray_config(struct pcmcia_device *link)
423{ 401{
424 int last_fn = 0, last_ret = 0; 402 int ret = 0;
425 int i; 403 int i;
426 win_req_t req; 404 win_req_t req;
427 memreq_t mem; 405 memreq_t mem;
428 struct net_device *dev = (struct net_device *)link->priv; 406 struct net_device *dev = (struct net_device *)link->priv;
429 ray_dev_t *local = netdev_priv(dev); 407 ray_dev_t *local = netdev_priv(dev);
430 408
431 DEBUG(1, "ray_config(0x%p)\n", link); 409 dev_dbg(&link->dev, "ray_config\n");
432 410
433 /* Determine card type and firmware version */ 411 /* Determine card type and firmware version */
434 printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n", 412 printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n",
@@ -440,14 +418,17 @@ static int ray_config(struct pcmcia_device *link)
440 /* Now allocate an interrupt line. Note that this does not 418 /* Now allocate an interrupt line. Note that this does not
441 actually assign a handler to the interrupt. 419 actually assign a handler to the interrupt.
442 */ 420 */
443 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 421 ret = pcmcia_request_irq(link, &link->irq);
422 if (ret)
423 goto failed;
444 dev->irq = link->irq.AssignedIRQ; 424 dev->irq = link->irq.AssignedIRQ;
445 425
446 /* This actually configures the PCMCIA socket -- setting up 426 /* This actually configures the PCMCIA socket -- setting up
447 the I/O windows and the interrupt mapping. 427 the I/O windows and the interrupt mapping.
448 */ 428 */
449 CS_CHECK(RequestConfiguration, 429 ret = pcmcia_request_configuration(link, &link->conf);
450 pcmcia_request_configuration(link, &link->conf)); 430 if (ret)
431 goto failed;
451 432
452/*** Set up 32k window for shared memory (transmit and control) ************/ 433/*** Set up 32k window for shared memory (transmit and control) ************/
453 req.Attributes = 434 req.Attributes =
@@ -455,10 +436,14 @@ static int ray_config(struct pcmcia_device *link)
455 req.Base = 0; 436 req.Base = 0;
456 req.Size = 0x8000; 437 req.Size = 0x8000;
457 req.AccessSpeed = ray_mem_speed; 438 req.AccessSpeed = ray_mem_speed;
458 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); 439 ret = pcmcia_request_window(link, &req, &link->win);
440 if (ret)
441 goto failed;
459 mem.CardOffset = 0x0000; 442 mem.CardOffset = 0x0000;
460 mem.Page = 0; 443 mem.Page = 0;
461 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 444 ret = pcmcia_map_mem_page(link, link->win, &mem);
445 if (ret)
446 goto failed;
462 local->sram = ioremap(req.Base, req.Size); 447 local->sram = ioremap(req.Base, req.Size);
463 448
464/*** Set up 16k window for shared memory (receive buffer) ***************/ 449/*** Set up 16k window for shared memory (receive buffer) ***************/
@@ -467,11 +452,14 @@ static int ray_config(struct pcmcia_device *link)
467 req.Base = 0; 452 req.Base = 0;
468 req.Size = 0x4000; 453 req.Size = 0x4000;
469 req.AccessSpeed = ray_mem_speed; 454 req.AccessSpeed = ray_mem_speed;
470 CS_CHECK(RequestWindow, 455 ret = pcmcia_request_window(link, &req, &local->rmem_handle);
471 pcmcia_request_window(&link, &req, &local->rmem_handle)); 456 if (ret)
457 goto failed;
472 mem.CardOffset = 0x8000; 458 mem.CardOffset = 0x8000;
473 mem.Page = 0; 459 mem.Page = 0;
474 CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->rmem_handle, &mem)); 460 ret = pcmcia_map_mem_page(link, local->rmem_handle, &mem);
461 if (ret)
462 goto failed;
475 local->rmem = ioremap(req.Base, req.Size); 463 local->rmem = ioremap(req.Base, req.Size);
476 464
477/*** Set up window for attribute memory ***********************************/ 465/*** Set up window for attribute memory ***********************************/
@@ -480,22 +468,25 @@ static int ray_config(struct pcmcia_device *link)
480 req.Base = 0; 468 req.Base = 0;
481 req.Size = 0x1000; 469 req.Size = 0x1000;
482 req.AccessSpeed = ray_mem_speed; 470 req.AccessSpeed = ray_mem_speed;
483 CS_CHECK(RequestWindow, 471 ret = pcmcia_request_window(link, &req, &local->amem_handle);
484 pcmcia_request_window(&link, &req, &local->amem_handle)); 472 if (ret)
473 goto failed;
485 mem.CardOffset = 0x0000; 474 mem.CardOffset = 0x0000;
486 mem.Page = 0; 475 mem.Page = 0;
487 CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->amem_handle, &mem)); 476 ret = pcmcia_map_mem_page(link, local->amem_handle, &mem);
477 if (ret)
478 goto failed;
488 local->amem = ioremap(req.Base, req.Size); 479 local->amem = ioremap(req.Base, req.Size);
489 480
490 DEBUG(3, "ray_config sram=%p\n", local->sram); 481 dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
491 DEBUG(3, "ray_config rmem=%p\n", local->rmem); 482 dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
492 DEBUG(3, "ray_config amem=%p\n", local->amem); 483 dev_dbg(&link->dev, "ray_config amem=%p\n", local->amem);
493 if (ray_init(dev) < 0) { 484 if (ray_init(dev) < 0) {
494 ray_release(link); 485 ray_release(link);
495 return -ENODEV; 486 return -ENODEV;
496 } 487 }
497 488
498 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 489 SET_NETDEV_DEV(dev, &link->dev);
499 i = register_netdev(dev); 490 i = register_netdev(dev);
500 if (i != 0) { 491 if (i != 0) {
501 printk("ray_config register_netdev() failed\n"); 492 printk("ray_config register_netdev() failed\n");
@@ -511,9 +502,7 @@ static int ray_config(struct pcmcia_device *link)
511 502
512 return 0; 503 return 0;
513 504
514cs_failed: 505failed:
515 cs_error(link, last_fn, last_ret);
516
517 ray_release(link); 506 ray_release(link);
518 return -ENODEV; 507 return -ENODEV;
519} /* ray_config */ 508} /* ray_config */
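ray_config() maps three card windows the same way — 32 KiB of transmit/control shared memory, a 16 KiB receive buffer and 4 KiB of attribute memory — and the window setup itself is unchanged; only the API calls are: pcmcia_request_window(), pcmcia_map_mem_page() and, in ray_release() below, pcmcia_release_window() all take the pcmcia_device as their first argument now. The first window, in the new form (a fragment; req.Attributes is set as in the driver, its flags are not visible in this hunk):

	win_req_t req;
	memreq_t mem;
	int ret;

	req.Base = 0;
	req.Size = 0x8000;			/* 32 KiB transmit/control window */
	req.AccessSpeed = ray_mem_speed;
	ret = pcmcia_request_window(link, &req, &link->win);
	if (ret)
		goto failed;

	mem.CardOffset = 0x0000;
	mem.Page = 0;
	ret = pcmcia_map_mem_page(link, link->win, &mem);
	if (ret)
		goto failed;
	local->sram = ioremap(req.Base, req.Size);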
@@ -543,9 +532,9 @@ static int ray_init(struct net_device *dev)
543 struct ccs __iomem *pccs; 532 struct ccs __iomem *pccs;
544 ray_dev_t *local = netdev_priv(dev); 533 ray_dev_t *local = netdev_priv(dev);
545 struct pcmcia_device *link = local->finder; 534 struct pcmcia_device *link = local->finder;
546 DEBUG(1, "ray_init(0x%p)\n", dev); 535 dev_dbg(&link->dev, "ray_init(0x%p)\n", dev);
547 if (!(pcmcia_dev_present(link))) { 536 if (!(pcmcia_dev_present(link))) {
548 DEBUG(0, "ray_init - device not present\n"); 537 dev_dbg(&link->dev, "ray_init - device not present\n");
549 return -1; 538 return -1;
550 } 539 }
551 540
@@ -567,13 +556,13 @@ static int ray_init(struct net_device *dev)
567 local->fw_ver = local->startup_res.firmware_version[0]; 556 local->fw_ver = local->startup_res.firmware_version[0];
568 local->fw_bld = local->startup_res.firmware_version[1]; 557 local->fw_bld = local->startup_res.firmware_version[1];
569 local->fw_var = local->startup_res.firmware_version[2]; 558 local->fw_var = local->startup_res.firmware_version[2];
570 DEBUG(1, "ray_init firmware version %d.%d \n", local->fw_ver, 559 dev_dbg(&link->dev, "ray_init firmware version %d.%d \n", local->fw_ver,
571 local->fw_bld); 560 local->fw_bld);
572 561
573 local->tib_length = 0x20; 562 local->tib_length = 0x20;
574 if ((local->fw_ver == 5) && (local->fw_bld >= 30)) 563 if ((local->fw_ver == 5) && (local->fw_bld >= 30))
575 local->tib_length = local->startup_res.tib_length; 564 local->tib_length = local->startup_res.tib_length;
576 DEBUG(2, "ray_init tib_length = 0x%02x\n", local->tib_length); 565 dev_dbg(&link->dev, "ray_init tib_length = 0x%02x\n", local->tib_length);
577 /* Initialize CCS's to buffer free state */ 566 /* Initialize CCS's to buffer free state */
578 pccs = ccs_base(local); 567 pccs = ccs_base(local);
579 for (i = 0; i < NUMBER_OF_CCS; i++) { 568 for (i = 0; i < NUMBER_OF_CCS; i++) {
@@ -592,7 +581,7 @@ static int ray_init(struct net_device *dev)
592 581
593 clear_interrupt(local); /* Clear any interrupt from the card */ 582 clear_interrupt(local); /* Clear any interrupt from the card */
594 local->card_status = CARD_AWAITING_PARAM; 583 local->card_status = CARD_AWAITING_PARAM;
595 DEBUG(2, "ray_init ending\n"); 584 dev_dbg(&link->dev, "ray_init ending\n");
596 return 0; 585 return 0;
597} /* ray_init */ 586} /* ray_init */
598 587
@@ -605,9 +594,9 @@ static int dl_startup_params(struct net_device *dev)
605 struct ccs __iomem *pccs; 594 struct ccs __iomem *pccs;
606 struct pcmcia_device *link = local->finder; 595 struct pcmcia_device *link = local->finder;
607 596
608 DEBUG(1, "dl_startup_params entered\n"); 597 dev_dbg(&link->dev, "dl_startup_params entered\n");
609 if (!(pcmcia_dev_present(link))) { 598 if (!(pcmcia_dev_present(link))) {
610 DEBUG(2, "ray_cs dl_startup_params - device not present\n"); 599 dev_dbg(&link->dev, "ray_cs dl_startup_params - device not present\n");
611 return -1; 600 return -1;
612 } 601 }
613 602
@@ -625,7 +614,7 @@ static int dl_startup_params(struct net_device *dev)
625 local->dl_param_ccs = ccsindex; 614 local->dl_param_ccs = ccsindex;
626 pccs = ccs_base(local) + ccsindex; 615 pccs = ccs_base(local) + ccsindex;
627 writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd); 616 writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd);
628 DEBUG(2, "dl_startup_params start ccsindex = %d\n", 617 dev_dbg(&link->dev, "dl_startup_params start ccsindex = %d\n",
629 local->dl_param_ccs); 618 local->dl_param_ccs);
630 /* Interrupt the firmware to process the command */ 619 /* Interrupt the firmware to process the command */
631 if (interrupt_ecf(local, ccsindex)) { 620 if (interrupt_ecf(local, ccsindex)) {
@@ -641,7 +630,7 @@ static int dl_startup_params(struct net_device *dev)
641 local->timer.data = (long)local; 630 local->timer.data = (long)local;
642 local->timer.function = &verify_dl_startup; 631 local->timer.function = &verify_dl_startup;
643 add_timer(&local->timer); 632 add_timer(&local->timer);
644 DEBUG(2, 633 dev_dbg(&link->dev,
645 "ray_cs dl_startup_params started timer for verify_dl_startup\n"); 634 "ray_cs dl_startup_params started timer for verify_dl_startup\n");
646 return 0; 635 return 0;
647} /* dl_startup_params */ 636} /* dl_startup_params */
@@ -717,11 +706,11 @@ static void verify_dl_startup(u_long data)
717 struct pcmcia_device *link = local->finder; 706 struct pcmcia_device *link = local->finder;
718 707
719 if (!(pcmcia_dev_present(link))) { 708 if (!(pcmcia_dev_present(link))) {
720 DEBUG(2, "ray_cs verify_dl_startup - device not present\n"); 709 dev_dbg(&link->dev, "ray_cs verify_dl_startup - device not present\n");
721 return; 710 return;
722 } 711 }
723#ifdef PCMCIA_DEBUG 712#if 0
724 if (pc_debug > 2) { 713 {
725 int i; 714 int i;
726 printk(KERN_DEBUG 715 printk(KERN_DEBUG
727 "verify_dl_startup parameters sent via ccs %d:\n", 716 "verify_dl_startup parameters sent via ccs %d:\n",
@@ -760,7 +749,7 @@ static void start_net(u_long data)
760 int ccsindex; 749 int ccsindex;
761 struct pcmcia_device *link = local->finder; 750 struct pcmcia_device *link = local->finder;
762 if (!(pcmcia_dev_present(link))) { 751 if (!(pcmcia_dev_present(link))) {
763 DEBUG(2, "ray_cs start_net - device not present\n"); 752 dev_dbg(&link->dev, "ray_cs start_net - device not present\n");
764 return; 753 return;
765 } 754 }
766 /* Fill in the CCS fields for the ECF */ 755 /* Fill in the CCS fields for the ECF */
@@ -771,7 +760,7 @@ static void start_net(u_long data)
771 writeb(0, &pccs->var.start_network.update_param); 760 writeb(0, &pccs->var.start_network.update_param);
772 /* Interrupt the firmware to process the command */ 761 /* Interrupt the firmware to process the command */
773 if (interrupt_ecf(local, ccsindex)) { 762 if (interrupt_ecf(local, ccsindex)) {
774 DEBUG(1, "ray start net failed - card not ready for intr\n"); 763 dev_dbg(&link->dev, "ray start net failed - card not ready for intr\n");
775 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 764 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
776 return; 765 return;
777 } 766 }
@@ -790,7 +779,7 @@ static void join_net(u_long data)
790 struct pcmcia_device *link = local->finder; 779 struct pcmcia_device *link = local->finder;
791 780
792 if (!(pcmcia_dev_present(link))) { 781 if (!(pcmcia_dev_present(link))) {
793 DEBUG(2, "ray_cs join_net - device not present\n"); 782 dev_dbg(&link->dev, "ray_cs join_net - device not present\n");
794 return; 783 return;
795 } 784 }
796 /* Fill in the CCS fields for the ECF */ 785 /* Fill in the CCS fields for the ECF */
@@ -802,7 +791,7 @@ static void join_net(u_long data)
802 writeb(0, &pccs->var.join_network.net_initiated); 791 writeb(0, &pccs->var.join_network.net_initiated);
803 /* Interrupt the firmware to process the command */ 792 /* Interrupt the firmware to process the command */
804 if (interrupt_ecf(local, ccsindex)) { 793 if (interrupt_ecf(local, ccsindex)) {
805 DEBUG(1, "ray join net failed - card not ready for intr\n"); 794 dev_dbg(&link->dev, "ray join net failed - card not ready for intr\n");
806 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 795 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
807 return; 796 return;
808 } 797 }
@@ -821,7 +810,7 @@ static void ray_release(struct pcmcia_device *link)
821 ray_dev_t *local = netdev_priv(dev); 810 ray_dev_t *local = netdev_priv(dev);
822 int i; 811 int i;
823 812
824 DEBUG(1, "ray_release(0x%p)\n", link); 813 dev_dbg(&link->dev, "ray_release\n");
825 814
826 del_timer(&local->timer); 815 del_timer(&local->timer);
827 816
@@ -829,15 +818,15 @@ static void ray_release(struct pcmcia_device *link)
829 iounmap(local->rmem); 818 iounmap(local->rmem);
830 iounmap(local->amem); 819 iounmap(local->amem);
831 /* Do bother checking to see if these succeed or not */ 820 /* Do bother checking to see if these succeed or not */
832 i = pcmcia_release_window(local->amem_handle); 821 i = pcmcia_release_window(link, local->amem_handle);
833 if (i != 0) 822 if (i != 0)
834 DEBUG(0, "ReleaseWindow(local->amem) ret = %x\n", i); 823 dev_dbg(&link->dev, "ReleaseWindow(local->amem) ret = %x\n", i);
835 i = pcmcia_release_window(local->rmem_handle); 824 i = pcmcia_release_window(link, local->rmem_handle);
836 if (i != 0) 825 if (i != 0)
837 DEBUG(0, "ReleaseWindow(local->rmem) ret = %x\n", i); 826 dev_dbg(&link->dev, "ReleaseWindow(local->rmem) ret = %x\n", i);
838 pcmcia_disable_device(link); 827 pcmcia_disable_device(link);
839 828
840 DEBUG(2, "ray_release ending\n"); 829 dev_dbg(&link->dev, "ray_release ending\n");
841} 830}
842 831
843static int ray_suspend(struct pcmcia_device *link) 832static int ray_suspend(struct pcmcia_device *link)
@@ -871,9 +860,9 @@ static int ray_dev_init(struct net_device *dev)
871 ray_dev_t *local = netdev_priv(dev); 860 ray_dev_t *local = netdev_priv(dev);
872 struct pcmcia_device *link = local->finder; 861 struct pcmcia_device *link = local->finder;
873 862
874 DEBUG(1, "ray_dev_init(dev=%p)\n", dev); 863 dev_dbg(&link->dev, "ray_dev_init(dev=%p)\n", dev);
875 if (!(pcmcia_dev_present(link))) { 864 if (!(pcmcia_dev_present(link))) {
876 DEBUG(2, "ray_dev_init - device not present\n"); 865 dev_dbg(&link->dev, "ray_dev_init - device not present\n");
877 return -1; 866 return -1;
878 } 867 }
879#ifdef RAY_IMMEDIATE_INIT 868#ifdef RAY_IMMEDIATE_INIT
@@ -887,7 +876,7 @@ static int ray_dev_init(struct net_device *dev)
887 /* Postpone the card init so that we can still configure the card, 876 /* Postpone the card init so that we can still configure the card,
888 * for example using the Wireless Extensions. The init will happen 877 * for example using the Wireless Extensions. The init will happen
889 * in ray_open() - Jean II */ 878 * in ray_open() - Jean II */
890 DEBUG(1, 879 dev_dbg(&link->dev,
891 "ray_dev_init: postponing card init to ray_open() ; Status = %d\n", 880 "ray_dev_init: postponing card init to ray_open() ; Status = %d\n",
892 local->card_status); 881 local->card_status);
893#endif /* RAY_IMMEDIATE_INIT */ 882#endif /* RAY_IMMEDIATE_INIT */
@@ -896,7 +885,7 @@ static int ray_dev_init(struct net_device *dev)
896 memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); 885 memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
897 memset(dev->broadcast, 0xff, ETH_ALEN); 886 memset(dev->broadcast, 0xff, ETH_ALEN);
898 887
899 DEBUG(2, "ray_dev_init ending\n"); 888 dev_dbg(&link->dev, "ray_dev_init ending\n");
900 return 0; 889 return 0;
901} 890}
902 891
@@ -906,9 +895,9 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map)
906 ray_dev_t *local = netdev_priv(dev); 895 ray_dev_t *local = netdev_priv(dev);
907 struct pcmcia_device *link = local->finder; 896 struct pcmcia_device *link = local->finder;
908 /* Dummy routine to satisfy device structure */ 897 /* Dummy routine to satisfy device structure */
909 DEBUG(1, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map); 898 dev_dbg(&link->dev, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map);
910 if (!(pcmcia_dev_present(link))) { 899 if (!(pcmcia_dev_present(link))) {
911 DEBUG(2, "ray_dev_config - device not present\n"); 900 dev_dbg(&link->dev, "ray_dev_config - device not present\n");
912 return -1; 901 return -1;
913 } 902 }
914 903
@@ -924,14 +913,14 @@ static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
924 short length = skb->len; 913 short length = skb->len;
925 914
926 if (!pcmcia_dev_present(link)) { 915 if (!pcmcia_dev_present(link)) {
927 DEBUG(2, "ray_dev_start_xmit - device not present\n"); 916 dev_dbg(&link->dev, "ray_dev_start_xmit - device not present\n");
928 dev_kfree_skb(skb); 917 dev_kfree_skb(skb);
929 return NETDEV_TX_OK; 918 return NETDEV_TX_OK;
930 } 919 }
931 920
932 DEBUG(3, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev); 921 dev_dbg(&link->dev, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev);
933 if (local->authentication_state == NEED_TO_AUTH) { 922 if (local->authentication_state == NEED_TO_AUTH) {
934 DEBUG(0, "ray_cs Sending authentication request.\n"); 923 dev_dbg(&link->dev, "ray_cs Sending authentication request.\n");
935 if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) { 924 if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) {
936 local->authentication_state = AUTHENTICATED; 925 local->authentication_state = AUTHENTICATED;
937 netif_stop_queue(dev); 926 netif_stop_queue(dev);
@@ -971,7 +960,7 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
971 struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */ 960 struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */
972 short int addr; /* Address of xmit buffer in card space */ 961 short int addr; /* Address of xmit buffer in card space */
973 962
974 DEBUG(3, "ray_hw_xmit(data=%p, len=%d, dev=%p)\n", data, len, dev); 963 pr_debug("ray_hw_xmit(data=%p, len=%d, dev=%p)\n", data, len, dev);
975 if (len + TX_HEADER_LENGTH > TX_BUF_SIZE) { 964 if (len + TX_HEADER_LENGTH > TX_BUF_SIZE) {
976 printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n", 965 printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n",
977 len); 966 len);
@@ -979,9 +968,9 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
979 } 968 }
980 switch (ccsindex = get_free_tx_ccs(local)) { 969 switch (ccsindex = get_free_tx_ccs(local)) {
981 case ECCSBUSY: 970 case ECCSBUSY:
982 DEBUG(2, "ray_hw_xmit tx_ccs table busy\n"); 971 pr_debug("ray_hw_xmit tx_ccs table busy\n");
983 case ECCSFULL: 972 case ECCSFULL:
984 DEBUG(2, "ray_hw_xmit No free tx ccs\n"); 973 pr_debug("ray_hw_xmit No free tx ccs\n");
985 case ECARDGONE: 974 case ECARDGONE:
986 netif_stop_queue(dev); 975 netif_stop_queue(dev);
987 return XMIT_NO_CCS; 976 return XMIT_NO_CCS;
@@ -1018,12 +1007,12 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
1018 writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode); 1007 writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode);
1019 writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate); 1008 writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate);
1020 writeb(0, &pccs->var.tx_request.antenna); 1009 writeb(0, &pccs->var.tx_request.antenna);
1021 DEBUG(3, "ray_hw_xmit default_tx_rate = 0x%x\n", 1010 pr_debug("ray_hw_xmit default_tx_rate = 0x%x\n",
1022 local->net_default_tx_rate); 1011 local->net_default_tx_rate);
1023 1012
1024 /* Interrupt the firmware to process the command */ 1013 /* Interrupt the firmware to process the command */
1025 if (interrupt_ecf(local, ccsindex)) { 1014 if (interrupt_ecf(local, ccsindex)) {
1026 DEBUG(2, "ray_hw_xmit failed - ECF not ready for intr\n"); 1015 pr_debug("ray_hw_xmit failed - ECF not ready for intr\n");
1027/* TBD very inefficient to copy packet to buffer, and then not 1016/* TBD very inefficient to copy packet to buffer, and then not
1028 send it, but the alternative is to queue the messages and that 1017 send it, but the alternative is to queue the messages and that
1029 won't be done for a while. Maybe set tbusy until a CCS is free? 1018 won't be done for a while. Maybe set tbusy until a CCS is free?
@@ -1040,7 +1029,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
1040{ 1029{
1041 __be16 proto = ((struct ethhdr *)data)->h_proto; 1030 __be16 proto = ((struct ethhdr *)data)->h_proto;
1042 if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */ 1031 if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */
1043 DEBUG(3, "ray_cs translate_frame DIX II\n"); 1032 pr_debug("ray_cs translate_frame DIX II\n");
1044 /* Copy LLC header to card buffer */ 1033 /* Copy LLC header to card buffer */
1045 memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc)); 1034 memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
1046 memcpy_toio(((void __iomem *)&ptx->var) + sizeof(eth2_llc), 1035 memcpy_toio(((void __iomem *)&ptx->var) + sizeof(eth2_llc),
@@ -1056,9 +1045,9 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
1056 len - ETH_HLEN); 1045 len - ETH_HLEN);
1057 return (int)sizeof(struct snaphdr_t) - ETH_HLEN; 1046 return (int)sizeof(struct snaphdr_t) - ETH_HLEN;
1058 } else { /* already 802 type, and proto is length */ 1047 } else { /* already 802 type, and proto is length */
1059 DEBUG(3, "ray_cs translate_frame 802\n"); 1048 pr_debug("ray_cs translate_frame 802\n");
1060 if (proto == htons(0xffff)) { /* evil netware IPX 802.3 without LLC */ 1049 if (proto == htons(0xffff)) { /* evil netware IPX 802.3 without LLC */
1061 DEBUG(3, "ray_cs translate_frame evil IPX\n"); 1050 pr_debug("ray_cs translate_frame evil IPX\n");
1062 memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN); 1051 memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN);
1063 return 0 - ETH_HLEN; 1052 return 0 - ETH_HLEN;
1064 } 1053 }
@@ -1603,7 +1592,7 @@ static int ray_open(struct net_device *dev)
1603 struct pcmcia_device *link; 1592 struct pcmcia_device *link;
1604 link = local->finder; 1593 link = local->finder;
1605 1594
1606 DEBUG(1, "ray_open('%s')\n", dev->name); 1595 dev_dbg(&link->dev, "ray_open('%s')\n", dev->name);
1607 1596
1608 if (link->open == 0) 1597 if (link->open == 0)
1609 local->num_multi = 0; 1598 local->num_multi = 0;
@@ -1613,7 +1602,7 @@ static int ray_open(struct net_device *dev)
1613 if (local->card_status == CARD_AWAITING_PARAM) { 1602 if (local->card_status == CARD_AWAITING_PARAM) {
1614 int i; 1603 int i;
1615 1604
1616 DEBUG(1, "ray_open: doing init now !\n"); 1605 dev_dbg(&link->dev, "ray_open: doing init now !\n");
1617 1606
1618 /* Download startup parameters */ 1607 /* Download startup parameters */
1619 if ((i = dl_startup_params(dev)) < 0) { 1608 if ((i = dl_startup_params(dev)) < 0) {
@@ -1629,7 +1618,7 @@ static int ray_open(struct net_device *dev)
1629 else 1618 else
1630 netif_start_queue(dev); 1619 netif_start_queue(dev);
1631 1620
1632 DEBUG(2, "ray_open ending\n"); 1621 dev_dbg(&link->dev, "ray_open ending\n");
1633 return 0; 1622 return 0;
1634} /* end ray_open */ 1623} /* end ray_open */
1635 1624
@@ -1640,7 +1629,7 @@ static int ray_dev_close(struct net_device *dev)
1640 struct pcmcia_device *link; 1629 struct pcmcia_device *link;
1641 link = local->finder; 1630 link = local->finder;
1642 1631
1643 DEBUG(1, "ray_dev_close('%s')\n", dev->name); 1632 dev_dbg(&link->dev, "ray_dev_close('%s')\n", dev->name);
1644 1633
1645 link->open--; 1634 link->open--;
1646 netif_stop_queue(dev); 1635 netif_stop_queue(dev);
@@ -1656,7 +1645,7 @@ static int ray_dev_close(struct net_device *dev)
1656/*===========================================================================*/ 1645/*===========================================================================*/
1657static void ray_reset(struct net_device *dev) 1646static void ray_reset(struct net_device *dev)
1658{ 1647{
1659 DEBUG(1, "ray_reset entered\n"); 1648 pr_debug("ray_reset entered\n");
1660 return; 1649 return;
1661} 1650}
1662 1651
@@ -1669,17 +1658,17 @@ static int interrupt_ecf(ray_dev_t *local, int ccs)
1669 struct pcmcia_device *link = local->finder; 1658 struct pcmcia_device *link = local->finder;
1670 1659
1671 if (!(pcmcia_dev_present(link))) { 1660 if (!(pcmcia_dev_present(link))) {
1672 DEBUG(2, "ray_cs interrupt_ecf - device not present\n"); 1661 dev_dbg(&link->dev, "ray_cs interrupt_ecf - device not present\n");
1673 return -1; 1662 return -1;
1674 } 1663 }
1675 DEBUG(2, "interrupt_ecf(local=%p, ccs = 0x%x\n", local, ccs); 1664 dev_dbg(&link->dev, "interrupt_ecf(local=%p, ccs = 0x%x\n", local, ccs);
1676 1665
1677 while (i && 1666 while (i &&
1678 (readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) & 1667 (readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) &
1679 ECF_INTR_SET)) 1668 ECF_INTR_SET))
1680 i--; 1669 i--;
1681 if (i == 0) { 1670 if (i == 0) {
1682 DEBUG(2, "ray_cs interrupt_ecf card not ready for interrupt\n"); 1671 dev_dbg(&link->dev, "ray_cs interrupt_ecf card not ready for interrupt\n");
1683 return -1; 1672 return -1;
1684 } 1673 }
1685 /* Fill the mailbox, then kick the card */ 1674 /* Fill the mailbox, then kick the card */
@@ -1698,12 +1687,12 @@ static int get_free_tx_ccs(ray_dev_t *local)
1698 struct pcmcia_device *link = local->finder; 1687 struct pcmcia_device *link = local->finder;
1699 1688
1700 if (!(pcmcia_dev_present(link))) { 1689 if (!(pcmcia_dev_present(link))) {
1701 DEBUG(2, "ray_cs get_free_tx_ccs - device not present\n"); 1690 dev_dbg(&link->dev, "ray_cs get_free_tx_ccs - device not present\n");
1702 return ECARDGONE; 1691 return ECARDGONE;
1703 } 1692 }
1704 1693
1705 if (test_and_set_bit(0, &local->tx_ccs_lock)) { 1694 if (test_and_set_bit(0, &local->tx_ccs_lock)) {
1706 DEBUG(1, "ray_cs tx_ccs_lock busy\n"); 1695 dev_dbg(&link->dev, "ray_cs tx_ccs_lock busy\n");
1707 return ECCSBUSY; 1696 return ECCSBUSY;
1708 } 1697 }
1709 1698
@@ -1716,7 +1705,7 @@ static int get_free_tx_ccs(ray_dev_t *local)
1716 } 1705 }
1717 } 1706 }
1718 local->tx_ccs_lock = 0; 1707 local->tx_ccs_lock = 0;
1719 DEBUG(2, "ray_cs ERROR no free tx CCS for raylink card\n"); 1708 dev_dbg(&link->dev, "ray_cs ERROR no free tx CCS for raylink card\n");
1720 return ECCSFULL; 1709 return ECCSFULL;
1721} /* get_free_tx_ccs */ 1710} /* get_free_tx_ccs */
1722 1711
@@ -1730,11 +1719,11 @@ static int get_free_ccs(ray_dev_t *local)
1730 struct pcmcia_device *link = local->finder; 1719 struct pcmcia_device *link = local->finder;
1731 1720
1732 if (!(pcmcia_dev_present(link))) { 1721 if (!(pcmcia_dev_present(link))) {
1733 DEBUG(2, "ray_cs get_free_ccs - device not present\n"); 1722 dev_dbg(&link->dev, "ray_cs get_free_ccs - device not present\n");
1734 return ECARDGONE; 1723 return ECARDGONE;
1735 } 1724 }
1736 if (test_and_set_bit(0, &local->ccs_lock)) { 1725 if (test_and_set_bit(0, &local->ccs_lock)) {
1737 DEBUG(1, "ray_cs ccs_lock busy\n"); 1726 dev_dbg(&link->dev, "ray_cs ccs_lock busy\n");
1738 return ECCSBUSY; 1727 return ECCSBUSY;
1739 } 1728 }
1740 1729
@@ -1747,7 +1736,7 @@ static int get_free_ccs(ray_dev_t *local)
1747 } 1736 }
1748 } 1737 }
1749 local->ccs_lock = 0; 1738 local->ccs_lock = 0;
1750 DEBUG(1, "ray_cs ERROR no free CCS for raylink card\n"); 1739 dev_dbg(&link->dev, "ray_cs ERROR no free CCS for raylink card\n");
1751 return ECCSFULL; 1740 return ECCSFULL;
1752} /* get_free_ccs */ 1741} /* get_free_ccs */
1753 1742
@@ -1823,7 +1812,7 @@ static struct net_device_stats *ray_get_stats(struct net_device *dev)
1823 struct pcmcia_device *link = local->finder; 1812 struct pcmcia_device *link = local->finder;
1824 struct status __iomem *p = local->sram + STATUS_BASE; 1813 struct status __iomem *p = local->sram + STATUS_BASE;
1825 if (!(pcmcia_dev_present(link))) { 1814 if (!(pcmcia_dev_present(link))) {
1826 DEBUG(2, "ray_cs net_device_stats - device not present\n"); 1815 dev_dbg(&link->dev, "ray_cs net_device_stats - device not present\n");
1827 return &local->stats; 1816 return &local->stats;
1828 } 1817 }
1829 if (readb(&p->mrx_overflow_for_host)) { 1818 if (readb(&p->mrx_overflow_for_host)) {
@@ -1856,12 +1845,12 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
1856 struct ccs __iomem *pccs; 1845 struct ccs __iomem *pccs;
1857 1846
1858 if (!(pcmcia_dev_present(link))) { 1847 if (!(pcmcia_dev_present(link))) {
1859 DEBUG(2, "ray_update_parm - device not present\n"); 1848 dev_dbg(&link->dev, "ray_update_parm - device not present\n");
1860 return; 1849 return;
1861 } 1850 }
1862 1851
1863 if ((ccsindex = get_free_ccs(local)) < 0) { 1852 if ((ccsindex = get_free_ccs(local)) < 0) {
1864 DEBUG(0, "ray_update_parm - No free ccs\n"); 1853 dev_dbg(&link->dev, "ray_update_parm - No free ccs\n");
1865 return; 1854 return;
1866 } 1855 }
1867 pccs = ccs_base(local) + ccsindex; 1856 pccs = ccs_base(local) + ccsindex;
@@ -1874,7 +1863,7 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
1874 } 1863 }
1875 /* Interrupt the firmware to process the command */ 1864 /* Interrupt the firmware to process the command */
1876 if (interrupt_ecf(local, ccsindex)) { 1865 if (interrupt_ecf(local, ccsindex)) {
1877 DEBUG(0, "ray_cs associate failed - ECF not ready for intr\n"); 1866 dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n");
1878 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 1867 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
1879 } 1868 }
1880} 1869}
@@ -1891,12 +1880,12 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1891 void __iomem *p = local->sram + HOST_TO_ECF_BASE; 1880 void __iomem *p = local->sram + HOST_TO_ECF_BASE;
1892 1881
1893 if (!(pcmcia_dev_present(link))) { 1882 if (!(pcmcia_dev_present(link))) {
1894 DEBUG(2, "ray_update_multi_list - device not present\n"); 1883 dev_dbg(&link->dev, "ray_update_multi_list - device not present\n");
1895 return; 1884 return;
1896 } else 1885 } else
1897 DEBUG(2, "ray_update_multi_list(%p)\n", dev); 1886 dev_dbg(&link->dev, "ray_update_multi_list(%p)\n", dev);
1898 if ((ccsindex = get_free_ccs(local)) < 0) { 1887 if ((ccsindex = get_free_ccs(local)) < 0) {
1899 DEBUG(1, "ray_update_multi - No free ccs\n"); 1888 dev_dbg(&link->dev, "ray_update_multi - No free ccs\n");
1900 return; 1889 return;
1901 } 1890 }
1902 pccs = ccs_base(local) + ccsindex; 1891 pccs = ccs_base(local) + ccsindex;
@@ -1910,7 +1899,7 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1910 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; 1899 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL;
1911 dmip = &dmi->next) { 1900 dmip = &dmi->next) {
1912 memcpy_toio(p, dmi->dmi_addr, ETH_ALEN); 1901 memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
1913 DEBUG(1, 1902 dev_dbg(&link->dev,
1914 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", 1903 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
1915 dmi->dmi_addr[0], dmi->dmi_addr[1], 1904 dmi->dmi_addr[0], dmi->dmi_addr[1],
1916 dmi->dmi_addr[2], dmi->dmi_addr[3], 1905 dmi->dmi_addr[2], dmi->dmi_addr[3],
@@ -1921,12 +1910,12 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1921 if (i > 256 / ADDRLEN) 1910 if (i > 256 / ADDRLEN)
1922 i = 256 / ADDRLEN; 1911 i = 256 / ADDRLEN;
1923 writeb((UCHAR) i, &pccs->var); 1912 writeb((UCHAR) i, &pccs->var);
1924 DEBUG(1, "ray_cs update_multi %d addresses in list\n", i); 1913 dev_dbg(&link->dev, "ray_cs update_multi %d addresses in list\n", i);
1925 /* Interrupt the firmware to process the command */ 1914 /* Interrupt the firmware to process the command */
1926 local->num_multi = i; 1915 local->num_multi = i;
1927 } 1916 }
1928 if (interrupt_ecf(local, ccsindex)) { 1917 if (interrupt_ecf(local, ccsindex)) {
1929 DEBUG(1, 1918 dev_dbg(&link->dev,
1930 "ray_cs update_multi failed - ECF not ready for intr\n"); 1919 "ray_cs update_multi failed - ECF not ready for intr\n");
1931 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 1920 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
1932 } 1921 }
@@ -1938,11 +1927,11 @@ static void set_multicast_list(struct net_device *dev)
1938 ray_dev_t *local = netdev_priv(dev); 1927 ray_dev_t *local = netdev_priv(dev);
1939 UCHAR promisc; 1928 UCHAR promisc;
1940 1929
1941 DEBUG(2, "ray_cs set_multicast_list(%p)\n", dev); 1930 pr_debug("ray_cs set_multicast_list(%p)\n", dev);
1942 1931
1943 if (dev->flags & IFF_PROMISC) { 1932 if (dev->flags & IFF_PROMISC) {
1944 if (local->sparm.b5.a_promiscuous_mode == 0) { 1933 if (local->sparm.b5.a_promiscuous_mode == 0) {
1945 DEBUG(1, "ray_cs set_multicast_list promisc on\n"); 1934 pr_debug("ray_cs set_multicast_list promisc on\n");
1946 local->sparm.b5.a_promiscuous_mode = 1; 1935 local->sparm.b5.a_promiscuous_mode = 1;
1947 promisc = 1; 1936 promisc = 1;
1948 ray_update_parm(dev, OBJID_promiscuous_mode, 1937 ray_update_parm(dev, OBJID_promiscuous_mode,
@@ -1950,7 +1939,7 @@ static void set_multicast_list(struct net_device *dev)
1950 } 1939 }
1951 } else { 1940 } else {
1952 if (local->sparm.b5.a_promiscuous_mode == 1) { 1941 if (local->sparm.b5.a_promiscuous_mode == 1) {
1953 DEBUG(1, "ray_cs set_multicast_list promisc off\n"); 1942 pr_debug("ray_cs set_multicast_list promisc off\n");
1954 local->sparm.b5.a_promiscuous_mode = 0; 1943 local->sparm.b5.a_promiscuous_mode = 0;
1955 promisc = 0; 1944 promisc = 0;
1956 ray_update_parm(dev, OBJID_promiscuous_mode, 1945 ray_update_parm(dev, OBJID_promiscuous_mode,
@@ -1984,19 +1973,19 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
1984 if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */ 1973 if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */
1985 return IRQ_NONE; 1974 return IRQ_NONE;
1986 1975
1987 DEBUG(4, "ray_cs: interrupt for *dev=%p\n", dev); 1976 pr_debug("ray_cs: interrupt for *dev=%p\n", dev);
1988 1977
1989 local = netdev_priv(dev); 1978 local = netdev_priv(dev);
1990 link = (struct pcmcia_device *)local->finder; 1979 link = (struct pcmcia_device *)local->finder;
1991 if (!pcmcia_dev_present(link)) { 1980 if (!pcmcia_dev_present(link)) {
1992 DEBUG(2, 1981 pr_debug(
1993 "ray_cs interrupt from device not present or suspended.\n"); 1982 "ray_cs interrupt from device not present or suspended.\n");
1994 return IRQ_NONE; 1983 return IRQ_NONE;
1995 } 1984 }
1996 rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index); 1985 rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index);
1997 1986
1998 if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) { 1987 if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
1999 DEBUG(1, "ray_cs interrupt bad rcsindex = 0x%x\n", rcsindex); 1988 dev_dbg(&link->dev, "ray_cs interrupt bad rcsindex = 0x%x\n", rcsindex);
2000 clear_interrupt(local); 1989 clear_interrupt(local);
2001 return IRQ_HANDLED; 1990 return IRQ_HANDLED;
2002 } 1991 }
@@ -2008,33 +1997,33 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2008 case CCS_DOWNLOAD_STARTUP_PARAMS: /* Happens in firmware someday */ 1997 case CCS_DOWNLOAD_STARTUP_PARAMS: /* Happens in firmware someday */
2009 del_timer(&local->timer); 1998 del_timer(&local->timer);
2010 if (status == CCS_COMMAND_COMPLETE) { 1999 if (status == CCS_COMMAND_COMPLETE) {
2011 DEBUG(1, 2000 dev_dbg(&link->dev,
2012 "ray_cs interrupt download_startup_parameters OK\n"); 2001 "ray_cs interrupt download_startup_parameters OK\n");
2013 } else { 2002 } else {
2014 DEBUG(1, 2003 dev_dbg(&link->dev,
2015 "ray_cs interrupt download_startup_parameters fail\n"); 2004 "ray_cs interrupt download_startup_parameters fail\n");
2016 } 2005 }
2017 break; 2006 break;
2018 case CCS_UPDATE_PARAMS: 2007 case CCS_UPDATE_PARAMS:
2019 DEBUG(1, "ray_cs interrupt update params done\n"); 2008 dev_dbg(&link->dev, "ray_cs interrupt update params done\n");
2020 if (status != CCS_COMMAND_COMPLETE) { 2009 if (status != CCS_COMMAND_COMPLETE) {
2021 tmp = 2010 tmp =
2022 readb(&pccs->var.update_param. 2011 readb(&pccs->var.update_param.
2023 failure_cause); 2012 failure_cause);
2024 DEBUG(0, 2013 dev_dbg(&link->dev,
2025 "ray_cs interrupt update params failed - reason %d\n", 2014 "ray_cs interrupt update params failed - reason %d\n",
2026 tmp); 2015 tmp);
2027 } 2016 }
2028 break; 2017 break;
2029 case CCS_REPORT_PARAMS: 2018 case CCS_REPORT_PARAMS:
2030 DEBUG(1, "ray_cs interrupt report params done\n"); 2019 dev_dbg(&link->dev, "ray_cs interrupt report params done\n");
2031 break; 2020 break;
2032 case CCS_UPDATE_MULTICAST_LIST: /* Note that this CCS isn't returned */ 2021 case CCS_UPDATE_MULTICAST_LIST: /* Note that this CCS isn't returned */
2033 DEBUG(1, 2022 dev_dbg(&link->dev,
2034 "ray_cs interrupt CCS Update Multicast List done\n"); 2023 "ray_cs interrupt CCS Update Multicast List done\n");
2035 break; 2024 break;
2036 case CCS_UPDATE_POWER_SAVINGS_MODE: 2025 case CCS_UPDATE_POWER_SAVINGS_MODE:
2037 DEBUG(1, 2026 dev_dbg(&link->dev,
2038 "ray_cs interrupt update power save mode done\n"); 2027 "ray_cs interrupt update power save mode done\n");
2039 break; 2028 break;
2040 case CCS_START_NETWORK: 2029 case CCS_START_NETWORK:
@@ -2043,11 +2032,11 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2043 if (readb 2032 if (readb
2044 (&pccs->var.start_network.net_initiated) == 2033 (&pccs->var.start_network.net_initiated) ==
2045 1) { 2034 1) {
2046 DEBUG(0, 2035 dev_dbg(&link->dev,
2047 "ray_cs interrupt network \"%s\" started\n", 2036 "ray_cs interrupt network \"%s\" started\n",
2048 local->sparm.b4.a_current_ess_id); 2037 local->sparm.b4.a_current_ess_id);
2049 } else { 2038 } else {
2050 DEBUG(0, 2039 dev_dbg(&link->dev,
2051 "ray_cs interrupt network \"%s\" joined\n", 2040 "ray_cs interrupt network \"%s\" joined\n",
2052 local->sparm.b4.a_current_ess_id); 2041 local->sparm.b4.a_current_ess_id);
2053 } 2042 }
@@ -2074,13 +2063,13 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2074 del_timer(&local->timer); 2063 del_timer(&local->timer);
2075 local->timer.expires = jiffies + HZ * 5; 2064 local->timer.expires = jiffies + HZ * 5;
2076 local->timer.data = (long)local; 2065 local->timer.data = (long)local;
2077 if (cmd == CCS_START_NETWORK) { 2066 if (status == CCS_START_NETWORK) {
2078 DEBUG(0, 2067 dev_dbg(&link->dev,
2079 "ray_cs interrupt network \"%s\" start failed\n", 2068 "ray_cs interrupt network \"%s\" start failed\n",
2080 local->sparm.b4.a_current_ess_id); 2069 local->sparm.b4.a_current_ess_id);
2081 local->timer.function = &start_net; 2070 local->timer.function = &start_net;
2082 } else { 2071 } else {
2083 DEBUG(0, 2072 dev_dbg(&link->dev,
2084 "ray_cs interrupt network \"%s\" join failed\n", 2073 "ray_cs interrupt network \"%s\" join failed\n",
2085 local->sparm.b4.a_current_ess_id); 2074 local->sparm.b4.a_current_ess_id);
2086 local->timer.function = &join_net; 2075 local->timer.function = &join_net;
@@ -2091,19 +2080,19 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2091 case CCS_START_ASSOCIATION: 2080 case CCS_START_ASSOCIATION:
2092 if (status == CCS_COMMAND_COMPLETE) { 2081 if (status == CCS_COMMAND_COMPLETE) {
2093 local->card_status = CARD_ASSOC_COMPLETE; 2082 local->card_status = CARD_ASSOC_COMPLETE;
2094 DEBUG(0, "ray_cs association successful\n"); 2083 dev_dbg(&link->dev, "ray_cs association successful\n");
2095 } else { 2084 } else {
2096 DEBUG(0, "ray_cs association failed,\n"); 2085 dev_dbg(&link->dev, "ray_cs association failed,\n");
2097 local->card_status = CARD_ASSOC_FAILED; 2086 local->card_status = CARD_ASSOC_FAILED;
2098 join_net((u_long) local); 2087 join_net((u_long) local);
2099 } 2088 }
2100 break; 2089 break;
2101 case CCS_TX_REQUEST: 2090 case CCS_TX_REQUEST:
2102 if (status == CCS_COMMAND_COMPLETE) { 2091 if (status == CCS_COMMAND_COMPLETE) {
2103 DEBUG(3, 2092 dev_dbg(&link->dev,
2104 "ray_cs interrupt tx request complete\n"); 2093 "ray_cs interrupt tx request complete\n");
2105 } else { 2094 } else {
2106 DEBUG(1, 2095 dev_dbg(&link->dev,
2107 "ray_cs interrupt tx request failed\n"); 2096 "ray_cs interrupt tx request failed\n");
2108 } 2097 }
2109 if (!sniffer) 2098 if (!sniffer)
@@ -2111,21 +2100,21 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2111 netif_wake_queue(dev); 2100 netif_wake_queue(dev);
2112 break; 2101 break;
2113 case CCS_TEST_MEMORY: 2102 case CCS_TEST_MEMORY:
2114 DEBUG(1, "ray_cs interrupt mem test done\n"); 2103 dev_dbg(&link->dev, "ray_cs interrupt mem test done\n");
2115 break; 2104 break;
2116 case CCS_SHUTDOWN: 2105 case CCS_SHUTDOWN:
2117 DEBUG(1, 2106 dev_dbg(&link->dev,
2118 "ray_cs interrupt Unexpected CCS returned - Shutdown\n"); 2107 "ray_cs interrupt Unexpected CCS returned - Shutdown\n");
2119 break; 2108 break;
2120 case CCS_DUMP_MEMORY: 2109 case CCS_DUMP_MEMORY:
2121 DEBUG(1, "ray_cs interrupt dump memory done\n"); 2110 dev_dbg(&link->dev, "ray_cs interrupt dump memory done\n");
2122 break; 2111 break;
2123 case CCS_START_TIMER: 2112 case CCS_START_TIMER:
2124 DEBUG(2, 2113 dev_dbg(&link->dev,
2125 "ray_cs interrupt DING - raylink timer expired\n"); 2114 "ray_cs interrupt DING - raylink timer expired\n");
2126 break; 2115 break;
2127 default: 2116 default:
2128 DEBUG(1, 2117 dev_dbg(&link->dev,
2129 "ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n", 2118 "ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n",
2130 rcsindex, cmd); 2119 rcsindex, cmd);
2131 } 2120 }
@@ -2139,7 +2128,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2139 ray_rx(dev, local, prcs); 2128 ray_rx(dev, local, prcs);
2140 break; 2129 break;
2141 case REJOIN_NET_COMPLETE: 2130 case REJOIN_NET_COMPLETE:
2142 DEBUG(1, "ray_cs interrupt rejoin net complete\n"); 2131 dev_dbg(&link->dev, "ray_cs interrupt rejoin net complete\n");
2143 local->card_status = CARD_ACQ_COMPLETE; 2132 local->card_status = CARD_ACQ_COMPLETE;
2144 /* do we need to clear tx buffers CCS's? */ 2133 /* do we need to clear tx buffers CCS's? */
2145 if (local->sparm.b4.a_network_type == ADHOC) { 2134 if (local->sparm.b4.a_network_type == ADHOC) {
@@ -2149,7 +2138,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2149 memcpy_fromio(&local->bss_id, 2138 memcpy_fromio(&local->bss_id,
2150 prcs->var.rejoin_net_complete. 2139 prcs->var.rejoin_net_complete.
2151 bssid, ADDRLEN); 2140 bssid, ADDRLEN);
2152 DEBUG(1, 2141 dev_dbg(&link->dev,
2153 "ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n", 2142 "ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n",
2154 local->bss_id[0], local->bss_id[1], 2143 local->bss_id[0], local->bss_id[1],
2155 local->bss_id[2], local->bss_id[3], 2144 local->bss_id[2], local->bss_id[3],
@@ -2159,15 +2148,15 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2159 } 2148 }
2160 break; 2149 break;
2161 case ROAMING_INITIATED: 2150 case ROAMING_INITIATED:
2162 DEBUG(1, "ray_cs interrupt roaming initiated\n"); 2151 dev_dbg(&link->dev, "ray_cs interrupt roaming initiated\n");
2163 netif_stop_queue(dev); 2152 netif_stop_queue(dev);
2164 local->card_status = CARD_DOING_ACQ; 2153 local->card_status = CARD_DOING_ACQ;
2165 break; 2154 break;
2166 case JAPAN_CALL_SIGN_RXD: 2155 case JAPAN_CALL_SIGN_RXD:
2167 DEBUG(1, "ray_cs interrupt japan call sign rx\n"); 2156 dev_dbg(&link->dev, "ray_cs interrupt japan call sign rx\n");
2168 break; 2157 break;
2169 default: 2158 default:
2170 DEBUG(1, 2159 dev_dbg(&link->dev,
2171 "ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n", 2160 "ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n",
2172 rcsindex, 2161 rcsindex,
2173 (unsigned int)readb(&prcs->interrupt_id)); 2162 (unsigned int)readb(&prcs->interrupt_id));
@@ -2186,7 +2175,7 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local,
2186 int rx_len; 2175 int rx_len;
2187 unsigned int pkt_addr; 2176 unsigned int pkt_addr;
2188 void __iomem *pmsg; 2177 void __iomem *pmsg;
2189 DEBUG(4, "ray_rx process rx packet\n"); 2178 pr_debug("ray_rx process rx packet\n");
2190 2179
2191 /* Calculate address of packet within Rx buffer */ 2180 /* Calculate address of packet within Rx buffer */
2192 pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8) 2181 pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8)
@@ -2199,28 +2188,28 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local,
2199 pmsg = local->rmem + pkt_addr; 2188 pmsg = local->rmem + pkt_addr;
2200 switch (readb(pmsg)) { 2189 switch (readb(pmsg)) {
2201 case DATA_TYPE: 2190 case DATA_TYPE:
2202 DEBUG(4, "ray_rx data type\n"); 2191 pr_debug("ray_rx data type\n");
2203 rx_data(dev, prcs, pkt_addr, rx_len); 2192 rx_data(dev, prcs, pkt_addr, rx_len);
2204 break; 2193 break;
2205 case AUTHENTIC_TYPE: 2194 case AUTHENTIC_TYPE:
2206 DEBUG(4, "ray_rx authentic type\n"); 2195 pr_debug("ray_rx authentic type\n");
2207 if (sniffer) 2196 if (sniffer)
2208 rx_data(dev, prcs, pkt_addr, rx_len); 2197 rx_data(dev, prcs, pkt_addr, rx_len);
2209 else 2198 else
2210 rx_authenticate(local, prcs, pkt_addr, rx_len); 2199 rx_authenticate(local, prcs, pkt_addr, rx_len);
2211 break; 2200 break;
2212 case DEAUTHENTIC_TYPE: 2201 case DEAUTHENTIC_TYPE:
2213 DEBUG(4, "ray_rx deauth type\n"); 2202 pr_debug("ray_rx deauth type\n");
2214 if (sniffer) 2203 if (sniffer)
2215 rx_data(dev, prcs, pkt_addr, rx_len); 2204 rx_data(dev, prcs, pkt_addr, rx_len);
2216 else 2205 else
2217 rx_deauthenticate(local, prcs, pkt_addr, rx_len); 2206 rx_deauthenticate(local, prcs, pkt_addr, rx_len);
2218 break; 2207 break;
2219 case NULL_MSG_TYPE: 2208 case NULL_MSG_TYPE:
2220 DEBUG(3, "ray_cs rx NULL msg\n"); 2209 pr_debug("ray_cs rx NULL msg\n");
2221 break; 2210 break;
2222 case BEACON_TYPE: 2211 case BEACON_TYPE:
2223 DEBUG(4, "ray_rx beacon type\n"); 2212 pr_debug("ray_rx beacon type\n");
2224 if (sniffer) 2213 if (sniffer)
2225 rx_data(dev, prcs, pkt_addr, rx_len); 2214 rx_data(dev, prcs, pkt_addr, rx_len);
2226 2215
@@ -2233,7 +2222,7 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local,
2233 ray_get_stats(dev); 2222 ray_get_stats(dev);
2234 break; 2223 break;
2235 default: 2224 default:
2236 DEBUG(0, "ray_cs unknown pkt type %2x\n", 2225 pr_debug("ray_cs unknown pkt type %2x\n",
2237 (unsigned int)readb(pmsg)); 2226 (unsigned int)readb(pmsg));
2238 break; 2227 break;
2239 } 2228 }
@@ -2262,7 +2251,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2262 rx_len > 2251 rx_len >
2263 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + 2252 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
2264 FCS_LEN)) { 2253 FCS_LEN)) {
2265 DEBUG(0, 2254 pr_debug(
2266 "ray_cs invalid packet length %d received \n", 2255 "ray_cs invalid packet length %d received \n",
2267 rx_len); 2256 rx_len);
2268 return; 2257 return;
@@ -2273,17 +2262,17 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2273 rx_len > 2262 rx_len >
2274 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + 2263 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
2275 FCS_LEN)) { 2264 FCS_LEN)) {
2276 DEBUG(0, 2265 pr_debug(
2277 "ray_cs invalid packet length %d received \n", 2266 "ray_cs invalid packet length %d received \n",
2278 rx_len); 2267 rx_len);
2279 return; 2268 return;
2280 } 2269 }
2281 } 2270 }
2282 } 2271 }
2283 DEBUG(4, "ray_cs rx_data packet\n"); 2272 pr_debug("ray_cs rx_data packet\n");
2284 /* If fragmented packet, verify sizes of fragments add up */ 2273 /* If fragmented packet, verify sizes of fragments add up */
2285 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) { 2274 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
2286 DEBUG(1, "ray_cs rx'ed fragment\n"); 2275 pr_debug("ray_cs rx'ed fragment\n");
2287 tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8) 2276 tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8)
2288 + readb(&prcs->var.rx_packet.totalpacketlength[1]); 2277 + readb(&prcs->var.rx_packet.totalpacketlength[1]);
2289 total_len = tmp; 2278 total_len = tmp;
@@ -2301,7 +2290,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2301 } while (1); 2290 } while (1);
2302 2291
2303 if (tmp < 0) { 2292 if (tmp < 0) {
2304 DEBUG(0, 2293 pr_debug(
2305 "ray_cs rx_data fragment lengths don't add up\n"); 2294 "ray_cs rx_data fragment lengths don't add up\n");
2306 local->stats.rx_dropped++; 2295 local->stats.rx_dropped++;
2307 release_frag_chain(local, prcs); 2296 release_frag_chain(local, prcs);
@@ -2313,7 +2302,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2313 2302
2314 skb = dev_alloc_skb(total_len + 5); 2303 skb = dev_alloc_skb(total_len + 5);
2315 if (skb == NULL) { 2304 if (skb == NULL) {
2316 DEBUG(0, "ray_cs rx_data could not allocate skb\n"); 2305 pr_debug("ray_cs rx_data could not allocate skb\n");
2317 local->stats.rx_dropped++; 2306 local->stats.rx_dropped++;
2318 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) 2307 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF)
2319 release_frag_chain(local, prcs); 2308 release_frag_chain(local, prcs);
@@ -2321,7 +2310,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2321 } 2310 }
2322 skb_reserve(skb, 2); /* Align IP on 16 byte (TBD check this) */ 2311 skb_reserve(skb, 2); /* Align IP on 16 byte (TBD check this) */
2323 2312
2324 DEBUG(4, "ray_cs rx_data total_len = %x, rx_len = %x\n", total_len, 2313 pr_debug("ray_cs rx_data total_len = %x, rx_len = %x\n", total_len,
2325 rx_len); 2314 rx_len);
2326 2315
2327/************************/ 2316/************************/
@@ -2354,7 +2343,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2354 tmp = 17; 2343 tmp = 17;
2355 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) { 2344 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
2356 prcslink = prcs; 2345 prcslink = prcs;
2357 DEBUG(1, "ray_cs rx_data in fragment loop\n"); 2346 pr_debug("ray_cs rx_data in fragment loop\n");
2358 do { 2347 do {
2359 prcslink = rcs_base(local) 2348 prcslink = rcs_base(local)
2360 + 2349 +
@@ -2426,8 +2415,8 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2426 memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN); 2415 memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN);
2427 memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN); 2416 memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN);
2428 2417
2429#ifdef PCMCIA_DEBUG 2418#if 0
2430 if (pc_debug > 3) { 2419 if {
2431 print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ", 2420 print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ",
2432 DUMP_PREFIX_NONE, 16, 1, 2421 DUMP_PREFIX_NONE, 16, 1,
2433 skb->data, 64, true); 2422 skb->data, 64, true);
@@ -2441,7 +2430,7 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2441 2430
2442 if (psnap->dsap != 0xaa || psnap->ssap != 0xaa || psnap->ctrl != 3) { 2431 if (psnap->dsap != 0xaa || psnap->ssap != 0xaa || psnap->ctrl != 3) {
2443 /* not a snap type so leave it alone */ 2432 /* not a snap type so leave it alone */
2444 DEBUG(3, "ray_cs untranslate NOT SNAP %02x %02x %02x\n", 2433 pr_debug("ray_cs untranslate NOT SNAP %02x %02x %02x\n",
2445 psnap->dsap, psnap->ssap, psnap->ctrl); 2434 psnap->dsap, psnap->ssap, psnap->ctrl);
2446 2435
2447 delta = RX_MAC_HEADER_LENGTH - ETH_HLEN; 2436 delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
@@ -2450,7 +2439,7 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2450 } else { /* Its a SNAP */ 2439 } else { /* Its a SNAP */
2451 if (memcmp(psnap->org, org_bridge, 3) == 0) { 2440 if (memcmp(psnap->org, org_bridge, 3) == 0) {
2452 /* EtherII and nuke the LLC */ 2441 /* EtherII and nuke the LLC */
2453 DEBUG(3, "ray_cs untranslate Bridge encap\n"); 2442 pr_debug("ray_cs untranslate Bridge encap\n");
2454 delta = RX_MAC_HEADER_LENGTH 2443 delta = RX_MAC_HEADER_LENGTH
2455 + sizeof(struct snaphdr_t) - ETH_HLEN; 2444 + sizeof(struct snaphdr_t) - ETH_HLEN;
2456 peth = (struct ethhdr *)(skb->data + delta); 2445 peth = (struct ethhdr *)(skb->data + delta);
@@ -2459,14 +2448,14 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2459 switch (ntohs(type)) { 2448 switch (ntohs(type)) {
2460 case ETH_P_IPX: 2449 case ETH_P_IPX:
2461 case ETH_P_AARP: 2450 case ETH_P_AARP:
2462 DEBUG(3, "ray_cs untranslate RFC IPX/AARP\n"); 2451 pr_debug("ray_cs untranslate RFC IPX/AARP\n");
2463 delta = RX_MAC_HEADER_LENGTH - ETH_HLEN; 2452 delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
2464 peth = (struct ethhdr *)(skb->data + delta); 2453 peth = (struct ethhdr *)(skb->data + delta);
2465 peth->h_proto = 2454 peth->h_proto =
2466 htons(len - RX_MAC_HEADER_LENGTH); 2455 htons(len - RX_MAC_HEADER_LENGTH);
2467 break; 2456 break;
2468 default: 2457 default:
2469 DEBUG(3, "ray_cs untranslate RFC default\n"); 2458 pr_debug("ray_cs untranslate RFC default\n");
2470 delta = RX_MAC_HEADER_LENGTH + 2459 delta = RX_MAC_HEADER_LENGTH +
2471 sizeof(struct snaphdr_t) - ETH_HLEN; 2460 sizeof(struct snaphdr_t) - ETH_HLEN;
2472 peth = (struct ethhdr *)(skb->data + delta); 2461 peth = (struct ethhdr *)(skb->data + delta);
@@ -2482,12 +2471,12 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2482 } 2471 }
2483/* TBD reserve skb_reserve(skb, delta); */ 2472/* TBD reserve skb_reserve(skb, delta); */
2484 skb_pull(skb, delta); 2473 skb_pull(skb, delta);
2485 DEBUG(3, "untranslate after skb_pull(%d), skb->data = %p\n", delta, 2474 pr_debug("untranslate after skb_pull(%d), skb->data = %p\n", delta,
2486 skb->data); 2475 skb->data);
2487 memcpy(peth->h_dest, destaddr, ADDRLEN); 2476 memcpy(peth->h_dest, destaddr, ADDRLEN);
2488 memcpy(peth->h_source, srcaddr, ADDRLEN); 2477 memcpy(peth->h_source, srcaddr, ADDRLEN);
2489#ifdef PCMCIA_DEBUG 2478#if 0
2490 if (pc_debug > 3) { 2479 {
2491 int i; 2480 int i;
2492 printk(KERN_DEBUG "skb->data after untranslate:"); 2481 printk(KERN_DEBUG "skb->data after untranslate:");
2493 for (i = 0; i < 64; i++) 2482 for (i = 0; i < 64; i++)
@@ -2529,7 +2518,7 @@ static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs)
2529 while (tmp--) { 2518 while (tmp--) {
2530 writeb(CCS_BUFFER_FREE, &prcslink->buffer_status); 2519 writeb(CCS_BUFFER_FREE, &prcslink->buffer_status);
2531 if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) { 2520 if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
2532 DEBUG(1, "ray_cs interrupt bad rcsindex = 0x%x\n", 2521 pr_debug("ray_cs interrupt bad rcsindex = 0x%x\n",
2533 rcsindex); 2522 rcsindex);
2534 break; 2523 break;
2535 } 2524 }
@@ -2543,9 +2532,9 @@ static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs)
2543static void authenticate(ray_dev_t *local) 2532static void authenticate(ray_dev_t *local)
2544{ 2533{
2545 struct pcmcia_device *link = local->finder; 2534 struct pcmcia_device *link = local->finder;
2546 DEBUG(0, "ray_cs Starting authentication.\n"); 2535 dev_dbg(&link->dev, "ray_cs Starting authentication.\n");
2547 if (!(pcmcia_dev_present(link))) { 2536 if (!(pcmcia_dev_present(link))) {
2548 DEBUG(2, "ray_cs authenticate - device not present\n"); 2537 dev_dbg(&link->dev, "ray_cs authenticate - device not present\n");
2549 return; 2538 return;
2550 } 2539 }
2551 2540
@@ -2573,11 +2562,11 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
2573 copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); 2562 copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
2574 /* if we are trying to get authenticated */ 2563 /* if we are trying to get authenticated */
2575 if (local->sparm.b4.a_network_type == ADHOC) { 2564 if (local->sparm.b4.a_network_type == ADHOC) {
2576 DEBUG(1, "ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n", 2565 pr_debug("ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n",
2577 msg->var[0], msg->var[1], msg->var[2], msg->var[3], 2566 msg->var[0], msg->var[1], msg->var[2], msg->var[3],
2578 msg->var[4], msg->var[5]); 2567 msg->var[4], msg->var[5]);
2579 if (msg->var[2] == 1) { 2568 if (msg->var[2] == 1) {
2580 DEBUG(0, "ray_cs Sending authentication response.\n"); 2569 pr_debug("ray_cs Sending authentication response.\n");
2581 if (!build_auth_frame 2570 if (!build_auth_frame
2582 (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) { 2571 (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) {
2583 local->authentication_state = NEED_TO_AUTH; 2572 local->authentication_state = NEED_TO_AUTH;
@@ -2591,13 +2580,13 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
2591 /* Verify authentication sequence #2 and success */ 2580 /* Verify authentication sequence #2 and success */
2592 if (msg->var[2] == 2) { 2581 if (msg->var[2] == 2) {
2593 if ((msg->var[3] | msg->var[4]) == 0) { 2582 if ((msg->var[3] | msg->var[4]) == 0) {
2594 DEBUG(1, "Authentication successful\n"); 2583 pr_debug("Authentication successful\n");
2595 local->card_status = CARD_AUTH_COMPLETE; 2584 local->card_status = CARD_AUTH_COMPLETE;
2596 associate(local); 2585 associate(local);
2597 local->authentication_state = 2586 local->authentication_state =
2598 AUTHENTICATED; 2587 AUTHENTICATED;
2599 } else { 2588 } else {
2600 DEBUG(0, "Authentication refused\n"); 2589 pr_debug("Authentication refused\n");
2601 local->card_status = CARD_AUTH_REFUSED; 2590 local->card_status = CARD_AUTH_REFUSED;
2602 join_net((u_long) local); 2591 join_net((u_long) local);
2603 local->authentication_state = 2592 local->authentication_state =
@@ -2617,22 +2606,22 @@ static void associate(ray_dev_t *local)
2617 struct net_device *dev = link->priv; 2606 struct net_device *dev = link->priv;
2618 int ccsindex; 2607 int ccsindex;
2619 if (!(pcmcia_dev_present(link))) { 2608 if (!(pcmcia_dev_present(link))) {
2620 DEBUG(2, "ray_cs associate - device not present\n"); 2609 dev_dbg(&link->dev, "ray_cs associate - device not present\n");
2621 return; 2610 return;
2622 } 2611 }
2623 /* If no tx buffers available, return */ 2612 /* If no tx buffers available, return */
2624 if ((ccsindex = get_free_ccs(local)) < 0) { 2613 if ((ccsindex = get_free_ccs(local)) < 0) {
2625/* TBD should never be here but... what if we are? */ 2614/* TBD should never be here but... what if we are? */
2626 DEBUG(1, "ray_cs associate - No free ccs\n"); 2615 dev_dbg(&link->dev, "ray_cs associate - No free ccs\n");
2627 return; 2616 return;
2628 } 2617 }
2629 DEBUG(1, "ray_cs Starting association with access point\n"); 2618 dev_dbg(&link->dev, "ray_cs Starting association with access point\n");
2630 pccs = ccs_base(local) + ccsindex; 2619 pccs = ccs_base(local) + ccsindex;
2631 /* fill in the CCS */ 2620 /* fill in the CCS */
2632 writeb(CCS_START_ASSOCIATION, &pccs->cmd); 2621 writeb(CCS_START_ASSOCIATION, &pccs->cmd);
2633 /* Interrupt the firmware to process the command */ 2622 /* Interrupt the firmware to process the command */
2634 if (interrupt_ecf(local, ccsindex)) { 2623 if (interrupt_ecf(local, ccsindex)) {
2635 DEBUG(1, "ray_cs associate failed - ECF not ready for intr\n"); 2624 dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n");
2636 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 2625 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
2637 2626
2638 del_timer(&local->timer); 2627 del_timer(&local->timer);
@@ -2655,7 +2644,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
2655/* UCHAR buff[256]; 2644/* UCHAR buff[256];
2656 struct rx_msg *msg = (struct rx_msg *)buff; 2645 struct rx_msg *msg = (struct rx_msg *)buff;
2657*/ 2646*/
2658 DEBUG(0, "Deauthentication frame received\n"); 2647 pr_debug("Deauthentication frame received\n");
2659 local->authentication_state = UNAUTHENTICATED; 2648 local->authentication_state = UNAUTHENTICATED;
2660 /* Need to reauthenticate or rejoin depending on reason code */ 2649 /* Need to reauthenticate or rejoin depending on reason code */
2661/* copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); 2650/* copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
@@ -2823,7 +2812,7 @@ static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
2823 2812
2824 /* If no tx buffers available, return */ 2813 /* If no tx buffers available, return */
2825 if ((ccsindex = get_free_tx_ccs(local)) < 0) { 2814 if ((ccsindex = get_free_tx_ccs(local)) < 0) {
2826 DEBUG(1, "ray_cs send authenticate - No free tx ccs\n"); 2815 pr_debug("ray_cs send authenticate - No free tx ccs\n");
2827 return -1; 2816 return -1;
2828 } 2817 }
2829 2818
@@ -2855,7 +2844,7 @@ static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
2855 2844
2856 /* Interrupt the firmware to process the command */ 2845 /* Interrupt the firmware to process the command */
2857 if (interrupt_ecf(local, ccsindex)) { 2846 if (interrupt_ecf(local, ccsindex)) {
2858 DEBUG(1, 2847 pr_debug(
2859 "ray_cs send authentication request failed - ECF not ready for intr\n"); 2848 "ray_cs send authentication request failed - ECF not ready for intr\n");
2860 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 2849 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
2861 return -1; 2850 return -1;
@@ -2942,9 +2931,9 @@ static int __init init_ray_cs(void)
2942{ 2931{
2943 int rc; 2932 int rc;
2944 2933
2945 DEBUG(1, "%s\n", rcsid); 2934 pr_debug("%s\n", rcsid);
2946 rc = pcmcia_register_driver(&ray_driver); 2935 rc = pcmcia_register_driver(&ray_driver);
2947 DEBUG(1, "raylink init_module register_pcmcia_driver returns 0x%x\n", 2936 pr_debug("raylink init_module register_pcmcia_driver returns 0x%x\n",
2948 rc); 2937 rc);
2949 2938
2950#ifdef CONFIG_PROC_FS 2939#ifdef CONFIG_PROC_FS
@@ -2964,7 +2953,7 @@ static int __init init_ray_cs(void)
2964 2953
2965static void __exit exit_ray_cs(void) 2954static void __exit exit_ray_cs(void)
2966{ 2955{
2967 DEBUG(0, "ray_cs: cleanup_module\n"); 2956 pr_debug("ray_cs: cleanup_module\n");
2968 2957
2969#ifdef CONFIG_PROC_FS 2958#ifdef CONFIG_PROC_FS
2970 remove_proc_entry("driver/ray_cs/ray_cs", NULL); 2959 remove_proc_entry("driver/ray_cs/ray_cs", NULL);
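The ray_cs.c hunks above replace the driver-private DEBUG(n, ...) macro, which was gated by a pc_debug module parameter, with the kernel's standard dev_dbg()/pr_debug() helpers. The following is only a rough sketch of that calling convention, with made-up function and variable names rather than anything taken from ray_cs.c: dev_dbg() prefixes the message with the owning struct device, pr_debug() is used where no device pointer is at hand, and both compile away unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled.

#include <linux/kernel.h>
#include <linux/device.h>

/* Illustrative sketch only -- not part of ray_cs.c or of this patch. */
static void sample_debug_usage(struct device *dev, int ccsindex)
{
	/* Tied to a device: printed as "<driver> <device name>: ..." */
	dev_dbg(dev, "interrupting firmware, ccs index = 0x%x\n", ccsindex);

	/* No device handle available in this context. */
	pr_debug("free ccs search finished, index = 0x%x\n", ccsindex);
}
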
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 891bdab49887..7b9621de239f 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -67,23 +67,7 @@
67/* For rough constant delay */ 67/* For rough constant delay */
68#define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); } 68#define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); }
69 69
70/* 70
71 * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If you do not
72 * define PCMCIA_DEBUG at all, all the debug code will be left out. If you
73 * compile with PCMCIA_DEBUG=0, the debug code will be present but disabled --
74 * but it can then be enabled for specific modules at load time with a
75 * 'pc_debug=#' option to insmod.
76 */
77#define PCMCIA_DEBUG 0
78#ifdef PCMCIA_DEBUG
79static int pc_debug = PCMCIA_DEBUG;
80module_param(pc_debug, int, 0);
81#define dprintk(n, format, args...) \
82 { if (pc_debug > (n)) \
83 printk(KERN_INFO "%s: " format "\n", __func__ , ##args); }
84#else
85#define dprintk(n, format, args...)
86#endif
87 71
88#define wl3501_outb(a, b) { outb(a, b); slow_down_io(); } 72#define wl3501_outb(a, b) { outb(a, b); slow_down_io(); }
89#define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); } 73#define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); }
@@ -684,10 +668,10 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
684 int matchflag = 0; 668 int matchflag = 0;
685 struct wl3501_scan_confirm sig; 669 struct wl3501_scan_confirm sig;
686 670
687 dprintk(3, "entry"); 671 pr_debug("entry");
688 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 672 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
689 if (sig.status == WL3501_STATUS_SUCCESS) { 673 if (sig.status == WL3501_STATUS_SUCCESS) {
690 dprintk(3, "success"); 674 pr_debug("success");
691 if ((this->net_type == IW_MODE_INFRA && 675 if ((this->net_type == IW_MODE_INFRA &&
692 (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || 676 (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
693 (this->net_type == IW_MODE_ADHOC && 677 (this->net_type == IW_MODE_ADHOC &&
@@ -722,7 +706,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
722 } 706 }
723 } 707 }
724 } else if (sig.status == WL3501_STATUS_TIMEOUT) { 708 } else if (sig.status == WL3501_STATUS_TIMEOUT) {
725 dprintk(3, "timeout"); 709 pr_debug("timeout");
726 this->join_sta_bss = 0; 710 this->join_sta_bss = 0;
727 for (i = this->join_sta_bss; i < this->bss_cnt; i++) 711 for (i = this->join_sta_bss; i < this->bss_cnt; i++)
728 if (!wl3501_mgmt_join(this, i)) 712 if (!wl3501_mgmt_join(this, i))
@@ -879,7 +863,7 @@ static int wl3501_mgmt_auth(struct wl3501_card *this)
879 .timeout = 1000, 863 .timeout = 1000,
880 }; 864 };
881 865
882 dprintk(3, "entry"); 866 pr_debug("entry");
883 memcpy(sig.mac_addr, this->bssid, ETH_ALEN); 867 memcpy(sig.mac_addr, this->bssid, ETH_ALEN);
884 return wl3501_esbq_exec(this, &sig, sizeof(sig)); 868 return wl3501_esbq_exec(this, &sig, sizeof(sig));
885} 869}
@@ -893,7 +877,7 @@ static int wl3501_mgmt_association(struct wl3501_card *this)
893 .cap_info = this->cap_info, 877 .cap_info = this->cap_info,
894 }; 878 };
895 879
896 dprintk(3, "entry"); 880 pr_debug("entry");
897 memcpy(sig.mac_addr, this->bssid, ETH_ALEN); 881 memcpy(sig.mac_addr, this->bssid, ETH_ALEN);
898 return wl3501_esbq_exec(this, &sig, sizeof(sig)); 882 return wl3501_esbq_exec(this, &sig, sizeof(sig));
899} 883}
@@ -903,7 +887,7 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
903 struct wl3501_card *this = netdev_priv(dev); 887 struct wl3501_card *this = netdev_priv(dev);
904 struct wl3501_join_confirm sig; 888 struct wl3501_join_confirm sig;
905 889
906 dprintk(3, "entry"); 890 pr_debug("entry");
907 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 891 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
908 if (sig.status == WL3501_STATUS_SUCCESS) { 892 if (sig.status == WL3501_STATUS_SUCCESS) {
909 if (this->net_type == IW_MODE_INFRA) { 893 if (this->net_type == IW_MODE_INFRA) {
@@ -962,7 +946,7 @@ static inline void wl3501_md_confirm_interrupt(struct net_device *dev,
962{ 946{
963 struct wl3501_md_confirm sig; 947 struct wl3501_md_confirm sig;
964 948
965 dprintk(3, "entry"); 949 pr_debug("entry");
966 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 950 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
967 wl3501_free_tx_buffer(this, sig.data); 951 wl3501_free_tx_buffer(this, sig.data);
968 if (netif_queue_stopped(dev)) 952 if (netif_queue_stopped(dev))
@@ -1017,7 +1001,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
1017static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this, 1001static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this,
1018 u16 addr, void *sig, int size) 1002 u16 addr, void *sig, int size)
1019{ 1003{
1020 dprintk(3, "entry"); 1004 pr_debug("entry");
1021 wl3501_get_from_wla(this, addr, &this->sig_get_confirm, 1005 wl3501_get_from_wla(this, addr, &this->sig_get_confirm,
1022 sizeof(this->sig_get_confirm)); 1006 sizeof(this->sig_get_confirm));
1023 wake_up(&this->wait); 1007 wake_up(&this->wait);
@@ -1029,7 +1013,7 @@ static inline void wl3501_start_confirm_interrupt(struct net_device *dev,
1029{ 1013{
1030 struct wl3501_start_confirm sig; 1014 struct wl3501_start_confirm sig;
1031 1015
1032 dprintk(3, "entry"); 1016 pr_debug("entry");
1033 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 1017 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
1034 if (sig.status == WL3501_STATUS_SUCCESS) 1018 if (sig.status == WL3501_STATUS_SUCCESS)
1035 netif_wake_queue(dev); 1019 netif_wake_queue(dev);
@@ -1041,7 +1025,7 @@ static inline void wl3501_assoc_confirm_interrupt(struct net_device *dev,
1041 struct wl3501_card *this = netdev_priv(dev); 1025 struct wl3501_card *this = netdev_priv(dev);
1042 struct wl3501_assoc_confirm sig; 1026 struct wl3501_assoc_confirm sig;
1043 1027
1044 dprintk(3, "entry"); 1028 pr_debug("entry");
1045 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 1029 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
1046 1030
1047 if (sig.status == WL3501_STATUS_SUCCESS) 1031 if (sig.status == WL3501_STATUS_SUCCESS)
@@ -1053,7 +1037,7 @@ static inline void wl3501_auth_confirm_interrupt(struct wl3501_card *this,
1053{ 1037{
1054 struct wl3501_auth_confirm sig; 1038 struct wl3501_auth_confirm sig;
1055 1039
1056 dprintk(3, "entry"); 1040 pr_debug("entry");
1057 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 1041 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
1058 1042
1059 if (sig.status == WL3501_STATUS_SUCCESS) 1043 if (sig.status == WL3501_STATUS_SUCCESS)
@@ -1069,7 +1053,7 @@ static inline void wl3501_rx_interrupt(struct net_device *dev)
1069 u8 sig_id; 1053 u8 sig_id;
1070 struct wl3501_card *this = netdev_priv(dev); 1054 struct wl3501_card *this = netdev_priv(dev);
1071 1055
1072 dprintk(3, "entry"); 1056 pr_debug("entry");
1073loop: 1057loop:
1074 morepkts = 0; 1058 morepkts = 0;
1075 if (!wl3501_esbq_confirm(this)) 1059 if (!wl3501_esbq_confirm(this))
@@ -1302,7 +1286,7 @@ static int wl3501_reset(struct net_device *dev)
1302 wl3501_ack_interrupt(this); 1286 wl3501_ack_interrupt(this);
1303 wl3501_unblock_interrupt(this); 1287 wl3501_unblock_interrupt(this);
1304 wl3501_mgmt_scan(this, 100); 1288 wl3501_mgmt_scan(this, 100);
1305 dprintk(1, "%s: device reset", dev->name); 1289 pr_debug("%s: device reset", dev->name);
1306 rc = 0; 1290 rc = 0;
1307out: 1291out:
1308 return rc; 1292 return rc;
@@ -1376,7 +1360,7 @@ static int wl3501_open(struct net_device *dev)
1376 link->open++; 1360 link->open++;
1377 1361
1378 /* Initial WL3501 firmware */ 1362 /* Initial WL3501 firmware */
1379 dprintk(1, "%s: Initialize WL3501 firmware...", dev->name); 1363 pr_debug("%s: Initialize WL3501 firmware...", dev->name);
1380 if (wl3501_init_firmware(this)) 1364 if (wl3501_init_firmware(this))
1381 goto fail; 1365 goto fail;
1382 /* Initial device variables */ 1366 /* Initial device variables */
@@ -1388,7 +1372,7 @@ static int wl3501_open(struct net_device *dev)
1388 wl3501_unblock_interrupt(this); 1372 wl3501_unblock_interrupt(this);
1389 wl3501_mgmt_scan(this, 100); 1373 wl3501_mgmt_scan(this, 100);
1390 rc = 0; 1374 rc = 0;
1391 dprintk(1, "%s: WL3501 opened", dev->name); 1375 pr_debug("%s: WL3501 opened", dev->name);
1392 printk(KERN_INFO "%s: Card Name: %s\n" 1376 printk(KERN_INFO "%s: Card Name: %s\n"
1393 "%s: Firmware Date: %s\n", 1377 "%s: Firmware Date: %s\n",
1394 dev->name, this->card_name, 1378 dev->name, this->card_name,
@@ -1914,8 +1898,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1914 p_dev->io.IOAddrLines = 5; 1898 p_dev->io.IOAddrLines = 5;
1915 1899
1916 /* Interrupt setup */ 1900 /* Interrupt setup */
1917 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 1901 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
1918 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
1919 p_dev->irq.Handler = wl3501_interrupt; 1902 p_dev->irq.Handler = wl3501_interrupt;
1920 1903
1921 /* General socket configuration */ 1904 /* General socket configuration */
@@ -1938,16 +1921,13 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1938 dev->wireless_handlers = &wl3501_handler_def; 1921 dev->wireless_handlers = &wl3501_handler_def;
1939 SET_ETHTOOL_OPS(dev, &ops); 1922 SET_ETHTOOL_OPS(dev, &ops);
1940 netif_stop_queue(dev); 1923 netif_stop_queue(dev);
1941 p_dev->priv = p_dev->irq.Instance = dev; 1924 p_dev->priv = dev;
1942 1925
1943 return wl3501_config(p_dev); 1926 return wl3501_config(p_dev);
1944out_link: 1927out_link:
1945 return -ENOMEM; 1928 return -ENOMEM;
1946} 1929}
1947 1930
1948#define CS_CHECK(fn, ret) \
1949do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
1950
1951/** 1931/**
1952 * wl3501_config - configure the PCMCIA socket and make eth device available 1932 * wl3501_config - configure the PCMCIA socket and make eth device available
1953 * @link - FILL_IN 1933 * @link - FILL_IN
@@ -1959,7 +1939,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
1959static int wl3501_config(struct pcmcia_device *link) 1939static int wl3501_config(struct pcmcia_device *link)
1960{ 1940{
1961 struct net_device *dev = link->priv; 1941 struct net_device *dev = link->priv;
1962 int i = 0, j, last_fn, last_ret; 1942 int i = 0, j, ret;
1963 struct wl3501_card *this; 1943 struct wl3501_card *this;
1964 1944
1965 /* Try allocating IO ports. This tries a few fixed addresses. If you 1945 /* Try allocating IO ports. This tries a few fixed addresses. If you
@@ -1975,24 +1955,26 @@ static int wl3501_config(struct pcmcia_device *link)
1975 if (i == 0) 1955 if (i == 0)
1976 break; 1956 break;
1977 } 1957 }
1978 if (i != 0) { 1958 if (i != 0)
1979 cs_error(link, RequestIO, i);
1980 goto failed; 1959 goto failed;
1981 }
1982 1960
1983 /* Now allocate an interrupt line. Note that this does not actually 1961 /* Now allocate an interrupt line. Note that this does not actually
1984 * assign a handler to the interrupt. */ 1962 * assign a handler to the interrupt. */
1985 1963
1986 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 1964 ret = pcmcia_request_irq(link, &link->irq);
1965 if (ret)
1966 goto failed;
1987 1967
1988 /* This actually configures the PCMCIA socket -- setting up the I/O 1968 /* This actually configures the PCMCIA socket -- setting up the I/O
1989 * windows and the interrupt mapping. */ 1969 * windows and the interrupt mapping. */
1990 1970
1991 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 1971 ret = pcmcia_request_configuration(link, &link->conf);
1972 if (ret)
1973 goto failed;
1992 1974
1993 dev->irq = link->irq.AssignedIRQ; 1975 dev->irq = link->irq.AssignedIRQ;
1994 dev->base_addr = link->io.BasePort1; 1976 dev->base_addr = link->io.BasePort1;
1995 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 1977 SET_NETDEV_DEV(dev, &link->dev);
1996 if (register_netdev(dev)) { 1978 if (register_netdev(dev)) {
1997 printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n"); 1979 printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n");
1998 goto failed; 1980 goto failed;
@@ -2041,8 +2023,6 @@ static int wl3501_config(struct pcmcia_device *link)
2041 netif_start_queue(dev); 2023 netif_start_queue(dev);
2042 return 0; 2024 return 0;
2043 2025
2044cs_failed:
2045 cs_error(link, last_fn, last_ret);
2046failed: 2026failed:
2047 wl3501_release(link); 2027 wl3501_release(link);
2048 return -ENODEV; 2028 return -ENODEV;
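
The wl3501_cs hunks above are one instance of the conversion applied throughout this series: the driver-local CS_CHECK() wrapper and its cs_failed:/cs_error() exit path are dropped in favour of plain return-value checks feeding a single failed: label. A minimal before/after sketch of that pattern, using only the calls visible in the hunks above (error reporting is assumed to move into the PCMCIA core helpers once cs_error() is gone):

	/* before: the macro hides the jump and needs last_fn/last_ret plus
	 * a cs_failed: label that calls cs_error() */
	#define CS_CHECK(fn, ret) \
	do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

	CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));

	/* after: each Card Services call is checked explicitly */
	ret = pcmcia_request_irq(link, &link->irq);
	if (ret)
		goto failed;
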
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 8fdfa4f537a6..7dd370fa3439 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -67,14 +67,6 @@ MODULE_LICENSE("Dual MPL/GPL");
67 67
68INT_MODULE_PARM(epp_mode, 1); 68INT_MODULE_PARM(epp_mode, 1);
69 69
70#ifdef PCMCIA_DEBUG
71INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
72#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
73static char *version =
74"parport_cs.c 1.29 2002/10/11 06:57:41 (David Hinds)";
75#else
76#define DEBUG(n, args...)
77#endif
78 70
79/*====================================================================*/ 71/*====================================================================*/
80 72
@@ -103,7 +95,7 @@ static int parport_probe(struct pcmcia_device *link)
103{ 95{
104 parport_info_t *info; 96 parport_info_t *info;
105 97
106 DEBUG(0, "parport_attach()\n"); 98 dev_dbg(&link->dev, "parport_attach()\n");
107 99
108 /* Create new parport device */ 100 /* Create new parport device */
109 info = kzalloc(sizeof(*info), GFP_KERNEL); 101 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -114,7 +106,6 @@ static int parport_probe(struct pcmcia_device *link)
114 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 106 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
115 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 107 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
116 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 108 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
117 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
118 link->conf.Attributes = CONF_ENABLE_IRQ; 109 link->conf.Attributes = CONF_ENABLE_IRQ;
119 link->conf.IntType = INT_MEMORY_AND_IO; 110 link->conf.IntType = INT_MEMORY_AND_IO;
120 111
@@ -132,7 +123,7 @@ static int parport_probe(struct pcmcia_device *link)
132 123
133static void parport_detach(struct pcmcia_device *link) 124static void parport_detach(struct pcmcia_device *link)
134{ 125{
135 DEBUG(0, "parport_detach(0x%p)\n", link); 126 dev_dbg(&link->dev, "parport_detach\n");
136 127
137 parport_cs_release(link); 128 parport_cs_release(link);
138 129
@@ -147,9 +138,6 @@ static void parport_detach(struct pcmcia_device *link)
147 138
148======================================================================*/ 139======================================================================*/
149 140
150#define CS_CHECK(fn, ret) \
151do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
152
153static int parport_config_check(struct pcmcia_device *p_dev, 141static int parport_config_check(struct pcmcia_device *p_dev,
154 cistpl_cftable_entry_t *cfg, 142 cistpl_cftable_entry_t *cfg,
155 cistpl_cftable_entry_t *dflt, 143 cistpl_cftable_entry_t *dflt,
@@ -178,18 +166,20 @@ static int parport_config(struct pcmcia_device *link)
178{ 166{
179 parport_info_t *info = link->priv; 167 parport_info_t *info = link->priv;
180 struct parport *p; 168 struct parport *p;
181 int last_ret, last_fn; 169 int ret;
182 170
183 DEBUG(0, "parport_config(0x%p)\n", link); 171 dev_dbg(&link->dev, "parport_config\n");
184 172
185 last_ret = pcmcia_loop_config(link, parport_config_check, NULL); 173 ret = pcmcia_loop_config(link, parport_config_check, NULL);
186 if (last_ret) { 174 if (ret)
187 cs_error(link, RequestIO, last_ret);
188 goto failed; 175 goto failed;
189 }
190 176
191 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 177 ret = pcmcia_request_irq(link, &link->irq);
192 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 178 if (ret)
179 goto failed;
180 ret = pcmcia_request_configuration(link, &link->conf);
181 if (ret)
182 goto failed;
193 183
194 p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2, 184 p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
195 link->irq.AssignedIRQ, PARPORT_DMA_NONE, 185 link->irq.AssignedIRQ, PARPORT_DMA_NONE,
@@ -213,8 +203,6 @@ static int parport_config(struct pcmcia_device *link)
213 203
214 return 0; 204 return 0;
215 205
216cs_failed:
217 cs_error(link, last_fn, last_ret);
218failed: 206failed:
219 parport_cs_release(link); 207 parport_cs_release(link);
220 return -ENODEV; 208 return -ENODEV;
@@ -232,7 +220,7 @@ static void parport_cs_release(struct pcmcia_device *link)
232{ 220{
233 parport_info_t *info = link->priv; 221 parport_info_t *info = link->priv;
234 222
235 DEBUG(0, "parport_release(0x%p)\n", link); 223 dev_dbg(&link->dev, "parport_release\n");
236 224
237 if (info->ndev) { 225 if (info->ndev) {
238 struct parport *p = info->port; 226 struct parport *p = info->port;
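
The parport_cs hunks show the other recurring substitution in this series: the per-driver PCMCIA_DEBUG/pc_debug machinery is replaced by the generic kernel debug helpers. A rough sketch of the swap, assuming the usual <linux/device.h> semantics (dev_dbg() and pr_debug() compile to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled):

	/* before: every driver carried its own level-gated macro and a
	 * pc_debug module parameter */
	#define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args)
	DEBUG(0, "parport_config(0x%p)\n", link);

	/* after: standard helpers; dev_dbg() prefixes the message with the
	 * driver and device name, and dynamic debug can toggle it at runtime */
	dev_dbg(&link->dev, "parport_config\n");
	pr_debug("registering driver %s\n", driver->drv.name);
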
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index b952ebc7a78b..416f6ac65b76 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -645,10 +645,13 @@ void __init detect_intel_iommu(void)
645 "x2apic and Intr-remapping.\n"); 645 "x2apic and Intr-remapping.\n");
646#endif 646#endif
647#ifdef CONFIG_DMAR 647#ifdef CONFIG_DMAR
648 if (ret && !no_iommu && !iommu_detected && !swiotlb && 648 if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
649 !dmar_disabled)
650 iommu_detected = 1; 649 iommu_detected = 1;
651#endif 650#endif
651#ifdef CONFIG_X86
652 if (ret)
653 x86_init.iommu.iommu_init = intel_iommu_init;
654#endif
652 } 655 }
653 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); 656 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
654 dmar_tbl = NULL; 657 dmar_tbl = NULL;
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 1840a0578a42..9261327b49f3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3266,7 +3266,7 @@ int __init intel_iommu_init(void)
3266 * Check the need for DMA-remapping initialization now. 3266 * Check the need for DMA-remapping initialization now.
3267 * Above initialization will also be used by Interrupt-remapping. 3267 * Above initialization will also be used by Interrupt-remapping.
3268 */ 3268 */
3269 if (no_iommu || swiotlb || dmar_disabled) 3269 if (no_iommu || dmar_disabled)
3270 return -ENODEV; 3270 return -ENODEV;
3271 3271
3272 iommu_init_mempool(); 3272 iommu_init_mempool();
@@ -3287,7 +3287,9 @@ int __init intel_iommu_init(void)
3287 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); 3287 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3288 3288
3289 init_timer(&unmap_timer); 3289 init_timer(&unmap_timer);
3290 force_iommu = 1; 3290#ifdef CONFIG_SWIOTLB
3291 swiotlb = 0;
3292#endif
3291 dma_ops = &intel_dma_ops; 3293 dma_ops = &intel_dma_ops;
3292 3294
3293 init_iommu_sysfs(); 3295 init_iommu_sysfs();
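
Taken together, the dmar.c and intel-iommu.c hunks invert the old swiotlb relationship: rather than skipping IOMMU initialisation whenever swiotlb is already active, DMAR detection now registers intel_iommu_init() as the x86 IOMMU init hook, and a successful initialisation takes over dma_ops and drops the swiotlb fallback. Condensed from the hunks above (the boot code that eventually calls the x86_init hook is outside this diff and is assumed):

	/* detect_intel_iommu(): a usable DMAR table defers the decision */
	#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
	#endif

	/* intel_iommu_init(): hardware remapping works, so the swiotlb
	 * bounce-buffer fallback is no longer needed */
	#ifdef CONFIG_SWIOTLB
		swiotlb = 0;
	#endif
		dma_ops = &intel_dma_ops;
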
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 17f38a781d47..f3ccbccf5f21 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -17,24 +17,6 @@ menuconfig PCCARD
17 17
18if PCCARD 18if PCCARD
19 19
20config PCMCIA_DEBUG
21 bool "Enable PCCARD debugging"
22 help
23 Say Y here to enable PCMCIA subsystem debugging. You
24 will need to choose the debugging level either via the
25 kernel command line, or module options depending whether
26 you build the PCMCIA as modules.
27
28 The kernel command line options are:
29 pcmcia_core.pc_debug=N
30 pcmcia.pc_debug=N
31 sa11xx_core.pc_debug=N
32
33 The module option is called pc_debug=N
34
35 In all the above examples, N is the debugging verbosity
36 level.
37
38config PCMCIA 20config PCMCIA
39 tristate "16-bit PCMCIA support" 21 tristate "16-bit PCMCIA support"
40 select CRC32 22 select CRC32
@@ -196,9 +178,13 @@ config PCMCIA_BCM63XX
196 tristate "bcm63xx pcmcia support" 178 tristate "bcm63xx pcmcia support"
197 depends on BCM63XX && PCMCIA 179 depends on BCM63XX && PCMCIA
198 180
181config PCMCIA_SOC_COMMON
182 bool
183
199config PCMCIA_SA1100 184config PCMCIA_SA1100
200 tristate "SA1100 support" 185 tristate "SA1100 support"
201 depends on ARM && ARCH_SA1100 && PCMCIA 186 depends on ARM && ARCH_SA1100 && PCMCIA
187 select PCMCIA_SOC_COMMON
202 help 188 help
203 Say Y here to include support for SA11x0-based PCMCIA or CF 189 Say Y here to include support for SA11x0-based PCMCIA or CF
204 sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/ 190 sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/
@@ -209,6 +195,7 @@ config PCMCIA_SA1100
209config PCMCIA_SA1111 195config PCMCIA_SA1111
210 tristate "SA1111 support" 196 tristate "SA1111 support"
211 depends on ARM && ARCH_SA1100 && SA1111 && PCMCIA 197 depends on ARM && ARCH_SA1100 && SA1111 && PCMCIA
198 select PCMCIA_SOC_COMMON
212 help 199 help
213 Say Y here to include support for SA1111-based PCMCIA or CF 200 Say Y here to include support for SA1111-based PCMCIA or CF
214 sockets, found on the Jornada 720, Graphicsmaster and other 201 sockets, found on the Jornada 720, Graphicsmaster and other
@@ -222,9 +209,28 @@ config PCMCIA_PXA2XX
222 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ 209 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
223 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ 210 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
224 || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2) 211 || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2)
212 select PCMCIA_SOC_COMMON
225 help 213 help
226 Say Y here to include support for the PXA2xx PCMCIA controller 214 Say Y here to include support for the PXA2xx PCMCIA controller
227 215
216config PCMCIA_DEBUG
217 bool "Enable debugging"
218 depends on (PCMCIA_SA1111 || PCMCIA_SA1100 || PCMCIA_PXA2XX)
219 help
220 Say Y here to enable debugging for the SoC PCMCIA layer.
221 You will need to choose the debugging level either via the
 222 kernel command line, or module options, depending on whether
223 you build the drivers as modules.
224
225 The kernel command line options are:
226 sa11xx_core.pc_debug=N
227 pxa2xx_core.pc_debug=N
228
229 The module option is called pc_debug=N
230
231 In all the above examples, N is the debugging verbosity
232 level.
233
228config PCMCIA_PROBE 234config PCMCIA_PROBE
229 bool 235 bool
230 default y if ISA && !ARCH_SA1100 && !ARCH_CLPS711X && !PARISC 236 default y if ISA && !ARCH_SA1100 && !ARCH_CLPS711X && !PARISC
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index a03a38acd77d..382938313991 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -22,8 +22,9 @@ obj-$(CONFIG_I82365) += i82365.o
22obj-$(CONFIG_I82092) += i82092.o 22obj-$(CONFIG_I82092) += i82092.o
23obj-$(CONFIG_TCIC) += tcic.o 23obj-$(CONFIG_TCIC) += tcic.o
24obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o 24obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o
25obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_core.o sa1100_cs.o 25obj-$(CONFIG_PCMCIA_SOC_COMMON) += soc_common.o
26obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o 26obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_base.o sa1100_cs.o
27obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_base.o sa1111_cs.o
27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o 28obj-$(CONFIG_M32R_PCC) += m32r_pcc.o
28obj-$(CONFIG_M32R_CFC) += m32r_cfc.o 29obj-$(CONFIG_M32R_CFC) += m32r_cfc.o
29obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o 30obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
@@ -35,9 +36,6 @@ obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o
35obj-$(CONFIG_AT91_CF) += at91_cf.o 36obj-$(CONFIG_AT91_CF) += at91_cf.o
36obj-$(CONFIG_ELECTRA_CF) += electra_cf.o 37obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
37 38
38sa11xx_core-y += soc_common.o sa11xx_base.o
39pxa2xx_core-y += soc_common.o pxa2xx_base.o
40
41au1x00_ss-y += au1000_generic.o 39au1x00_ss-y += au1000_generic.o
42au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o 40au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o
43au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o 41au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o
@@ -77,4 +75,4 @@ pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
77pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o 75pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
78pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o 76pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
79 77
80obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y) 78obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index db77e1f3309a..4cd70d056810 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -91,7 +91,7 @@ static u_int xlate_rom_addr(void __iomem *b, u_int addr)
91static void cb_release_cis_mem(struct pcmcia_socket * s) 91static void cb_release_cis_mem(struct pcmcia_socket * s)
92{ 92{
93 if (s->cb_cis_virt) { 93 if (s->cb_cis_virt) {
94 cs_dbg(s, 1, "cb_release_cis_mem()\n"); 94 dev_dbg(&s->dev, "cb_release_cis_mem()\n");
95 iounmap(s->cb_cis_virt); 95 iounmap(s->cb_cis_virt);
96 s->cb_cis_virt = NULL; 96 s->cb_cis_virt = NULL;
97 s->cb_cis_res = NULL; 97 s->cb_cis_res = NULL;
@@ -132,7 +132,7 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void
132 struct pci_dev *dev; 132 struct pci_dev *dev;
133 struct resource *res; 133 struct resource *res;
134 134
135 cs_dbg(s, 3, "read_cb_mem(%d, %#x, %u)\n", space, addr, len); 135 dev_dbg(&s->dev, "read_cb_mem(%d, %#x, %u)\n", space, addr, len);
136 136
137 dev = pci_get_slot(s->cb_dev->subordinate, 0); 137 dev = pci_get_slot(s->cb_dev->subordinate, 0);
138 if (!dev) 138 if (!dev)
diff --git a/drivers/pcmcia/cirrus.h b/drivers/pcmcia/cirrus.h
index ecd4fc7f666f..446a4576e73e 100644
--- a/drivers/pcmcia/cirrus.h
+++ b/drivers/pcmcia/cirrus.h
@@ -30,16 +30,6 @@
30#ifndef _LINUX_CIRRUS_H 30#ifndef _LINUX_CIRRUS_H
31#define _LINUX_CIRRUS_H 31#define _LINUX_CIRRUS_H
32 32
33#ifndef PCI_VENDOR_ID_CIRRUS
34#define PCI_VENDOR_ID_CIRRUS 0x1013
35#endif
36#ifndef PCI_DEVICE_ID_CIRRUS_6729
37#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
38#endif
39#ifndef PCI_DEVICE_ID_CIRRUS_6832
40#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
41#endif
42
43#define PD67_MISC_CTL_1 0x16 /* Misc control 1 */ 33#define PD67_MISC_CTL_1 0x16 /* Misc control 1 */
44#define PD67_FIFO_CTL 0x17 /* FIFO control */ 34#define PD67_FIFO_CTL 0x17 /* FIFO control */
45#define PD67_MISC_CTL_2 0x1E /* Misc control 2 */ 35#define PD67_MISC_CTL_2 0x1E /* Misc control 2 */
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 6c4a4fc83630..8c1b73cf021b 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -138,7 +138,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
138 void __iomem *sys, *end; 138 void __iomem *sys, *end;
139 unsigned char *buf = ptr; 139 unsigned char *buf = ptr;
140 140
141 cs_dbg(s, 3, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); 141 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
142 142
143 if (attr & IS_INDIRECT) { 143 if (attr & IS_INDIRECT) {
144 /* Indirect accesses use a bunch of special registers at fixed 144 /* Indirect accesses use a bunch of special registers at fixed
@@ -190,7 +190,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
190 addr = 0; 190 addr = 0;
191 } 191 }
192 } 192 }
193 cs_dbg(s, 3, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", 193 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
194 *(u_char *)(ptr+0), *(u_char *)(ptr+1), 194 *(u_char *)(ptr+0), *(u_char *)(ptr+1),
195 *(u_char *)(ptr+2), *(u_char *)(ptr+3)); 195 *(u_char *)(ptr+2), *(u_char *)(ptr+3));
196 return 0; 196 return 0;
@@ -204,7 +204,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
204 void __iomem *sys, *end; 204 void __iomem *sys, *end;
205 unsigned char *buf = ptr; 205 unsigned char *buf = ptr;
206 206
207 cs_dbg(s, 3, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); 207 dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
208 208
209 if (attr & IS_INDIRECT) { 209 if (attr & IS_INDIRECT) {
210 /* Indirect accesses use a bunch of special registers at fixed 210 /* Indirect accesses use a bunch of special registers at fixed
@@ -584,7 +584,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
584 ofs += link[1] + 2; 584 ofs += link[1] + 2;
585 } 585 }
586 if (i == MAX_TUPLES) { 586 if (i == MAX_TUPLES) {
587 cs_dbg(s, 1, "cs: overrun in pcmcia_get_next_tuple\n"); 587 dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
588 return -ENOSPC; 588 return -ENOSPC;
589 } 589 }
590 590
@@ -1440,7 +1440,7 @@ int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
1440 break; 1440 break;
1441 } 1441 }
1442 if (ret) 1442 if (ret)
1443 __cs_dbg(0, "parse_tuple failed %d\n", ret); 1443 pr_debug("parse_tuple failed %d\n", ret);
1444 return ret; 1444 return ret;
1445} 1445}
1446EXPORT_SYMBOL(pcmcia_parse_tuple); 1446EXPORT_SYMBOL(pcmcia_parse_tuple);
@@ -1482,6 +1482,67 @@ done:
1482} 1482}
1483EXPORT_SYMBOL(pccard_read_tuple); 1483EXPORT_SYMBOL(pccard_read_tuple);
1484 1484
1485
1486/**
1487 * pccard_loop_tuple() - loop over tuples in the CIS
1488 * @s: the struct pcmcia_socket where the card is inserted
1489 * @function: the device function we loop for
1490 * @code: which CIS code shall we look for?
1491 * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
1492 * @priv_data: private data to be passed to the loop_tuple function.
 1493 * @loop_tuple: function to call for each CIS entry of type @code. It
 1494 * gets passed the raw tuple, the parsed tuple (if @parse is
1495 * set) and @priv_data.
1496 *
 1497 * pccard_loop_tuple() loops over all CIS entries of type @code, and
 1498 * calls the @loop_tuple function for each entry. If the call to @loop_tuple
 1499 * returns 0, the loop exits. Returns 0 on success or an error code otherwise.
1500 */
1501int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
1502 cisdata_t code, cisparse_t *parse, void *priv_data,
1503 int (*loop_tuple) (tuple_t *tuple,
1504 cisparse_t *parse,
1505 void *priv_data))
1506{
1507 tuple_t tuple;
1508 cisdata_t *buf;
1509 int ret;
1510
1511 buf = kzalloc(256, GFP_KERNEL);
1512 if (buf == NULL) {
1513 dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
1514 return -ENOMEM;
1515 }
1516
1517 tuple.TupleData = buf;
1518 tuple.TupleDataMax = 255;
1519 tuple.TupleOffset = 0;
1520 tuple.DesiredTuple = code;
1521 tuple.Attributes = 0;
1522
1523 ret = pccard_get_first_tuple(s, function, &tuple);
1524 while (!ret) {
1525 if (pccard_get_tuple_data(s, &tuple))
1526 goto next_entry;
1527
1528 if (parse)
1529 if (pcmcia_parse_tuple(&tuple, parse))
1530 goto next_entry;
1531
1532 ret = loop_tuple(&tuple, parse, priv_data);
1533 if (!ret)
1534 break;
1535
1536next_entry:
1537 ret = pccard_get_next_tuple(s, function, &tuple);
1538 }
1539
1540 kfree(buf);
1541 return ret;
1542}
1543EXPORT_SYMBOL(pccard_loop_tuple);
1544
1545
1485/*====================================================================== 1546/*======================================================================
1486 1547
1487 This tries to determine if a card has a sensible CIS. It returns 1548 This tries to determine if a card has a sensible CIS. It returns
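
The new pccard_loop_tuple() helper centralises the get-first/get-next/get-tuple-data boilerplate that callers previously open-coded around the CIS. A hypothetical caller, sketched from the signature and loop semantics shown above (the callback name and its stop-at-first-match policy are illustrative, not part of this patch):

	/* Returning 0 from the callback ends the loop; returning non-zero
	 * makes pccard_loop_tuple() move on to the next matching entry. */
	static int pick_first_cftable(tuple_t *tuple, cisparse_t *parse,
				      void *priv_data)
	{
		unsigned int *index = priv_data;

		*index = parse->cftable_entry.index;
		return 0;	/* stop at the first configuration entry */
	}

	/* walk the CISTPL_CFTABLE_ENTRY tuples of device function 0 */
	cisparse_t parse;
	unsigned int index;
	int ret = pccard_loop_tuple(s, 0, CISTPL_CFTABLE_ENTRY, &parse,
				    &index, pick_first_cftable);
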
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 698d75cda084..790af87a922f 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -61,17 +61,6 @@ INT_MODULE_PARM(unreset_limit, 30); /* unreset_check's */
61/* Access speed for attribute memory windows */ 61/* Access speed for attribute memory windows */
62INT_MODULE_PARM(cis_speed, 300); /* ns */ 62INT_MODULE_PARM(cis_speed, 300); /* ns */
63 63
64#ifdef CONFIG_PCMCIA_DEBUG
65static int pc_debug;
66
67module_param(pc_debug, int, 0644);
68
69int cs_debug_level(int level)
70{
71 return pc_debug > level;
72}
73#endif
74
75 64
76socket_state_t dead_socket = { 65socket_state_t dead_socket = {
77 .csc_mask = SS_DETECT, 66 .csc_mask = SS_DETECT,
@@ -190,7 +179,7 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
190 if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops) 179 if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops)
191 return -EINVAL; 180 return -EINVAL;
192 181
193 cs_dbg(socket, 0, "pcmcia_register_socket(0x%p)\n", socket->ops); 182 dev_dbg(&socket->dev, "pcmcia_register_socket(0x%p)\n", socket->ops);
194 183
195 spin_lock_init(&socket->lock); 184 spin_lock_init(&socket->lock);
196 185
@@ -262,6 +251,13 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
262 251
263 pcmcia_parse_events(socket, SS_DETECT); 252 pcmcia_parse_events(socket, SS_DETECT);
264 253
254 /*
255 * Let's try to get the PCMCIA module for 16-bit PCMCIA support.
256 * If it fails, it doesn't matter -- we still have 32-bit CardBus
257 * support to offer, so this is not a failure mode.
258 */
259 request_module_nowait("pcmcia");
260
265 return 0; 261 return 0;
266 262
267 err: 263 err:
@@ -282,7 +278,7 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
282 if (!socket) 278 if (!socket)
283 return; 279 return;
284 280
285 cs_dbg(socket, 0, "pcmcia_unregister_socket(0x%p)\n", socket->ops); 281 dev_dbg(&socket->dev, "pcmcia_unregister_socket(0x%p)\n", socket->ops);
286 282
287 if (socket->thread) 283 if (socket->thread)
288 kthread_stop(socket->thread); 284 kthread_stop(socket->thread);
@@ -335,7 +331,7 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority)
335 if (s->state & SOCKET_CARDBUS) 331 if (s->state & SOCKET_CARDBUS)
336 return 0; 332 return 0;
337 333
338 cs_dbg(s, 1, "send_event(event %d, pri %d, callback 0x%p)\n", 334 dev_dbg(&s->dev, "send_event(event %d, pri %d, callback 0x%p)\n",
339 event, priority, s->callback); 335 event, priority, s->callback);
340 336
341 if (!s->callback) 337 if (!s->callback)
@@ -352,7 +348,7 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority)
352 348
353static void socket_remove_drivers(struct pcmcia_socket *skt) 349static void socket_remove_drivers(struct pcmcia_socket *skt)
354{ 350{
355 cs_dbg(skt, 4, "remove_drivers\n"); 351 dev_dbg(&skt->dev, "remove_drivers\n");
356 352
357 send_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH); 353 send_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
358} 354}
@@ -361,7 +357,7 @@ static int socket_reset(struct pcmcia_socket *skt)
361{ 357{
362 int status, i; 358 int status, i;
363 359
364 cs_dbg(skt, 4, "reset\n"); 360 dev_dbg(&skt->dev, "reset\n");
365 361
366 skt->socket.flags |= SS_OUTPUT_ENA | SS_RESET; 362 skt->socket.flags |= SS_OUTPUT_ENA | SS_RESET;
367 skt->ops->set_socket(skt, &skt->socket); 363 skt->ops->set_socket(skt, &skt->socket);
@@ -383,7 +379,7 @@ static int socket_reset(struct pcmcia_socket *skt)
383 msleep(unreset_check * 10); 379 msleep(unreset_check * 10);
384 } 380 }
385 381
386 cs_err(skt, "time out after reset.\n"); 382 dev_printk(KERN_ERR, &skt->dev, "time out after reset.\n");
387 return -ETIMEDOUT; 383 return -ETIMEDOUT;
388} 384}
389 385
@@ -397,7 +393,7 @@ static void socket_shutdown(struct pcmcia_socket *s)
397{ 393{
398 int status; 394 int status;
399 395
400 cs_dbg(s, 4, "shutdown\n"); 396 dev_dbg(&s->dev, "shutdown\n");
401 397
402 socket_remove_drivers(s); 398 socket_remove_drivers(s);
403 s->state &= SOCKET_INUSE | SOCKET_PRESENT; 399 s->state &= SOCKET_INUSE | SOCKET_PRESENT;
@@ -432,7 +428,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
432{ 428{
433 int status, i; 429 int status, i;
434 430
435 cs_dbg(skt, 4, "setup\n"); 431 dev_dbg(&skt->dev, "setup\n");
436 432
437 skt->ops->get_status(skt, &status); 433 skt->ops->get_status(skt, &status);
438 if (!(status & SS_DETECT)) 434 if (!(status & SS_DETECT))
@@ -452,13 +448,15 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
452 } 448 }
453 449
454 if (status & SS_PENDING) { 450 if (status & SS_PENDING) {
455 cs_err(skt, "voltage interrogation timed out.\n"); 451 dev_printk(KERN_ERR, &skt->dev,
452 "voltage interrogation timed out.\n");
456 return -ETIMEDOUT; 453 return -ETIMEDOUT;
457 } 454 }
458 455
459 if (status & SS_CARDBUS) { 456 if (status & SS_CARDBUS) {
460 if (!(skt->features & SS_CAP_CARDBUS)) { 457 if (!(skt->features & SS_CAP_CARDBUS)) {
461 cs_err(skt, "cardbus cards are not supported.\n"); 458 dev_printk(KERN_ERR, &skt->dev,
459 "cardbus cards are not supported.\n");
462 return -EINVAL; 460 return -EINVAL;
463 } 461 }
464 skt->state |= SOCKET_CARDBUS; 462 skt->state |= SOCKET_CARDBUS;
@@ -472,7 +470,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
472 else if (!(status & SS_XVCARD)) 470 else if (!(status & SS_XVCARD))
473 skt->socket.Vcc = skt->socket.Vpp = 50; 471 skt->socket.Vcc = skt->socket.Vpp = 50;
474 else { 472 else {
475 cs_err(skt, "unsupported voltage key.\n"); 473 dev_printk(KERN_ERR, &skt->dev, "unsupported voltage key.\n");
476 return -EIO; 474 return -EIO;
477 } 475 }
478 476
@@ -489,7 +487,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
489 487
490 skt->ops->get_status(skt, &status); 488 skt->ops->get_status(skt, &status);
491 if (!(status & SS_POWERON)) { 489 if (!(status & SS_POWERON)) {
492 cs_err(skt, "unable to apply power.\n"); 490 dev_printk(KERN_ERR, &skt->dev, "unable to apply power.\n");
493 return -EIO; 491 return -EIO;
494 } 492 }
495 493
@@ -509,7 +507,7 @@ static int socket_insert(struct pcmcia_socket *skt)
509{ 507{
510 int ret; 508 int ret;
511 509
512 cs_dbg(skt, 4, "insert\n"); 510 dev_dbg(&skt->dev, "insert\n");
513 511
514 if (!cs_socket_get(skt)) 512 if (!cs_socket_get(skt))
515 return -ENODEV; 513 return -ENODEV;
@@ -529,7 +527,7 @@ static int socket_insert(struct pcmcia_socket *skt)
529 skt->state |= SOCKET_CARDBUS_CONFIG; 527 skt->state |= SOCKET_CARDBUS_CONFIG;
530 } 528 }
531#endif 529#endif
532 cs_dbg(skt, 4, "insert done\n"); 530 dev_dbg(&skt->dev, "insert done\n");
533 531
534 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); 532 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
535 } else { 533 } else {
@@ -576,7 +574,7 @@ static int socket_late_resume(struct pcmcia_socket *skt)
576 * FIXME: need a better check here for cardbus cards. 574 * FIXME: need a better check here for cardbus cards.
577 */ 575 */
578 if (verify_cis_cache(skt) != 0) { 576 if (verify_cis_cache(skt) != 0) {
579 cs_dbg(skt, 4, "cis mismatch - different card\n"); 577 dev_dbg(&skt->dev, "cis mismatch - different card\n");
580 socket_remove_drivers(skt); 578 socket_remove_drivers(skt);
581 destroy_cis_cache(skt); 579 destroy_cis_cache(skt);
582 /* 580 /*
@@ -587,7 +585,7 @@ static int socket_late_resume(struct pcmcia_socket *skt)
587 msleep(200); 585 msleep(200);
588 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); 586 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
589 } else { 587 } else {
590 cs_dbg(skt, 4, "cis matches cache\n"); 588 dev_dbg(&skt->dev, "cis matches cache\n");
591 send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW); 589 send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
592 } 590 }
593 } else { 591 } else {
@@ -723,7 +721,7 @@ static int pccardd(void *__skt)
723void pcmcia_parse_events(struct pcmcia_socket *s, u_int events) 721void pcmcia_parse_events(struct pcmcia_socket *s, u_int events)
724{ 722{
725 unsigned long flags; 723 unsigned long flags;
726 cs_dbg(s, 4, "parse_events: events %08x\n", events); 724 dev_dbg(&s->dev, "parse_events: events %08x\n", events);
727 if (s->thread) { 725 if (s->thread) {
728 spin_lock_irqsave(&s->thread_lock, flags); 726 spin_lock_irqsave(&s->thread_lock, flags);
729 s->thread_events |= events; 727 s->thread_events |= events;
@@ -773,19 +771,22 @@ int pcmcia_reset_card(struct pcmcia_socket *skt)
773{ 771{
774 int ret; 772 int ret;
775 773
776 cs_dbg(skt, 1, "resetting socket\n"); 774 dev_dbg(&skt->dev, "resetting socket\n");
777 775
778 mutex_lock(&skt->skt_mutex); 776 mutex_lock(&skt->skt_mutex);
779 do { 777 do {
780 if (!(skt->state & SOCKET_PRESENT)) { 778 if (!(skt->state & SOCKET_PRESENT)) {
779 dev_dbg(&skt->dev, "can't reset, not present\n");
781 ret = -ENODEV; 780 ret = -ENODEV;
782 break; 781 break;
783 } 782 }
784 if (skt->state & SOCKET_SUSPEND) { 783 if (skt->state & SOCKET_SUSPEND) {
784 dev_dbg(&skt->dev, "can't reset, suspended\n");
785 ret = -EBUSY; 785 ret = -EBUSY;
786 break; 786 break;
787 } 787 }
788 if (skt->state & SOCKET_CARDBUS) { 788 if (skt->state & SOCKET_CARDBUS) {
789 dev_dbg(&skt->dev, "can't reset, is cardbus\n");
789 ret = -EPERM; 790 ret = -EPERM;
790 break; 791 break;
791 } 792 }
@@ -818,7 +819,7 @@ int pcmcia_suspend_card(struct pcmcia_socket *skt)
818{ 819{
819 int ret; 820 int ret;
820 821
821 cs_dbg(skt, 1, "suspending socket\n"); 822 dev_dbg(&skt->dev, "suspending socket\n");
822 823
823 mutex_lock(&skt->skt_mutex); 824 mutex_lock(&skt->skt_mutex);
824 do { 825 do {
@@ -848,7 +849,7 @@ int pcmcia_resume_card(struct pcmcia_socket *skt)
848{ 849{
849 int ret; 850 int ret;
850 851
851 cs_dbg(skt, 1, "waking up socket\n"); 852 dev_dbg(&skt->dev, "waking up socket\n");
852 853
853 mutex_lock(&skt->skt_mutex); 854 mutex_lock(&skt->skt_mutex);
854 do { 855 do {
@@ -876,7 +877,7 @@ int pcmcia_eject_card(struct pcmcia_socket *skt)
876{ 877{
877 int ret; 878 int ret;
878 879
879 cs_dbg(skt, 1, "user eject request\n"); 880 dev_dbg(&skt->dev, "user eject request\n");
880 881
881 mutex_lock(&skt->skt_mutex); 882 mutex_lock(&skt->skt_mutex);
882 do { 883 do {
@@ -905,7 +906,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
905{ 906{
906 int ret; 907 int ret;
907 908
908 cs_dbg(skt, 1, "user insert request\n"); 909 dev_dbg(&skt->dev, "user insert request\n");
909 910
910 mutex_lock(&skt->skt_mutex); 911 mutex_lock(&skt->skt_mutex);
911 do { 912 do {
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 1f4098f1354d..3bc02d53a3a3 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -107,28 +107,6 @@ static inline void cs_socket_put(struct pcmcia_socket *skt)
107 } 107 }
108} 108}
109 109
110#ifdef CONFIG_PCMCIA_DEBUG
111extern int cs_debug_level(int);
112
113#define cs_dbg(skt, lvl, fmt, arg...) do { \
114 if (cs_debug_level(lvl)) \
115 dev_printk(KERN_DEBUG, &skt->dev, \
116 "cs: " fmt, ## arg); \
117} while (0)
118#define __cs_dbg(lvl, fmt, arg...) do { \
119 if (cs_debug_level(lvl)) \
120 printk(KERN_DEBUG \
121 "cs: " fmt, ## arg); \
122} while (0)
123
124#else
125#define cs_dbg(skt, lvl, fmt, arg...) do { } while (0)
126#define __cs_dbg(lvl, fmt, arg...) do { } while (0)
127#endif
128
129#define cs_err(skt, fmt, arg...) \
130 dev_printk(KERN_ERR, &skt->dev, "cs: " fmt, ## arg)
131
132 110
133/* 111/*
134 * Stuff internal to module "pcmcia_core": 112 * Stuff internal to module "pcmcia_core":
@@ -170,10 +148,6 @@ extern struct rw_semaphore pcmcia_socket_list_rwsem;
170extern struct list_head pcmcia_socket_list; 148extern struct list_head pcmcia_socket_list;
171extern struct class pcmcia_socket_class; 149extern struct class pcmcia_socket_class;
172 150
173int pcmcia_get_window(struct pcmcia_socket *s,
174 window_handle_t *handle,
175 int idx,
176 win_req_t *req);
177int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c); 151int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c);
178struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr); 152struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr);
179 153
@@ -199,6 +173,22 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
199 const u8 *data, const size_t len); 173 const u8 *data, const size_t len);
200int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count); 174int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count);
201 175
176/* loop over CIS entries */
177int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
178 cisdata_t code, cisparse_t *parse, void *priv_data,
179 int (*loop_tuple) (tuple_t *tuple,
180 cisparse_t *parse,
181 void *priv_data));
182
183int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function,
184 tuple_t *tuple);
185
186int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
187 tuple_t *tuple);
188
189int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple);
190
191
202/* rsrc_mgr.c */ 192/* rsrc_mgr.c */
203int pcmcia_validate_mem(struct pcmcia_socket *s); 193int pcmcia_validate_mem(struct pcmcia_socket *s);
204struct resource *pcmcia_find_io_region(unsigned long base, 194struct resource *pcmcia_find_io_region(unsigned long base,
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index f5b7079f13d3..05893d41dd41 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -41,129 +41,11 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
41MODULE_DESCRIPTION("PCMCIA Driver Services"); 41MODULE_DESCRIPTION("PCMCIA Driver Services");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43 43
44#ifdef CONFIG_PCMCIA_DEBUG
45int ds_pc_debug;
46
47module_param_named(pc_debug, ds_pc_debug, int, 0644);
48
49#define ds_dbg(lvl, fmt, arg...) do { \
50 if (ds_pc_debug > (lvl)) \
51 printk(KERN_DEBUG "ds: " fmt , ## arg); \
52} while (0)
53#define ds_dev_dbg(lvl, dev, fmt, arg...) do { \
54 if (ds_pc_debug > (lvl)) \
55 dev_printk(KERN_DEBUG, dev, "ds: " fmt , ## arg); \
56} while (0)
57#else
58#define ds_dbg(lvl, fmt, arg...) do { } while (0)
59#define ds_dev_dbg(lvl, dev, fmt, arg...) do { } while (0)
60#endif
61 44
62spinlock_t pcmcia_dev_list_lock; 45spinlock_t pcmcia_dev_list_lock;
63 46
64/*====================================================================*/ 47/*====================================================================*/
65 48
66/* code which was in cs.c before */
67
68/* String tables for error messages */
69
70typedef struct lookup_t {
71 const int key;
72 const char *msg;
73} lookup_t;
74
75static const lookup_t error_table[] = {
76 { 0, "Operation succeeded" },
77 { -EIO, "Input/Output error" },
78 { -ENODEV, "No card present" },
79 { -EINVAL, "Bad parameter" },
80 { -EACCES, "Configuration locked" },
81 { -EBUSY, "Resource in use" },
82 { -ENOSPC, "No more items" },
83 { -ENOMEM, "Out of resource" },
84};
85
86
87static const lookup_t service_table[] = {
88 { AccessConfigurationRegister, "AccessConfigurationRegister" },
89 { AddSocketServices, "AddSocketServices" },
90 { AdjustResourceInfo, "AdjustResourceInfo" },
91 { CheckEraseQueue, "CheckEraseQueue" },
92 { CloseMemory, "CloseMemory" },
93 { DeregisterClient, "DeregisterClient" },
94 { DeregisterEraseQueue, "DeregisterEraseQueue" },
95 { GetCardServicesInfo, "GetCardServicesInfo" },
96 { GetClientInfo, "GetClientInfo" },
97 { GetConfigurationInfo, "GetConfigurationInfo" },
98 { GetEventMask, "GetEventMask" },
99 { GetFirstClient, "GetFirstClient" },
100 { GetFirstRegion, "GetFirstRegion" },
101 { GetFirstTuple, "GetFirstTuple" },
102 { GetNextClient, "GetNextClient" },
103 { GetNextRegion, "GetNextRegion" },
104 { GetNextTuple, "GetNextTuple" },
105 { GetStatus, "GetStatus" },
106 { GetTupleData, "GetTupleData" },
107 { MapMemPage, "MapMemPage" },
108 { ModifyConfiguration, "ModifyConfiguration" },
109 { ModifyWindow, "ModifyWindow" },
110 { OpenMemory, "OpenMemory" },
111 { ParseTuple, "ParseTuple" },
112 { ReadMemory, "ReadMemory" },
113 { RegisterClient, "RegisterClient" },
114 { RegisterEraseQueue, "RegisterEraseQueue" },
115 { RegisterMTD, "RegisterMTD" },
116 { ReleaseConfiguration, "ReleaseConfiguration" },
117 { ReleaseIO, "ReleaseIO" },
118 { ReleaseIRQ, "ReleaseIRQ" },
119 { ReleaseWindow, "ReleaseWindow" },
120 { RequestConfiguration, "RequestConfiguration" },
121 { RequestIO, "RequestIO" },
122 { RequestIRQ, "RequestIRQ" },
123 { RequestSocketMask, "RequestSocketMask" },
124 { RequestWindow, "RequestWindow" },
125 { ResetCard, "ResetCard" },
126 { SetEventMask, "SetEventMask" },
127 { ValidateCIS, "ValidateCIS" },
128 { WriteMemory, "WriteMemory" },
129 { BindDevice, "BindDevice" },
130 { BindMTD, "BindMTD" },
131 { ReportError, "ReportError" },
132 { SuspendCard, "SuspendCard" },
133 { ResumeCard, "ResumeCard" },
134 { EjectCard, "EjectCard" },
135 { InsertCard, "InsertCard" },
136 { ReplaceCIS, "ReplaceCIS" }
137};
138
139const char *pcmcia_error_func(int func)
140{
141 int i;
142
143 for (i = 0; i < ARRAY_SIZE(service_table); i++)
144 if (service_table[i].key == func)
145 return service_table[i].msg;
146
147 return "Unknown service number";
148}
149EXPORT_SYMBOL(pcmcia_error_func);
150
151const char *pcmcia_error_ret(int ret)
152{
153 int i;
154
155 for (i = 0; i < ARRAY_SIZE(error_table); i++)
156 if (error_table[i].key == ret)
157 return error_table[i].msg;
158
159 return "unknown";
160}
161EXPORT_SYMBOL(pcmcia_error_ret);
162
163/*======================================================================*/
164
165
166
167static void pcmcia_check_driver(struct pcmcia_driver *p_drv) 49static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
168{ 50{
169 struct pcmcia_device_id *did = p_drv->id_table; 51 struct pcmcia_device_id *did = p_drv->id_table;
@@ -303,7 +185,7 @@ int pcmcia_register_driver(struct pcmcia_driver *driver)
303 spin_lock_init(&driver->dynids.lock); 185 spin_lock_init(&driver->dynids.lock);
304 INIT_LIST_HEAD(&driver->dynids.list); 186 INIT_LIST_HEAD(&driver->dynids.list);
305 187
306 ds_dbg(3, "registering driver %s\n", driver->drv.name); 188 pr_debug("registering driver %s\n", driver->drv.name);
307 189
308 error = driver_register(&driver->drv); 190 error = driver_register(&driver->drv);
309 if (error < 0) 191 if (error < 0)
@@ -323,7 +205,7 @@ EXPORT_SYMBOL(pcmcia_register_driver);
323 */ 205 */
324void pcmcia_unregister_driver(struct pcmcia_driver *driver) 206void pcmcia_unregister_driver(struct pcmcia_driver *driver)
325{ 207{
326 ds_dbg(3, "unregistering driver %s\n", driver->drv.name); 208 pr_debug("unregistering driver %s\n", driver->drv.name);
327 driver_unregister(&driver->drv); 209 driver_unregister(&driver->drv);
328 pcmcia_free_dynids(driver); 210 pcmcia_free_dynids(driver);
329} 211}
@@ -350,14 +232,14 @@ void pcmcia_put_dev(struct pcmcia_device *p_dev)
350static void pcmcia_release_function(struct kref *ref) 232static void pcmcia_release_function(struct kref *ref)
351{ 233{
352 struct config_t *c = container_of(ref, struct config_t, ref); 234 struct config_t *c = container_of(ref, struct config_t, ref);
353 ds_dbg(1, "releasing config_t\n"); 235 pr_debug("releasing config_t\n");
354 kfree(c); 236 kfree(c);
355} 237}
356 238
357static void pcmcia_release_dev(struct device *dev) 239static void pcmcia_release_dev(struct device *dev)
358{ 240{
359 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 241 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
360 ds_dev_dbg(1, dev, "releasing device\n"); 242 dev_dbg(dev, "releasing device\n");
361 pcmcia_put_socket(p_dev->socket); 243 pcmcia_put_socket(p_dev->socket);
362 kfree(p_dev->devname); 244 kfree(p_dev->devname);
363 kref_put(&p_dev->function_config->ref, pcmcia_release_function); 245 kref_put(&p_dev->function_config->ref, pcmcia_release_function);
@@ -367,7 +249,7 @@ static void pcmcia_release_dev(struct device *dev)
367static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc) 249static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc)
368{ 250{
369 if (!s->pcmcia_state.device_add_pending) { 251 if (!s->pcmcia_state.device_add_pending) {
370 ds_dev_dbg(1, &s->dev, "scheduling to add %s secondary" 252 dev_dbg(&s->dev, "scheduling to add %s secondary"
371 " device to %d\n", mfc ? "mfc" : "pfc", s->sock); 253 " device to %d\n", mfc ? "mfc" : "pfc", s->sock);
372 s->pcmcia_state.device_add_pending = 1; 254 s->pcmcia_state.device_add_pending = 1;
373 s->pcmcia_state.mfc_pfc = mfc; 255 s->pcmcia_state.mfc_pfc = mfc;
@@ -405,7 +287,7 @@ static int pcmcia_device_probe(struct device * dev)
405 */ 287 */
406 did = dev_get_drvdata(&p_dev->dev); 288 did = dev_get_drvdata(&p_dev->dev);
407 289
408 ds_dev_dbg(1, dev, "trying to bind to %s\n", p_drv->drv.name); 290 dev_dbg(dev, "trying to bind to %s\n", p_drv->drv.name);
409 291
410 if ((!p_drv->probe) || (!p_dev->function_config) || 292 if ((!p_drv->probe) || (!p_dev->function_config) ||
411 (!try_module_get(p_drv->owner))) { 293 (!try_module_get(p_drv->owner))) {
@@ -428,7 +310,7 @@ static int pcmcia_device_probe(struct device * dev)
428 310
429 ret = p_drv->probe(p_dev); 311 ret = p_drv->probe(p_dev);
430 if (ret) { 312 if (ret) {
431 ds_dev_dbg(1, dev, "binding to %s failed with %d\n", 313 dev_dbg(dev, "binding to %s failed with %d\n",
432 p_drv->drv.name, ret); 314 p_drv->drv.name, ret);
433 goto put_module; 315 goto put_module;
434 } 316 }
@@ -456,7 +338,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
456 struct pcmcia_device *tmp; 338 struct pcmcia_device *tmp;
457 unsigned long flags; 339 unsigned long flags;
458 340
459 ds_dev_dbg(2, leftover ? &leftover->dev : &s->dev, 341 dev_dbg(leftover ? &leftover->dev : &s->dev,
460 "pcmcia_card_remove(%d) %s\n", s->sock, 342 "pcmcia_card_remove(%d) %s\n", s->sock,
461 leftover ? leftover->devname : ""); 343 leftover ? leftover->devname : "");
462 344
@@ -475,7 +357,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
475 p_dev->_removed=1; 357 p_dev->_removed=1;
476 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 358 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
477 359
478 ds_dev_dbg(2, &p_dev->dev, "unregistering device\n"); 360 dev_dbg(&p_dev->dev, "unregistering device\n");
479 device_unregister(&p_dev->dev); 361 device_unregister(&p_dev->dev);
480 } 362 }
481 363
@@ -492,7 +374,7 @@ static int pcmcia_device_remove(struct device * dev)
492 p_dev = to_pcmcia_dev(dev); 374 p_dev = to_pcmcia_dev(dev);
493 p_drv = to_pcmcia_drv(dev->driver); 375 p_drv = to_pcmcia_drv(dev->driver);
494 376
495 ds_dev_dbg(1, dev, "removing device\n"); 377 dev_dbg(dev, "removing device\n");
496 378
497 /* If we're removing the primary module driving a 379 /* If we're removing the primary module driving a
498 * pseudo multi-function card, we need to unbind 380 * pseudo multi-function card, we need to unbind
@@ -572,7 +454,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
572 } 454 }
573 if (!pccard_read_tuple(p_dev->socket, p_dev->func, 455 if (!pccard_read_tuple(p_dev->socket, p_dev->func,
574 CISTPL_DEVICE_GEO, devgeo)) { 456 CISTPL_DEVICE_GEO, devgeo)) {
575 ds_dev_dbg(0, &p_dev->dev, 457 dev_dbg(&p_dev->dev,
576 "mem device geometry probably means " 458 "mem device geometry probably means "
577 "FUNCID_MEMORY\n"); 459 "FUNCID_MEMORY\n");
578 p_dev->func_id = CISTPL_FUNCID_MEMORY; 460 p_dev->func_id = CISTPL_FUNCID_MEMORY;
@@ -628,7 +510,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
628 510
629 mutex_lock(&device_add_lock); 511 mutex_lock(&device_add_lock);
630 512
631 ds_dbg(3, "adding device to %d, function %d\n", s->sock, function); 513 pr_debug("adding device to %d, function %d\n", s->sock, function);
632 514
633 /* max of 4 devices per card */ 515 /* max of 4 devices per card */
634 if (s->device_count == 4) 516 if (s->device_count == 4)
@@ -654,7 +536,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
654 p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); 536 p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
655 if (!p_dev->devname) 537 if (!p_dev->devname)
656 goto err_free; 538 goto err_free;
657 ds_dev_dbg(3, &p_dev->dev, "devname is %s\n", p_dev->devname); 539 dev_dbg(&p_dev->dev, "devname is %s\n", p_dev->devname);
658 540
659 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 541 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
660 542
@@ -677,7 +559,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
677 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 559 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
678 560
679 if (!p_dev->function_config) { 561 if (!p_dev->function_config) {
680 ds_dev_dbg(3, &p_dev->dev, "creating config_t\n"); 562 dev_dbg(&p_dev->dev, "creating config_t\n");
681 p_dev->function_config = kzalloc(sizeof(struct config_t), 563 p_dev->function_config = kzalloc(sizeof(struct config_t),
682 GFP_KERNEL); 564 GFP_KERNEL);
683 if (!p_dev->function_config) 565 if (!p_dev->function_config)
@@ -722,20 +604,20 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
722 int ret = 0; 604 int ret = 0;
723 605
724 if (!(s->resource_setup_done)) { 606 if (!(s->resource_setup_done)) {
725 ds_dev_dbg(3, &s->dev, 607 dev_dbg(&s->dev,
726 "no resources available, delaying card_add\n"); 608 "no resources available, delaying card_add\n");
727 return -EAGAIN; /* try again, but later... */ 609 return -EAGAIN; /* try again, but later... */
728 } 610 }
729 611
730 if (pcmcia_validate_mem(s)) { 612 if (pcmcia_validate_mem(s)) {
731 ds_dev_dbg(3, &s->dev, "validating mem resources failed, " 613 dev_dbg(&s->dev, "validating mem resources failed, "
732 "delaying card_add\n"); 614 "delaying card_add\n");
733 return -EAGAIN; /* try again, but later... */ 615 return -EAGAIN; /* try again, but later... */
734 } 616 }
735 617
736 ret = pccard_validate_cis(s, &no_chains); 618 ret = pccard_validate_cis(s, &no_chains);
737 if (ret || !no_chains) { 619 if (ret || !no_chains) {
738 ds_dev_dbg(0, &s->dev, "invalid CIS or invalid resources\n"); 620 dev_dbg(&s->dev, "invalid CIS or invalid resources\n");
739 return -ENODEV; 621 return -ENODEV;
740 } 622 }
741 623
@@ -756,7 +638,7 @@ static void pcmcia_delayed_add_device(struct work_struct *work)
756{ 638{
757 struct pcmcia_socket *s = 639 struct pcmcia_socket *s =
758 container_of(work, struct pcmcia_socket, device_add); 640 container_of(work, struct pcmcia_socket, device_add);
759 ds_dev_dbg(1, &s->dev, "adding additional device to %d\n", s->sock); 641 dev_dbg(&s->dev, "adding additional device to %d\n", s->sock);
760 pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); 642 pcmcia_device_add(s, s->pcmcia_state.mfc_pfc);
761 s->pcmcia_state.device_add_pending = 0; 643 s->pcmcia_state.device_add_pending = 0;
762 s->pcmcia_state.mfc_pfc = 0; 644 s->pcmcia_state.mfc_pfc = 0;
@@ -766,7 +648,7 @@ static int pcmcia_requery(struct device *dev, void * _data)
766{ 648{
767 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 649 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
768 if (!p_dev->dev.driver) { 650 if (!p_dev->dev.driver) {
769 ds_dev_dbg(1, dev, "update device information\n"); 651 dev_dbg(dev, "update device information\n");
770 pcmcia_device_query(p_dev); 652 pcmcia_device_query(p_dev);
771 } 653 }
772 654
@@ -780,7 +662,7 @@ static void pcmcia_bus_rescan(struct pcmcia_socket *skt, int new_cis)
780 unsigned long flags; 662 unsigned long flags;
781 663
782 /* must be called with skt_mutex held */ 664 /* must be called with skt_mutex held */
783 ds_dev_dbg(0, &skt->dev, "re-scanning socket %d\n", skt->sock); 665 dev_dbg(&skt->dev, "re-scanning socket %d\n", skt->sock);
784 666
785 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 667 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
786 if (list_empty(&skt->devices_list)) 668 if (list_empty(&skt->devices_list))
@@ -835,7 +717,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
835 if (!filename) 717 if (!filename)
836 return -EINVAL; 718 return -EINVAL;
837 719
838 ds_dev_dbg(1, &dev->dev, "trying to load CIS file %s\n", filename); 720 dev_dbg(&dev->dev, "trying to load CIS file %s\n", filename);
839 721
840 if (request_firmware(&fw, filename, &dev->dev) == 0) { 722 if (request_firmware(&fw, filename, &dev->dev) == 0) {
841 if (fw->size >= CISTPL_MAX_CIS_SIZE) { 723 if (fw->size >= CISTPL_MAX_CIS_SIZE) {
@@ -953,14 +835,14 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
953 * after it has re-checked that there is no possible module 835 * after it has re-checked that there is no possible module
954 * with a prod_id/manf_id/card_id match. 836 * with a prod_id/manf_id/card_id match.
955 */ 837 */
956 ds_dev_dbg(0, &dev->dev, 838 dev_dbg(&dev->dev,
957 "skipping FUNC_ID match until userspace interaction\n"); 839 "skipping FUNC_ID match until userspace interaction\n");
958 if (!dev->allow_func_id_match) 840 if (!dev->allow_func_id_match)
959 return 0; 841 return 0;
960 } 842 }
961 843
962 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { 844 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
963 ds_dev_dbg(0, &dev->dev, "device needs a fake CIS\n"); 845 dev_dbg(&dev->dev, "device needs a fake CIS\n");
964 if (!dev->socket->fake_cis) 846 if (!dev->socket->fake_cis)
965 pcmcia_load_firmware(dev, did->cisfile); 847 pcmcia_load_firmware(dev, did->cisfile);
966 848
@@ -992,9 +874,9 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
992 /* match dynamic devices first */ 874 /* match dynamic devices first */
993 spin_lock(&p_drv->dynids.lock); 875 spin_lock(&p_drv->dynids.lock);
994 list_for_each_entry(dynid, &p_drv->dynids.list, node) { 876 list_for_each_entry(dynid, &p_drv->dynids.list, node) {
995 ds_dev_dbg(3, dev, "trying to match to %s\n", drv->name); 877 dev_dbg(dev, "trying to match to %s\n", drv->name);
996 if (pcmcia_devmatch(p_dev, &dynid->id)) { 878 if (pcmcia_devmatch(p_dev, &dynid->id)) {
997 ds_dev_dbg(0, dev, "matched to %s\n", drv->name); 879 dev_dbg(dev, "matched to %s\n", drv->name);
998 spin_unlock(&p_drv->dynids.lock); 880 spin_unlock(&p_drv->dynids.lock);
999 return 1; 881 return 1;
1000 } 882 }
@@ -1004,15 +886,15 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
1004#ifdef CONFIG_PCMCIA_IOCTL 886#ifdef CONFIG_PCMCIA_IOCTL
1005 /* matching by cardmgr */ 887 /* matching by cardmgr */
1006 if (p_dev->cardmgr == p_drv) { 888 if (p_dev->cardmgr == p_drv) {
1007 ds_dev_dbg(0, dev, "cardmgr matched to %s\n", drv->name); 889 dev_dbg(dev, "cardmgr matched to %s\n", drv->name);
1008 return 1; 890 return 1;
1009 } 891 }
1010#endif 892#endif
1011 893
1012 while (did && did->match_flags) { 894 while (did && did->match_flags) {
1013 ds_dev_dbg(3, dev, "trying to match to %s\n", drv->name); 895 dev_dbg(dev, "trying to match to %s\n", drv->name);
1014 if (pcmcia_devmatch(p_dev, did)) { 896 if (pcmcia_devmatch(p_dev, did)) {
1015 ds_dev_dbg(0, dev, "matched to %s\n", drv->name); 897 dev_dbg(dev, "matched to %s\n", drv->name);
1016 return 1; 898 return 1;
1017 } 899 }
1018 did++; 900 did++;
@@ -1218,7 +1100,7 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
1218 if (p_dev->suspended) 1100 if (p_dev->suspended)
1219 return 0; 1101 return 0;
1220 1102
1221 ds_dev_dbg(2, dev, "suspending\n"); 1103 dev_dbg(dev, "suspending\n");
1222 1104
1223 if (dev->driver) 1105 if (dev->driver)
1224 p_drv = to_pcmcia_drv(dev->driver); 1106 p_drv = to_pcmcia_drv(dev->driver);
@@ -1238,7 +1120,7 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
1238 } 1120 }
1239 1121
1240 if (p_dev->device_no == p_dev->func) { 1122 if (p_dev->device_no == p_dev->func) {
1241 ds_dev_dbg(2, dev, "releasing configuration\n"); 1123 dev_dbg(dev, "releasing configuration\n");
1242 pcmcia_release_configuration(p_dev); 1124 pcmcia_release_configuration(p_dev);
1243 } 1125 }
1244 1126
@@ -1258,7 +1140,7 @@ static int pcmcia_dev_resume(struct device * dev)
1258 if (!p_dev->suspended) 1140 if (!p_dev->suspended)
1259 return 0; 1141 return 0;
1260 1142
1261 ds_dev_dbg(2, dev, "resuming\n"); 1143 dev_dbg(dev, "resuming\n");
1262 1144
1263 if (dev->driver) 1145 if (dev->driver)
1264 p_drv = to_pcmcia_drv(dev->driver); 1146 p_drv = to_pcmcia_drv(dev->driver);
@@ -1267,7 +1149,7 @@ static int pcmcia_dev_resume(struct device * dev)
1267 goto out; 1149 goto out;
1268 1150
1269 if (p_dev->device_no == p_dev->func) { 1151 if (p_dev->device_no == p_dev->func) {
1270 ds_dev_dbg(2, dev, "requesting configuration\n"); 1152 dev_dbg(dev, "requesting configuration\n");
1271 ret = pcmcia_request_configuration(p_dev, &p_dev->conf); 1153 ret = pcmcia_request_configuration(p_dev, &p_dev->conf);
1272 if (ret) 1154 if (ret)
1273 goto out; 1155 goto out;
@@ -1309,14 +1191,14 @@ static int pcmcia_bus_resume_callback(struct device *dev, void * _data)
1309 1191
1310static int pcmcia_bus_resume(struct pcmcia_socket *skt) 1192static int pcmcia_bus_resume(struct pcmcia_socket *skt)
1311{ 1193{
1312 ds_dev_dbg(2, &skt->dev, "resuming socket %d\n", skt->sock); 1194 dev_dbg(&skt->dev, "resuming socket %d\n", skt->sock);
1313 bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback); 1195 bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback);
1314 return 0; 1196 return 0;
1315} 1197}
1316 1198
1317static int pcmcia_bus_suspend(struct pcmcia_socket *skt) 1199static int pcmcia_bus_suspend(struct pcmcia_socket *skt)
1318{ 1200{
1319 ds_dev_dbg(2, &skt->dev, "suspending socket %d\n", skt->sock); 1201 dev_dbg(&skt->dev, "suspending socket %d\n", skt->sock);
1320 if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt, 1202 if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt,
1321 pcmcia_bus_suspend_callback)) { 1203 pcmcia_bus_suspend_callback)) {
1322 pcmcia_bus_resume(skt); 1204 pcmcia_bus_resume(skt);
@@ -1348,7 +1230,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1348 return -ENODEV; 1230 return -ENODEV;
1349 } 1231 }
1350 1232
1351 ds_dev_dbg(1, &skt->dev, "ds_event(0x%06x, %d, 0x%p)\n", 1233 dev_dbg(&skt->dev, "ds_event(0x%06x, %d, 0x%p)\n",
1352 event, priority, skt); 1234 event, priority, skt);
1353 1235
1354 switch (event) { 1236 switch (event) {
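
The large deletion at the top of ds.c drops the error-string and service-string tables along with pcmcia_error_func() and pcmcia_error_ret(); they existed to back the cs_error() reporting whose call sites are removed throughout this diff. Reconstructed for illustration only, the retired reporting path looked roughly like:

	/* old-style report, e.g. for a failed pcmcia_request_irq() */
	dev_printk(KERN_NOTICE, &p_dev->dev, "%s: %s\n",
		   pcmcia_error_func(RequestIRQ),	/* "RequestIRQ" */
		   pcmcia_error_ret(-EBUSY));		/* "Resource in use" */
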
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index a4aacb830b80..c13fd9360511 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -63,21 +63,6 @@
63#include "vg468.h" 63#include "vg468.h"
64#include "ricoh.h" 64#include "ricoh.h"
65 65
66#ifdef CONFIG_PCMCIA_DEBUG
67static const char version[] =
68"i82365.c 1.265 1999/11/10 18:36:21 (David Hinds)";
69
70static int pc_debug;
71
72module_param(pc_debug, int, 0644);
73
74#define debug(lvl, fmt, arg...) do { \
75 if (pc_debug > (lvl)) \
76 printk(KERN_DEBUG "i82365: " fmt , ## arg); \
77} while (0)
78#else
79#define debug(lvl, fmt, arg...) do { } while (0)
80#endif
81 66
82static irqreturn_t i365_count_irq(int, void *); 67static irqreturn_t i365_count_irq(int, void *);
83static inline int _check_irq(int irq, int flags) 68static inline int _check_irq(int irq, int flags)
@@ -501,13 +486,13 @@ static irqreturn_t i365_count_irq(int irq, void *dev)
501{ 486{
502 i365_get(irq_sock, I365_CSC); 487 i365_get(irq_sock, I365_CSC);
503 irq_hits++; 488 irq_hits++;
504 debug(2, "-> hit on irq %d\n", irq); 489 pr_debug("i82365: -> hit on irq %d\n", irq);
505 return IRQ_HANDLED; 490 return IRQ_HANDLED;
506} 491}
507 492
508static u_int __init test_irq(u_short sock, int irq) 493static u_int __init test_irq(u_short sock, int irq)
509{ 494{
510 debug(2, " testing ISA irq %d\n", irq); 495 pr_debug("i82365: testing ISA irq %d\n", irq);
511 if (request_irq(irq, i365_count_irq, IRQF_PROBE_SHARED, "scan", 496 if (request_irq(irq, i365_count_irq, IRQF_PROBE_SHARED, "scan",
512 i365_count_irq) != 0) 497 i365_count_irq) != 0)
513 return 1; 498 return 1;
@@ -515,7 +500,7 @@ static u_int __init test_irq(u_short sock, int irq)
515 msleep(10); 500 msleep(10);
516 if (irq_hits) { 501 if (irq_hits) {
517 free_irq(irq, i365_count_irq); 502 free_irq(irq, i365_count_irq);
518 debug(2, " spurious hit!\n"); 503 pr_debug("i82365: spurious hit!\n");
519 return 1; 504 return 1;
520 } 505 }
521 506
@@ -528,7 +513,7 @@ static u_int __init test_irq(u_short sock, int irq)
528 513
529 /* mask all interrupts */ 514 /* mask all interrupts */
530 i365_set(sock, I365_CSCINT, 0); 515 i365_set(sock, I365_CSCINT, 0);
531 debug(2, " hits = %d\n", irq_hits); 516 pr_debug("i82365: hits = %d\n", irq_hits);
532 517
533 return (irq_hits != 1); 518 return (irq_hits != 1);
534} 519}
@@ -854,7 +839,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev)
854 u_long flags = 0; 839 u_long flags = 0;
855 int handled = 0; 840 int handled = 0;
856 841
857 debug(4, "pcic_interrupt(%d)\n", irq); 842 pr_debug("pcic_interrupt(%d)\n", irq);
858 843
859 for (j = 0; j < 20; j++) { 844 for (j = 0; j < 20; j++) {
860 active = 0; 845 active = 0;
@@ -878,7 +863,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev)
878 events |= (csc & I365_CSC_READY) ? SS_READY : 0; 863 events |= (csc & I365_CSC_READY) ? SS_READY : 0;
879 } 864 }
880 ISA_UNLOCK(i, flags); 865 ISA_UNLOCK(i, flags);
881 debug(2, "socket %d event 0x%02x\n", i, events); 866 pr_debug("socket %d event 0x%02x\n", i, events);
882 867
883 if (events) 868 if (events)
884 pcmcia_parse_events(&socket[i].socket, events); 869 pcmcia_parse_events(&socket[i].socket, events);
@@ -890,7 +875,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev)
890 if (j == 20) 875 if (j == 20)
891 printk(KERN_NOTICE "i82365: infinite loop in interrupt handler\n"); 876 printk(KERN_NOTICE "i82365: infinite loop in interrupt handler\n");
892 877
893 debug(4, "interrupt done\n"); 878 pr_debug("pcic_interrupt done\n");
894 return IRQ_RETVAL(handled); 879 return IRQ_RETVAL(handled);
895} /* pcic_interrupt */ 880} /* pcic_interrupt */
896 881
@@ -932,7 +917,7 @@ static int i365_get_status(u_short sock, u_int *value)
932 } 917 }
933 } 918 }
934 919
935 debug(1, "GetStatus(%d) = %#4.4x\n", sock, *value); 920 pr_debug("GetStatus(%d) = %#4.4x\n", sock, *value);
936 return 0; 921 return 0;
937} /* i365_get_status */ 922} /* i365_get_status */
938 923
@@ -943,7 +928,7 @@ static int i365_set_socket(u_short sock, socket_state_t *state)
943 struct i82365_socket *t = &socket[sock]; 928 struct i82365_socket *t = &socket[sock];
944 u_char reg; 929 u_char reg;
945 930
946 debug(1, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 931 pr_debug("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
947 "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, 932 "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags,
948 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 933 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
949 934
@@ -1052,7 +1037,7 @@ static int i365_set_io_map(u_short sock, struct pccard_io_map *io)
1052{ 1037{
1053 u_char map, ioctl; 1038 u_char map, ioctl;
1054 1039
1055 debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " 1040 pr_debug("SetIOMap(%d, %d, %#2.2x, %d ns, "
1056 "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed, 1041 "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed,
1057 (unsigned long long)io->start, (unsigned long long)io->stop); 1042 (unsigned long long)io->start, (unsigned long long)io->stop);
1058 map = io->map; 1043 map = io->map;
@@ -1082,7 +1067,7 @@ static int i365_set_mem_map(u_short sock, struct pccard_mem_map *mem)
1082 u_short base, i; 1067 u_short base, i;
1083 u_char map; 1068 u_char map;
1084 1069
1085 debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, %#llx-%#llx, " 1070 pr_debug("SetMemMap(%d, %d, %#2.2x, %d ns, %#llx-%#llx, "
1086 "%#x)\n", sock, mem->map, mem->flags, mem->speed, 1071 "%#x)\n", sock, mem->map, mem->flags, mem->speed,
1087 (unsigned long long)mem->res->start, 1072 (unsigned long long)mem->res->start,
1088 (unsigned long long)mem->res->end, mem->card_start); 1073 (unsigned long long)mem->res->end, mem->card_start);
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 7dfbee1dcd76..26a621c9e2fc 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -38,17 +38,6 @@
38 38
39#include "m32r_cfc.h" 39#include "m32r_cfc.h"
40 40
41#ifdef CONFIG_PCMCIA_DEBUG
42static int m32r_cfc_debug;
43module_param(m32r_cfc_debug, int, 0644);
44#define debug(lvl, fmt, arg...) do { \
45 if (m32r_cfc_debug > (lvl)) \
46 printk(KERN_DEBUG "m32r_cfc: " fmt , ## arg); \
47} while (0)
48#else
49#define debug(n, args...) do { } while (0)
50#endif
51
52/* Poll status interval -- 0 means default to interrupt */ 41/* Poll status interval -- 0 means default to interrupt */
53static int poll_interval = 0; 42static int poll_interval = 0;
54 43
@@ -123,7 +112,7 @@ void pcc_ioread_byte(int sock, unsigned long port, void *buf, size_t size,
123 unsigned char *bp = (unsigned char *)buf; 112 unsigned char *bp = (unsigned char *)buf;
124 unsigned long flags; 113 unsigned long flags;
125 114
126 debug(3, "m32r_cfc: pcc_ioread_byte: sock=%d, port=%#lx, buf=%p, " 115 pr_debug("m32r_cfc: pcc_ioread_byte: sock=%d, port=%#lx, buf=%p, "
127 "size=%u, nmemb=%d, flag=%d\n", 116 "size=%u, nmemb=%d, flag=%d\n",
128 sock, port, buf, size, nmemb, flag); 117 sock, port, buf, size, nmemb, flag);
129 118
@@ -132,7 +121,7 @@ void pcc_ioread_byte(int sock, unsigned long port, void *buf, size_t size,
132 printk("m32r_cfc:ioread_byte null port :%#lx\n",port); 121 printk("m32r_cfc:ioread_byte null port :%#lx\n",port);
133 return; 122 return;
134 } 123 }
135 debug(3, "m32r_cfc: pcc_ioread_byte: addr=%#lx\n", addr); 124 pr_debug("m32r_cfc: pcc_ioread_byte: addr=%#lx\n", addr);
136 125
137 spin_lock_irqsave(&pcc_lock, flags); 126 spin_lock_irqsave(&pcc_lock, flags);
138 /* read Byte */ 127 /* read Byte */
@@ -148,7 +137,7 @@ void pcc_ioread_word(int sock, unsigned long port, void *buf, size_t size,
148 unsigned short *bp = (unsigned short *)buf; 137 unsigned short *bp = (unsigned short *)buf;
149 unsigned long flags; 138 unsigned long flags;
150 139
151 debug(3, "m32r_cfc: pcc_ioread_word: sock=%d, port=%#lx, " 140 pr_debug("m32r_cfc: pcc_ioread_word: sock=%d, port=%#lx, "
152 "buf=%p, size=%u, nmemb=%d, flag=%d\n", 141 "buf=%p, size=%u, nmemb=%d, flag=%d\n",
153 sock, port, buf, size, nmemb, flag); 142 sock, port, buf, size, nmemb, flag);
154 143
@@ -163,7 +152,7 @@ void pcc_ioread_word(int sock, unsigned long port, void *buf, size_t size,
163 printk("m32r_cfc:ioread_word null port :%#lx\n",port); 152 printk("m32r_cfc:ioread_word null port :%#lx\n",port);
164 return; 153 return;
165 } 154 }
166 debug(3, "m32r_cfc: pcc_ioread_word: addr=%#lx\n", addr); 155 pr_debug("m32r_cfc: pcc_ioread_word: addr=%#lx\n", addr);
167 156
168 spin_lock_irqsave(&pcc_lock, flags); 157 spin_lock_irqsave(&pcc_lock, flags);
169 /* read Word */ 158 /* read Word */
@@ -179,7 +168,7 @@ void pcc_iowrite_byte(int sock, unsigned long port, void *buf, size_t size,
179 unsigned char *bp = (unsigned char *)buf; 168 unsigned char *bp = (unsigned char *)buf;
180 unsigned long flags; 169 unsigned long flags;
181 170
182 debug(3, "m32r_cfc: pcc_iowrite_byte: sock=%d, port=%#lx, " 171 pr_debug("m32r_cfc: pcc_iowrite_byte: sock=%d, port=%#lx, "
183 "buf=%p, size=%u, nmemb=%d, flag=%d\n", 172 "buf=%p, size=%u, nmemb=%d, flag=%d\n",
184 sock, port, buf, size, nmemb, flag); 173 sock, port, buf, size, nmemb, flag);
185 174
@@ -189,7 +178,7 @@ void pcc_iowrite_byte(int sock, unsigned long port, void *buf, size_t size,
189 printk("m32r_cfc:iowrite_byte null port:%#lx\n",port); 178 printk("m32r_cfc:iowrite_byte null port:%#lx\n",port);
190 return; 179 return;
191 } 180 }
192 debug(3, "m32r_cfc: pcc_iowrite_byte: addr=%#lx\n", addr); 181 pr_debug("m32r_cfc: pcc_iowrite_byte: addr=%#lx\n", addr);
193 182
194 spin_lock_irqsave(&pcc_lock, flags); 183 spin_lock_irqsave(&pcc_lock, flags);
195 while (nmemb--) 184 while (nmemb--)
@@ -204,7 +193,7 @@ void pcc_iowrite_word(int sock, unsigned long port, void *buf, size_t size,
204 unsigned short *bp = (unsigned short *)buf; 193 unsigned short *bp = (unsigned short *)buf;
205 unsigned long flags; 194 unsigned long flags;
206 195
207 debug(3, "m32r_cfc: pcc_iowrite_word: sock=%d, port=%#lx, " 196 pr_debug("m32r_cfc: pcc_iowrite_word: sock=%d, port=%#lx, "
208 "buf=%p, size=%u, nmemb=%d, flag=%d\n", 197 "buf=%p, size=%u, nmemb=%d, flag=%d\n",
209 sock, port, buf, size, nmemb, flag); 198 sock, port, buf, size, nmemb, flag);
210 199
@@ -226,7 +215,7 @@ void pcc_iowrite_word(int sock, unsigned long port, void *buf, size_t size,
226 return; 215 return;
227 } 216 }
228#endif 217#endif
229 debug(3, "m32r_cfc: pcc_iowrite_word: addr=%#lx\n", addr); 218 pr_debug("m32r_cfc: pcc_iowrite_word: addr=%#lx\n", addr);
230 219
231 spin_lock_irqsave(&pcc_lock, flags); 220 spin_lock_irqsave(&pcc_lock, flags);
232 while (nmemb--) 221 while (nmemb--)
@@ -262,7 +251,7 @@ static struct timer_list poll_timer;
262static unsigned int pcc_get(u_short sock, unsigned int reg) 251static unsigned int pcc_get(u_short sock, unsigned int reg)
263{ 252{
264 unsigned int val = inw(reg); 253 unsigned int val = inw(reg);
265 debug(3, "m32r_cfc: pcc_get: reg(0x%08x)=0x%04x\n", reg, val); 254 pr_debug("m32r_cfc: pcc_get: reg(0x%08x)=0x%04x\n", reg, val);
266 return val; 255 return val;
267} 256}
268 257
@@ -270,7 +259,7 @@ static unsigned int pcc_get(u_short sock, unsigned int reg)
270static void pcc_set(u_short sock, unsigned int reg, unsigned int data) 259static void pcc_set(u_short sock, unsigned int reg, unsigned int data)
271{ 260{
272 outw(data, reg); 261 outw(data, reg);
273 debug(3, "m32r_cfc: pcc_set: reg(0x%08x)=0x%04x\n", reg, data); 262 pr_debug("m32r_cfc: pcc_set: reg(0x%08x)=0x%04x\n", reg, data);
274} 263}
275 264
276/*====================================================================== 265/*======================================================================
@@ -286,14 +275,14 @@ static int __init is_alive(u_short sock)
286{ 275{
287 unsigned int stat; 276 unsigned int stat;
288 277
289 debug(3, "m32r_cfc: is_alive:\n"); 278 pr_debug("m32r_cfc: is_alive:\n");
290 279
291 printk("CF: "); 280 printk("CF: ");
292 stat = pcc_get(sock, (unsigned int)PLD_CFSTS); 281 stat = pcc_get(sock, (unsigned int)PLD_CFSTS);
293 if (!stat) 282 if (!stat)
294 printk("No "); 283 printk("No ");
295 printk("Card is detected at socket %d : stat = 0x%08x\n", sock, stat); 284 printk("Card is detected at socket %d : stat = 0x%08x\n", sock, stat);
296 debug(3, "m32r_cfc: is_alive: sock stat is 0x%04x\n", stat); 285 pr_debug("m32r_cfc: is_alive: sock stat is 0x%04x\n", stat);
297 286
298 return 0; 287 return 0;
299} 288}
@@ -303,7 +292,7 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
303{ 292{
304 pcc_socket_t *t = &socket[pcc_sockets]; 293 pcc_socket_t *t = &socket[pcc_sockets];
305 294
306 debug(3, "m32r_cfc: add_pcc_socket: base=%#lx, irq=%d, " 295 pr_debug("m32r_cfc: add_pcc_socket: base=%#lx, irq=%d, "
307 "mapaddr=%#lx, ioaddr=%08x\n", 296 "mapaddr=%#lx, ioaddr=%08x\n",
308 base, irq, mapaddr, ioaddr); 297 base, irq, mapaddr, ioaddr);
309 298
@@ -358,7 +347,7 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
358 /* eject interrupt */ 347 /* eject interrupt */
359 request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt); 348 request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt);
360#endif 349#endif
361 debug(3, "m32r_cfc: enable CFMSK, RDYSEL\n"); 350 pr_debug("m32r_cfc: enable CFMSK, RDYSEL\n");
362 pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01); 351 pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01);
363#endif /* CONFIG_PLAT_USRV */ 352#endif /* CONFIG_PLAT_USRV */
364#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) 353#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT)
@@ -378,26 +367,26 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
378 u_int events = 0; 367 u_int events = 0;
379 int handled = 0; 368 int handled = 0;
380 369
381 debug(3, "m32r_cfc: pcc_interrupt: irq=%d, dev=%p\n", irq, dev); 370 pr_debug("m32r_cfc: pcc_interrupt: irq=%d, dev=%p\n", irq, dev);
382 for (i = 0; i < pcc_sockets; i++) { 371 for (i = 0; i < pcc_sockets; i++) {
383 if (socket[i].cs_irq1 != irq && socket[i].cs_irq2 != irq) 372 if (socket[i].cs_irq1 != irq && socket[i].cs_irq2 != irq)
384 continue; 373 continue;
385 374
386 handled = 1; 375 handled = 1;
387 debug(3, "m32r_cfc: pcc_interrupt: socket %d irq 0x%02x ", 376 pr_debug("m32r_cfc: pcc_interrupt: socket %d irq 0x%02x ",
388 i, irq); 377 i, irq);
389 events |= SS_DETECT; /* insert or eject */ 378 events |= SS_DETECT; /* insert or eject */
390 if (events) 379 if (events)
391 pcmcia_parse_events(&socket[i].socket, events); 380 pcmcia_parse_events(&socket[i].socket, events);
392 } 381 }
393 debug(3, "m32r_cfc: pcc_interrupt: done\n"); 382 pr_debug("m32r_cfc: pcc_interrupt: done\n");
394 383
395 return IRQ_RETVAL(handled); 384 return IRQ_RETVAL(handled);
396} /* pcc_interrupt */ 385} /* pcc_interrupt */
397 386
398static void pcc_interrupt_wrapper(u_long data) 387static void pcc_interrupt_wrapper(u_long data)
399{ 388{
400 debug(3, "m32r_cfc: pcc_interrupt_wrapper:\n"); 389 pr_debug("m32r_cfc: pcc_interrupt_wrapper:\n");
401 pcc_interrupt(0, NULL); 390 pcc_interrupt(0, NULL);
402 init_timer(&poll_timer); 391 init_timer(&poll_timer);
403 poll_timer.expires = jiffies + poll_interval; 392 poll_timer.expires = jiffies + poll_interval;
@@ -410,17 +399,17 @@ static int _pcc_get_status(u_short sock, u_int *value)
410{ 399{
411 u_int status; 400 u_int status;
412 401
413 debug(3, "m32r_cfc: _pcc_get_status:\n"); 402 pr_debug("m32r_cfc: _pcc_get_status:\n");
414 status = pcc_get(sock, (unsigned int)PLD_CFSTS); 403 status = pcc_get(sock, (unsigned int)PLD_CFSTS);
415 *value = (status) ? SS_DETECT : 0; 404 *value = (status) ? SS_DETECT : 0;
416 debug(3, "m32r_cfc: _pcc_get_status: status=0x%08x\n", status); 405 pr_debug("m32r_cfc: _pcc_get_status: status=0x%08x\n", status);
417 406
418#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) 407#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT)
419 if ( status ) { 408 if ( status ) {
420 /* enable CF power */ 409 /* enable CF power */
421 status = inw((unsigned int)PLD_CPCR); 410 status = inw((unsigned int)PLD_CPCR);
422 if (!(status & PLD_CPCR_CF)) { 411 if (!(status & PLD_CPCR_CF)) {
423 debug(3, "m32r_cfc: _pcc_get_status: " 412 pr_debug("m32r_cfc: _pcc_get_status: "
424 "power on (CPCR=0x%08x)\n", status); 413 "power on (CPCR=0x%08x)\n", status);
425 status |= PLD_CPCR_CF; 414 status |= PLD_CPCR_CF;
426 outw(status, (unsigned int)PLD_CPCR); 415 outw(status, (unsigned int)PLD_CPCR);
@@ -439,7 +428,7 @@ static int _pcc_get_status(u_short sock, u_int *value)
439 status &= ~PLD_CPCR_CF; 428 status &= ~PLD_CPCR_CF;
440 outw(status, (unsigned int)PLD_CPCR); 429 outw(status, (unsigned int)PLD_CPCR);
441 udelay(100); 430 udelay(100);
442 debug(3, "m32r_cfc: _pcc_get_status: " 431 pr_debug("m32r_cfc: _pcc_get_status: "
443 "power off (CPCR=0x%08x)\n", status); 432 "power off (CPCR=0x%08x)\n", status);
444 } 433 }
445#elif defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) 434#elif defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3)
@@ -465,13 +454,13 @@ static int _pcc_get_status(u_short sock, u_int *value)
465 /* disable CF power */ 454 /* disable CF power */
466 pcc_set(sock, (unsigned int)PLD_CPCR, 0); 455 pcc_set(sock, (unsigned int)PLD_CPCR, 0);
467 udelay(100); 456 udelay(100);
468 debug(3, "m32r_cfc: _pcc_get_status: " 457 pr_debug("m32r_cfc: _pcc_get_status: "
469 "power off (CPCR=0x%08x)\n", status); 458 "power off (CPCR=0x%08x)\n", status);
470 } 459 }
471#else 460#else
472#error no platform configuration 461#error no platform configuration
473#endif 462#endif
474 debug(3, "m32r_cfc: _pcc_get_status: GetStatus(%d) = %#4.4x\n", 463 pr_debug("m32r_cfc: _pcc_get_status: GetStatus(%d) = %#4.4x\n",
475 sock, *value); 464 sock, *value);
476 return 0; 465 return 0;
477} /* _get_status */ 466} /* _get_status */
@@ -480,7 +469,7 @@ static int _pcc_get_status(u_short sock, u_int *value)
480 469
481static int _pcc_set_socket(u_short sock, socket_state_t *state) 470static int _pcc_set_socket(u_short sock, socket_state_t *state)
482{ 471{
483 debug(3, "m32r_cfc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 472 pr_debug("m32r_cfc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
484 "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, 473 "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags,
485 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 474 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
486 475
@@ -492,41 +481,39 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state)
492 } 481 }
493#endif 482#endif
494 if (state->flags & SS_RESET) { 483 if (state->flags & SS_RESET) {
495 debug(3, ":RESET\n"); 484 pr_debug(":RESET\n");
496 pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x101); 485 pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x101);
497 }else{ 486 }else{
498 pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x100); 487 pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x100);
499 } 488 }
500 if (state->flags & SS_OUTPUT_ENA){ 489 if (state->flags & SS_OUTPUT_ENA){
501 debug(3, ":OUTPUT_ENA\n"); 490 pr_debug(":OUTPUT_ENA\n");
502 /* bit clear */ 491 /* bit clear */
503 pcc_set(sock,(unsigned int)PLD_CFBUFCR,0); 492 pcc_set(sock,(unsigned int)PLD_CFBUFCR,0);
504 } else { 493 } else {
505 pcc_set(sock,(unsigned int)PLD_CFBUFCR,1); 494 pcc_set(sock,(unsigned int)PLD_CFBUFCR,1);
506 } 495 }
507 496
508#ifdef CONFIG_PCMCIA_DEBUG
509 if(state->flags & SS_IOCARD){ 497 if(state->flags & SS_IOCARD){
510 debug(3, ":IOCARD"); 498 pr_debug(":IOCARD");
511 } 499 }
512 if (state->flags & SS_PWR_AUTO) { 500 if (state->flags & SS_PWR_AUTO) {
513 debug(3, ":PWR_AUTO"); 501 pr_debug(":PWR_AUTO");
514 } 502 }
515 if (state->csc_mask & SS_DETECT) 503 if (state->csc_mask & SS_DETECT)
516 debug(3, ":csc-SS_DETECT"); 504 pr_debug(":csc-SS_DETECT");
517 if (state->flags & SS_IOCARD) { 505 if (state->flags & SS_IOCARD) {
518 if (state->csc_mask & SS_STSCHG) 506 if (state->csc_mask & SS_STSCHG)
519 debug(3, ":STSCHG"); 507 pr_debug(":STSCHG");
520 } else { 508 } else {
521 if (state->csc_mask & SS_BATDEAD) 509 if (state->csc_mask & SS_BATDEAD)
522 debug(3, ":BATDEAD"); 510 pr_debug(":BATDEAD");
523 if (state->csc_mask & SS_BATWARN) 511 if (state->csc_mask & SS_BATWARN)
524 debug(3, ":BATWARN"); 512 pr_debug(":BATWARN");
525 if (state->csc_mask & SS_READY) 513 if (state->csc_mask & SS_READY)
526 debug(3, ":READY"); 514 pr_debug(":READY");
527 } 515 }
528 debug(3, "\n"); 516 pr_debug("\n");
529#endif
530 return 0; 517 return 0;
531} /* _set_socket */ 518} /* _set_socket */
532 519
@@ -536,7 +523,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io)
536{ 523{
537 u_char map; 524 u_char map;
538 525
539 debug(3, "m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, " 526 pr_debug("m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, "
540 "%#llx-%#llx)\n", sock, io->map, io->flags, 527 "%#llx-%#llx)\n", sock, io->map, io->flags,
541 io->speed, (unsigned long long)io->start, 528 io->speed, (unsigned long long)io->start,
542 (unsigned long long)io->stop); 529 (unsigned long long)io->stop);
@@ -554,7 +541,7 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem)
554 u_long addr; 541 u_long addr;
555 pcc_socket_t *t = &socket[sock]; 542 pcc_socket_t *t = &socket[sock];
556 543
557 debug(3, "m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, " 544 pr_debug("m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, "
558 "%#llx, %#x)\n", sock, map, mem->flags, 545 "%#llx, %#x)\n", sock, map, mem->flags,
559 mem->speed, (unsigned long long)mem->static_start, 546 mem->speed, (unsigned long long)mem->static_start,
560 mem->card_start); 547 mem->card_start);
@@ -640,11 +627,11 @@ static int pcc_get_status(struct pcmcia_socket *s, u_int *value)
640 unsigned int sock = container_of(s, struct pcc_socket, socket)->number; 627 unsigned int sock = container_of(s, struct pcc_socket, socket)->number;
641 628
642 if (socket[sock].flags & IS_ALIVE) { 629 if (socket[sock].flags & IS_ALIVE) {
643 debug(3, "m32r_cfc: pcc_get_status: sock(%d) -EINVAL\n", sock); 630 dev_dbg(&s->dev, "pcc_get_status: sock(%d) -EINVAL\n", sock);
644 *value = 0; 631 *value = 0;
645 return -EINVAL; 632 return -EINVAL;
646 } 633 }
647 debug(3, "m32r_cfc: pcc_get_status: sock(%d)\n", sock); 634 dev_dbg(&s->dev, "pcc_get_status: sock(%d)\n", sock);
648 LOCKED(_pcc_get_status(sock, value)); 635 LOCKED(_pcc_get_status(sock, value));
649} 636}
650 637
@@ -653,10 +640,10 @@ static int pcc_set_socket(struct pcmcia_socket *s, socket_state_t *state)
653 unsigned int sock = container_of(s, struct pcc_socket, socket)->number; 640 unsigned int sock = container_of(s, struct pcc_socket, socket)->number;
654 641
655 if (socket[sock].flags & IS_ALIVE) { 642 if (socket[sock].flags & IS_ALIVE) {
656 debug(3, "m32r_cfc: pcc_set_socket: sock(%d) -EINVAL\n", sock); 643 dev_dbg(&s->dev, "pcc_set_socket: sock(%d) -EINVAL\n", sock);
657 return -EINVAL; 644 return -EINVAL;
658 } 645 }
659 debug(3, "m32r_cfc: pcc_set_socket: sock(%d)\n", sock); 646 dev_dbg(&s->dev, "pcc_set_socket: sock(%d)\n", sock);
660 LOCKED(_pcc_set_socket(sock, state)); 647 LOCKED(_pcc_set_socket(sock, state));
661} 648}
662 649
@@ -665,10 +652,10 @@ static int pcc_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
665 unsigned int sock = container_of(s, struct pcc_socket, socket)->number; 652 unsigned int sock = container_of(s, struct pcc_socket, socket)->number;
666 653
667 if (socket[sock].flags & IS_ALIVE) { 654 if (socket[sock].flags & IS_ALIVE) {
668 debug(3, "m32r_cfc: pcc_set_io_map: sock(%d) -EINVAL\n", sock); 655 dev_dbg(&s->dev, "pcc_set_io_map: sock(%d) -EINVAL\n", sock);
669 return -EINVAL; 656 return -EINVAL;
670 } 657 }
671 debug(3, "m32r_cfc: pcc_set_io_map: sock(%d)\n", sock); 658 dev_dbg(&s->dev, "pcc_set_io_map: sock(%d)\n", sock);
672 LOCKED(_pcc_set_io_map(sock, io)); 659 LOCKED(_pcc_set_io_map(sock, io));
673} 660}
674 661
@@ -677,16 +664,16 @@ static int pcc_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem)
677 unsigned int sock = container_of(s, struct pcc_socket, socket)->number; 664 unsigned int sock = container_of(s, struct pcc_socket, socket)->number;
678 665
679 if (socket[sock].flags & IS_ALIVE) { 666 if (socket[sock].flags & IS_ALIVE) {
680 debug(3, "m32r_cfc: pcc_set_mem_map: sock(%d) -EINVAL\n", sock); 667 dev_dbg(&s->dev, "pcc_set_mem_map: sock(%d) -EINVAL\n", sock);
681 return -EINVAL; 668 return -EINVAL;
682 } 669 }
683 debug(3, "m32r_cfc: pcc_set_mem_map: sock(%d)\n", sock); 670 dev_dbg(&s->dev, "pcc_set_mem_map: sock(%d)\n", sock);
684 LOCKED(_pcc_set_mem_map(sock, mem)); 671 LOCKED(_pcc_set_mem_map(sock, mem));
685} 672}
686 673
687static int pcc_init(struct pcmcia_socket *s) 674static int pcc_init(struct pcmcia_socket *s)
688{ 675{
689 debug(3, "m32r_cfc: pcc_init()\n"); 676 dev_dbg(&s->dev, "pcc_init()\n");
690 return 0; 677 return 0;
691} 678}
692 679
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index c6524f99ccc3..72844c5a6d05 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -45,16 +45,6 @@
45 45
46#define PCC_DEBUG_DBEX 46#define PCC_DEBUG_DBEX
47 47
48#ifdef CONFIG_PCMCIA_DEBUG
49static int m32r_pcc_debug;
50module_param(m32r_pcc_debug, int, 0644);
51#define debug(lvl, fmt, arg...) do { \
52 if (m32r_pcc_debug > (lvl)) \
53 printk(KERN_DEBUG "m32r_pcc: " fmt , ## arg); \
54} while (0)
55#else
56#define debug(n, args...) do { } while (0)
57#endif
58 48
59/* Poll status interval -- 0 means default to interrupt */ 49/* Poll status interval -- 0 means default to interrupt */
60static int poll_interval = 0; 50static int poll_interval = 0;
@@ -358,7 +348,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
358 u_int events, active; 348 u_int events, active;
359 int handled = 0; 349 int handled = 0;
360 350
361 debug(4, "m32r: pcc_interrupt(%d)\n", irq); 351 pr_debug("m32r_pcc: pcc_interrupt(%d)\n", irq);
362 352
363 for (j = 0; j < 20; j++) { 353 for (j = 0; j < 20; j++) {
364 active = 0; 354 active = 0;
@@ -369,13 +359,14 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
369 handled = 1; 359 handled = 1;
370 irc = pcc_get(i, PCIRC); 360 irc = pcc_get(i, PCIRC);
371 irc >>=16; 361 irc >>=16;
372 debug(2, "m32r-pcc:interrupt: socket %d pcirc 0x%02x ", i, irc); 362 pr_debug("m32r_pcc: interrupt: socket %d pcirc 0x%02x ",
363 i, irc);
373 if (!irc) 364 if (!irc)
374 continue; 365 continue;
375 366
376 events = (irc) ? SS_DETECT : 0; 367 events = (irc) ? SS_DETECT : 0;
377 events |= (pcc_get(i,PCCR) & PCCR_PCEN) ? SS_READY : 0; 368 events |= (pcc_get(i,PCCR) & PCCR_PCEN) ? SS_READY : 0;
378 debug(2, " event 0x%02x\n", events); 369 pr_debug("m32r_pcc: event 0x%02x\n", events);
379 370
380 if (events) 371 if (events)
381 pcmcia_parse_events(&socket[i].socket, events); 372 pcmcia_parse_events(&socket[i].socket, events);
@@ -388,7 +379,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
388 if (j == 20) 379 if (j == 20)
389 printk(KERN_NOTICE "m32r-pcc: infinite loop in interrupt handler\n"); 380 printk(KERN_NOTICE "m32r-pcc: infinite loop in interrupt handler\n");
390 381
391 debug(4, "m32r-pcc: interrupt done\n"); 382 pr_debug("m32r_pcc: interrupt done\n");
392 383
393 return IRQ_RETVAL(handled); 384 return IRQ_RETVAL(handled);
394} /* pcc_interrupt */ 385} /* pcc_interrupt */
@@ -422,7 +413,7 @@ static int _pcc_get_status(u_short sock, u_int *value)
422 status = pcc_get(sock,PCCSIGCR); 413 status = pcc_get(sock,PCCSIGCR);
423 *value |= (status & PCCSIGCR_VEN) ? SS_POWERON : 0; 414 *value |= (status & PCCSIGCR_VEN) ? SS_POWERON : 0;
424 415
425 debug(3, "m32r-pcc: GetStatus(%d) = %#4.4x\n", sock, *value); 416 pr_debug("m32r_pcc: GetStatus(%d) = %#4.4x\n", sock, *value);
426 return 0; 417 return 0;
427} /* _get_status */ 418} /* _get_status */
428 419
@@ -432,7 +423,7 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state)
432{ 423{
433 u_long reg = 0; 424 u_long reg = 0;
434 425
435 debug(3, "m32r-pcc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 426 pr_debug("m32r_pcc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
436 "io_irq %d, csc_mask %#2.2x)", sock, state->flags, 427 "io_irq %d, csc_mask %#2.2x)", sock, state->flags,
437 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 428 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
438 429
@@ -448,11 +439,11 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state)
448 } 439 }
449 440
450 if (state->flags & SS_RESET) { 441 if (state->flags & SS_RESET) {
451 debug(3, ":RESET\n"); 442 pr_debug("m32r_pcc: :RESET\n");
452 reg |= PCCSIGCR_CRST; 443 reg |= PCCSIGCR_CRST;
453 } 444 }
454 if (state->flags & SS_OUTPUT_ENA){ 445 if (state->flags & SS_OUTPUT_ENA){
455 debug(3, ":OUTPUT_ENA\n"); 446 pr_debug("m32r_pcc: :OUTPUT_ENA\n");
456 /* bit clear */ 447 /* bit clear */
457 } else { 448 } else {
458 reg |= PCCSIGCR_SEN; 449 reg |= PCCSIGCR_SEN;
@@ -460,28 +451,26 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state)
460 451
461 pcc_set(sock,PCCSIGCR,reg); 452 pcc_set(sock,PCCSIGCR,reg);
462 453
463#ifdef CONFIG_PCMCIA_DEBUG
464 if(state->flags & SS_IOCARD){ 454 if(state->flags & SS_IOCARD){
465 debug(3, ":IOCARD"); 455 pr_debug("m32r_pcc: :IOCARD");
466 } 456 }
467 if (state->flags & SS_PWR_AUTO) { 457 if (state->flags & SS_PWR_AUTO) {
468 debug(3, ":PWR_AUTO"); 458 pr_debug("m32r_pcc: :PWR_AUTO");
469 } 459 }
470 if (state->csc_mask & SS_DETECT) 460 if (state->csc_mask & SS_DETECT)
471 debug(3, ":csc-SS_DETECT"); 461 pr_debug("m32r_pcc: :csc-SS_DETECT");
472 if (state->flags & SS_IOCARD) { 462 if (state->flags & SS_IOCARD) {
473 if (state->csc_mask & SS_STSCHG) 463 if (state->csc_mask & SS_STSCHG)
474 debug(3, ":STSCHG"); 464 pr_debug("m32r_pcc: :STSCHG");
475 } else { 465 } else {
476 if (state->csc_mask & SS_BATDEAD) 466 if (state->csc_mask & SS_BATDEAD)
477 debug(3, ":BATDEAD"); 467 pr_debug("m32r_pcc: :BATDEAD");
478 if (state->csc_mask & SS_BATWARN) 468 if (state->csc_mask & SS_BATWARN)
479 debug(3, ":BATWARN"); 469 pr_debug("m32r_pcc: :BATWARN");
480 if (state->csc_mask & SS_READY) 470 if (state->csc_mask & SS_READY)
481 debug(3, ":READY"); 471 pr_debug("m32r_pcc: :READY");
482 } 472 }
483 debug(3, "\n"); 473 pr_debug("m32r_pcc: \n");
484#endif
485 return 0; 474 return 0;
486} /* _set_socket */ 475} /* _set_socket */
487 476
@@ -491,7 +480,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io)
491{ 480{
492 u_char map; 481 u_char map;
493 482
494 debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " 483 pr_debug("m32r_pcc: SetIOMap(%d, %d, %#2.2x, %d ns, "
495 "%#llx-%#llx)\n", sock, io->map, io->flags, 484 "%#llx-%#llx)\n", sock, io->map, io->flags,
496 io->speed, (unsigned long long)io->start, 485 io->speed, (unsigned long long)io->start,
497 (unsigned long long)io->stop); 486 (unsigned long long)io->stop);
@@ -515,7 +504,7 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem)
515#endif 504#endif
516#endif 505#endif
517 506
518 debug(3, "m32r-pcc: SetMemMap(%d, %d, %#2.2x, %d ns, " 507 pr_debug("m32r_pcc: SetMemMap(%d, %d, %#2.2x, %d ns, "
519 "%#llx, %#x)\n", sock, map, mem->flags, 508 "%#llx, %#x)\n", sock, map, mem->flags,
520 mem->speed, (unsigned long long)mem->static_start, 509 mem->speed, (unsigned long long)mem->static_start,
521 mem->card_start); 510 mem->card_start);
@@ -662,7 +651,7 @@ static int pcc_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem)
662 651
663static int pcc_init(struct pcmcia_socket *s) 652static int pcc_init(struct pcmcia_socket *s)
664{ 653{
665 debug(4, "m32r-pcc: init call\n"); 654 pr_debug("m32r_pcc: init call\n");
666 return 0; 655 return 0;
667} 656}
668 657
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 403559ba49dd..7f79c4e169ae 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -64,14 +64,6 @@
64#include <pcmcia/cs.h> 64#include <pcmcia/cs.h>
65#include <pcmcia/ss.h> 65#include <pcmcia/ss.h>
66 66
67#ifdef CONFIG_PCMCIA_DEBUG
68static int pc_debug;
69module_param(pc_debug, int, 0);
70#define dprintk(args...) printk(KERN_DEBUG "m8xx_pcmcia: " args);
71#else
72#define dprintk(args...)
73#endif
74
75#define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args) 67#define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args)
76#define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args) 68#define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args)
77 69
@@ -565,7 +557,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
565 unsigned int i, events, pscr, pipr, per; 557 unsigned int i, events, pscr, pipr, per;
566 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 558 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
567 559
568 dprintk("Interrupt!\n"); 560 pr_debug("m8xx_pcmcia: Interrupt!\n");
569 /* get interrupt sources */ 561 /* get interrupt sources */
570 562
571 pscr = in_be32(&pcmcia->pcmc_pscr); 563 pscr = in_be32(&pcmcia->pcmc_pscr);
@@ -614,7 +606,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
614 606
615 /* call the handler */ 607 /* call the handler */
616 608
617 dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, " 609 pr_debug("m8xx_pcmcia: slot %u: events = 0x%02x, pscr = 0x%08x, "
618 "pipr = 0x%08x\n", i, events, pscr, pipr); 610 "pipr = 0x%08x\n", i, events, pscr, pipr);
619 611
620 if (events) { 612 if (events) {
@@ -641,7 +633,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
641 /* clear the interrupt sources */ 633 /* clear the interrupt sources */
642 out_be32(&pcmcia->pcmc_pscr, pscr); 634 out_be32(&pcmcia->pcmc_pscr, pscr);
643 635
644 dprintk("Interrupt done.\n"); 636 pr_debug("m8xx_pcmcia: Interrupt done.\n");
645 637
646 return IRQ_HANDLED; 638 return IRQ_HANDLED;
647} 639}
@@ -815,7 +807,7 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
815 }; 807 };
816 } 808 }
817 809
818 dprintk("GetStatus(%d) = %#2.2x\n", lsock, *value); 810 pr_debug("m8xx_pcmcia: GetStatus(%d) = %#2.2x\n", lsock, *value);
819 return 0; 811 return 0;
820} 812}
821 813
@@ -828,7 +820,7 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
828 unsigned long flags; 820 unsigned long flags;
829 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 821 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
830 822
831 dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 823 pr_debug("m8xx_pcmcia: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
832 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags, 824 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
833 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 825 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
834 826
@@ -974,7 +966,7 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
974#define M8XX_SIZE (io->stop - io->start + 1) 966#define M8XX_SIZE (io->stop - io->start + 1)
975#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) 967#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
976 968
977 dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, " 969 pr_debug("m8xx_pcmcia: SetIOMap(%d, %d, %#2.2x, %d ns, "
978 "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags, 970 "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags,
979 io->speed, (unsigned long long)io->start, 971 io->speed, (unsigned long long)io->start,
980 (unsigned long long)io->stop); 972 (unsigned long long)io->stop);
@@ -988,7 +980,7 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
988 980
989 if (io->flags & MAP_ACTIVE) { 981 if (io->flags & MAP_ACTIVE) {
990 982
991 dprintk("io->flags & MAP_ACTIVE\n"); 983 pr_debug("m8xx_pcmcia: io->flags & MAP_ACTIVE\n");
992 984
993 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) 985 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
994 + (lsock * PCMCIA_IO_WIN_NO) + io->map; 986 + (lsock * PCMCIA_IO_WIN_NO) + io->map;
@@ -1018,8 +1010,8 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
1018 1010
1019 out_be32(&w->or, reg); 1011 out_be32(&w->or, reg);
1020 1012
1021 dprintk("Socket %u: Mapped io window %u at %#8.8x, " 1013 pr_debug("m8xx_pcmcia: Socket %u: Mapped io window %u at "
1022 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); 1014 "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
1023 } else { 1015 } else {
1024 /* shutdown IO window */ 1016 /* shutdown IO window */
1025 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) 1017 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
@@ -1033,14 +1025,14 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
1033 out_be32(&w->or, 0); /* turn off window */ 1025 out_be32(&w->or, 0); /* turn off window */
1034 out_be32(&w->br, 0); /* turn off base address */ 1026 out_be32(&w->br, 0); /* turn off base address */
1035 1027
1036 dprintk("Socket %u: Unmapped io window %u at %#8.8x, " 1028 pr_debug("m8xx_pcmcia: Socket %u: Unmapped io window %u at "
1037 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); 1029 "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
1038 } 1030 }
1039 1031
1040 /* copy the struct and modify the copy */ 1032 /* copy the struct and modify the copy */
1041 s->io_win[io->map] = *io; 1033 s->io_win[io->map] = *io;
1042 s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE); 1034 s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
1043 dprintk("SetIOMap exit\n"); 1035 pr_debug("m8xx_pcmcia: SetIOMap exit\n");
1044 1036
1045 return 0; 1037 return 0;
1046} 1038}
@@ -1055,7 +1047,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1055 unsigned int reg, winnr; 1047 unsigned int reg, winnr;
1056 pcmconf8xx_t *pcmcia = s->pcmcia; 1048 pcmconf8xx_t *pcmcia = s->pcmcia;
1057 1049
1058 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " 1050 pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, "
1059 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, 1051 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags,
1060 mem->speed, (unsigned long long)mem->static_start, 1052 mem->speed, (unsigned long long)mem->static_start,
1061 mem->card_start); 1053 mem->card_start);
@@ -1098,7 +1090,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1098 1090
1099 out_be32(&w->or, reg); 1091 out_be32(&w->or, reg);
1100 1092
1101 dprintk("Socket %u: Mapped memory window %u at %#8.8x, " 1093 pr_debug("m8xx_pcmcia: Socket %u: Mapped memory window %u at %#8.8x, "
1102 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or); 1094 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
1103 1095
1104 if (mem->flags & MAP_ACTIVE) { 1096 if (mem->flags & MAP_ACTIVE) {
@@ -1108,7 +1100,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1108 + mem->card_start; 1100 + mem->card_start;
1109 } 1101 }
1110 1102
1111 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " 1103 pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, "
1112 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, 1104 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags,
1113 mem->speed, (unsigned long long)mem->static_start, 1105 mem->speed, (unsigned long long)mem->static_start,
1114 mem->card_start); 1106 mem->card_start);
@@ -1129,7 +1121,7 @@ static int m8xx_sock_init(struct pcmcia_socket *sock)
1129 pccard_io_map io = { 0, 0, 0, 0, 1 }; 1121 pccard_io_map io = { 0, 0, 0, 0, 1 };
1130 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 }; 1122 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
1131 1123
1132 dprintk("sock_init(%d)\n", s); 1124 pr_debug("m8xx_pcmcia: sock_init(%d)\n", s);
1133 1125
1134 m8xx_set_socket(sock, &dead_socket); 1126 m8xx_set_socket(sock, &dead_socket);
1135 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) { 1127 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
diff --git a/drivers/pcmcia/o2micro.h b/drivers/pcmcia/o2micro.h
index 72188c462c9c..624442fc0d35 100644
--- a/drivers/pcmcia/o2micro.h
+++ b/drivers/pcmcia/o2micro.h
@@ -30,28 +30,6 @@
30#ifndef _LINUX_O2MICRO_H 30#ifndef _LINUX_O2MICRO_H
31#define _LINUX_O2MICRO_H 31#define _LINUX_O2MICRO_H
32 32
33#ifndef PCI_VENDOR_ID_O2
34#define PCI_VENDOR_ID_O2 0x1217
35#endif
36#ifndef PCI_DEVICE_ID_O2_6729
37#define PCI_DEVICE_ID_O2_6729 0x6729
38#endif
39#ifndef PCI_DEVICE_ID_O2_6730
40#define PCI_DEVICE_ID_O2_6730 0x673a
41#endif
42#ifndef PCI_DEVICE_ID_O2_6832
43#define PCI_DEVICE_ID_O2_6832 0x6832
44#endif
45#ifndef PCI_DEVICE_ID_O2_6836
46#define PCI_DEVICE_ID_O2_6836 0x6836
47#endif
48#ifndef PCI_DEVICE_ID_O2_6812
49#define PCI_DEVICE_ID_O2_6812 0x6872
50#endif
51#ifndef PCI_DEVICE_ID_O2_6933
52#define PCI_DEVICE_ID_O2_6933 0x6933
53#endif
54
55/* Additional PCI configuration registers */ 33/* Additional PCI configuration registers */
56 34
57#define O2_MUX_CONTROL 0x90 /* 32 bit */ 35#define O2_MUX_CONTROL 0x90 /* 32 bit */
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 30cf71d2ee23..c4d7908fa37f 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -58,17 +58,6 @@ typedef struct user_info_t {
58} user_info_t; 58} user_info_t;
59 59
60 60
61#ifdef CONFIG_PCMCIA_DEBUG
62extern int ds_pc_debug;
63
64#define ds_dbg(lvl, fmt, arg...) do { \
65 if (ds_pc_debug >= lvl) \
66 printk(KERN_DEBUG "ds: " fmt , ## arg); \
67} while (0)
68#else
69#define ds_dbg(lvl, fmt, arg...) do { } while (0)
70#endif
71
72static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s, 61static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s,
73 unsigned int function) 62 unsigned int function)
74{ 63{
@@ -229,6 +218,61 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
229 return (ret); 218 return (ret);
230} 219}
231 220
221
222/** pcmcia_get_window
223 */
224static int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *wh_out,
225 window_handle_t wh, win_req_t *req)
226{
227 pccard_mem_map *win;
228 window_handle_t w;
229
230 wh--;
231 if (!s || !(s->state & SOCKET_PRESENT))
232 return -ENODEV;
233 if (wh >= MAX_WIN)
234 return -EINVAL;
235 for (w = wh; w < MAX_WIN; w++)
236 if (s->state & SOCKET_WIN_REQ(w))
237 break;
238 if (w == MAX_WIN)
239 return -EINVAL;
240 win = &s->win[w];
241 req->Base = win->res->start;
242 req->Size = win->res->end - win->res->start + 1;
243 req->AccessSpeed = win->speed;
244 req->Attributes = 0;
245 if (win->flags & MAP_ATTRIB)
246 req->Attributes |= WIN_MEMORY_TYPE_AM;
247 if (win->flags & MAP_ACTIVE)
248 req->Attributes |= WIN_ENABLE;
249 if (win->flags & MAP_16BIT)
250 req->Attributes |= WIN_DATA_WIDTH_16;
251 if (win->flags & MAP_USE_WAIT)
252 req->Attributes |= WIN_USE_WAIT;
253
254 *wh_out = w + 1;
255 return 0;
256} /* pcmcia_get_window */
257
258
259/** pcmcia_get_mem_page
260 *
261 * Change the card address of an already open memory window.
262 */
263static int pcmcia_get_mem_page(struct pcmcia_socket *skt, window_handle_t wh,
264 memreq_t *req)
265{
266 wh--;
267 if (wh >= MAX_WIN)
268 return -EINVAL;
269
270 req->Page = 0;
271 req->CardOffset = skt->win[wh].card_start;
272 return 0;
273} /* pcmcia_get_mem_page */
274
275
232/** pccard_get_status 276/** pccard_get_status
233 * 277 *
234 * Get the current socket state bits. We don't support the latched 278 * Get the current socket state bits. We don't support the latched
@@ -431,7 +475,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
431 if (!s) 475 if (!s)
432 return -EINVAL; 476 return -EINVAL;
433 477
434 ds_dbg(2, "bind_request(%d, '%s')\n", s->sock, 478 pr_debug("bind_request(%d, '%s')\n", s->sock,
435 (char *)bind_info->dev_info); 479 (char *)bind_info->dev_info);
436 480
437 p_drv = get_pcmcia_driver(&bind_info->dev_info); 481 p_drv = get_pcmcia_driver(&bind_info->dev_info);
@@ -623,7 +667,7 @@ static int ds_open(struct inode *inode, struct file *file)
623 static int warning_printed = 0; 667 static int warning_printed = 0;
624 int ret = 0; 668 int ret = 0;
625 669
626 ds_dbg(0, "ds_open(socket %d)\n", i); 670 pr_debug("ds_open(socket %d)\n", i);
627 671
628 lock_kernel(); 672 lock_kernel();
629 s = pcmcia_get_socket_by_nr(i); 673 s = pcmcia_get_socket_by_nr(i);
@@ -685,7 +729,7 @@ static int ds_release(struct inode *inode, struct file *file)
685 struct pcmcia_socket *s; 729 struct pcmcia_socket *s;
686 user_info_t *user, **link; 730 user_info_t *user, **link;
687 731
688 ds_dbg(0, "ds_release(socket %d)\n", iminor(inode)); 732 pr_debug("ds_release(socket %d)\n", iminor(inode));
689 733
690 user = file->private_data; 734 user = file->private_data;
691 if (CHECK_USER(user)) 735 if (CHECK_USER(user))
@@ -719,7 +763,7 @@ static ssize_t ds_read(struct file *file, char __user *buf,
719 user_info_t *user; 763 user_info_t *user;
720 int ret; 764 int ret;
721 765
722 ds_dbg(2, "ds_read(socket %d)\n", iminor(file->f_path.dentry->d_inode)); 766 pr_debug("ds_read(socket %d)\n", iminor(file->f_path.dentry->d_inode));
723 767
724 if (count < 4) 768 if (count < 4)
725 return -EINVAL; 769 return -EINVAL;
@@ -744,7 +788,7 @@ static ssize_t ds_read(struct file *file, char __user *buf,
744static ssize_t ds_write(struct file *file, const char __user *buf, 788static ssize_t ds_write(struct file *file, const char __user *buf,
745 size_t count, loff_t *ppos) 789 size_t count, loff_t *ppos)
746{ 790{
747 ds_dbg(2, "ds_write(socket %d)\n", iminor(file->f_path.dentry->d_inode)); 791 pr_debug("ds_write(socket %d)\n", iminor(file->f_path.dentry->d_inode));
748 792
749 if (count != 4) 793 if (count != 4)
750 return -EINVAL; 794 return -EINVAL;
@@ -762,7 +806,7 @@ static u_int ds_poll(struct file *file, poll_table *wait)
762 struct pcmcia_socket *s; 806 struct pcmcia_socket *s;
763 user_info_t *user; 807 user_info_t *user;
764 808
765 ds_dbg(2, "ds_poll(socket %d)\n", iminor(file->f_path.dentry->d_inode)); 809 pr_debug("ds_poll(socket %d)\n", iminor(file->f_path.dentry->d_inode));
766 810
767 user = file->private_data; 811 user = file->private_data;
768 if (CHECK_USER(user)) 812 if (CHECK_USER(user))
@@ -790,7 +834,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
790 ds_ioctl_arg_t *buf; 834 ds_ioctl_arg_t *buf;
791 user_info_t *user; 835 user_info_t *user;
792 836
793 ds_dbg(2, "ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg); 837 pr_debug("ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg);
794 838
795 user = file->private_data; 839 user = file->private_data;
796 if (CHECK_USER(user)) 840 if (CHECK_USER(user))
@@ -809,13 +853,13 @@ static int ds_ioctl(struct inode * inode, struct file * file,
809 853
810 if (cmd & IOC_IN) { 854 if (cmd & IOC_IN) {
811 if (!access_ok(VERIFY_READ, uarg, size)) { 855 if (!access_ok(VERIFY_READ, uarg, size)) {
812 ds_dbg(3, "ds_ioctl(): verify_read = %d\n", -EFAULT); 856 pr_debug("ds_ioctl(): verify_read = %d\n", -EFAULT);
813 return -EFAULT; 857 return -EFAULT;
814 } 858 }
815 } 859 }
816 if (cmd & IOC_OUT) { 860 if (cmd & IOC_OUT) {
817 if (!access_ok(VERIFY_WRITE, uarg, size)) { 861 if (!access_ok(VERIFY_WRITE, uarg, size)) {
818 ds_dbg(3, "ds_ioctl(): verify_write = %d\n", -EFAULT); 862 pr_debug("ds_ioctl(): verify_write = %d\n", -EFAULT);
819 return -EFAULT; 863 return -EFAULT;
820 } 864 }
821 } 865 }
@@ -927,15 +971,15 @@ static int ds_ioctl(struct inode * inode, struct file * file,
927 goto free_out; 971 goto free_out;
928 break; 972 break;
929 case DS_GET_FIRST_WINDOW: 973 case DS_GET_FIRST_WINDOW:
930 ret = pcmcia_get_window(s, &buf->win_info.handle, 0, 974 ret = pcmcia_get_window(s, &buf->win_info.handle, 1,
931 &buf->win_info.window); 975 &buf->win_info.window);
932 break; 976 break;
933 case DS_GET_NEXT_WINDOW: 977 case DS_GET_NEXT_WINDOW:
934 ret = pcmcia_get_window(s, &buf->win_info.handle, 978 ret = pcmcia_get_window(s, &buf->win_info.handle,
935 buf->win_info.handle->index + 1, &buf->win_info.window); 979 buf->win_info.handle + 1, &buf->win_info.window);
936 break; 980 break;
937 case DS_GET_MEM_PAGE: 981 case DS_GET_MEM_PAGE:
938 ret = pcmcia_get_mem_page(buf->win_info.handle, 982 ret = pcmcia_get_mem_page(s, buf->win_info.handle,
939 &buf->win_info.map); 983 &buf->win_info.map);
940 break; 984 break;
941 case DS_REPLACE_CIS: 985 case DS_REPLACE_CIS:
@@ -962,7 +1006,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
962 } 1006 }
963 1007
964 if ((err == 0) && (ret != 0)) { 1008 if ((err == 0) && (ret != 0)) {
965 ds_dbg(2, "ds_ioctl: ret = %d\n", ret); 1009 pr_debug("ds_ioctl: ret = %d\n", ret);
966 switch (ret) { 1010 switch (ret) {
967 case -ENODEV: 1011 case -ENODEV:
968 case -EINVAL: 1012 case -EINVAL:
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index d919e96c0afd..a8bf8c1b45ed 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/netdevice.h>
23 24
24#include <pcmcia/cs_types.h> 25#include <pcmcia/cs_types.h>
25#include <pcmcia/ss.h> 26#include <pcmcia/ss.h>
@@ -43,21 +44,6 @@ static u8 pcmcia_used_irq[NR_IRQS];
43#endif 44#endif
44 45
45 46
46#ifdef CONFIG_PCMCIA_DEBUG
47extern int ds_pc_debug;
48
49#define ds_dbg(skt, lvl, fmt, arg...) do { \
50 if (ds_pc_debug >= lvl) \
51 dev_printk(KERN_DEBUG, &skt->dev, \
52 "pcmcia_resource: " fmt, \
53 ## arg); \
54} while (0)
55#else
56#define ds_dbg(skt, lvl, fmt, arg...) do { } while (0)
57#endif
58
59
60
61/** alloc_io_space 47/** alloc_io_space
62 * 48 *
63 * Special stuff for managing IO windows, because they are scarce 49 * Special stuff for managing IO windows, because they are scarce
@@ -72,14 +58,14 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
72 align = (*base) ? (lines ? 1<<lines : 0) : 1; 58 align = (*base) ? (lines ? 1<<lines : 0) : 1;
73 if (align && (align < num)) { 59 if (align && (align < num)) {
74 if (*base) { 60 if (*base) {
75 ds_dbg(s, 0, "odd IO request: num %#x align %#x\n", 61 dev_dbg(&s->dev, "odd IO request: num %#x align %#x\n",
76 num, align); 62 num, align);
77 align = 0; 63 align = 0;
78 } else 64 } else
79 while (align && (align < num)) align <<= 1; 65 while (align && (align < num)) align <<= 1;
80 } 66 }
81 if (*base & ~(align-1)) { 67 if (*base & ~(align-1)) {
82 ds_dbg(s, 0, "odd IO request: base %#x align %#x\n", 68 dev_dbg(&s->dev, "odd IO request: base %#x align %#x\n",
83 *base, align); 69 *base, align);
84 align = 0; 70 align = 0;
85 } 71 }
@@ -173,8 +159,10 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
173 s = p_dev->socket; 159 s = p_dev->socket;
174 c = p_dev->function_config; 160 c = p_dev->function_config;
175 161
176 if (!(c->state & CONFIG_LOCKED)) 162 if (!(c->state & CONFIG_LOCKED)) {
163 dev_dbg(&s->dev, "Configuration isnt't locked\n");
177 return -EACCES; 164 return -EACCES;
165 }
178 166
179 addr = (c->ConfigBase + reg->Offset) >> 1; 167 addr = (c->ConfigBase + reg->Offset) >> 1;
180 168
@@ -188,6 +176,7 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
188 pcmcia_write_cis_mem(s, 1, addr, 1, &val); 176 pcmcia_write_cis_mem(s, 1, addr, 1, &val);
189 break; 177 break;
190 default: 178 default:
179 dev_dbg(&s->dev, "Invalid conf register request\n");
191 return -EINVAL; 180 return -EINVAL;
192 break; 181 break;
193 } 182 }
@@ -196,68 +185,21 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
196EXPORT_SYMBOL(pcmcia_access_configuration_register); 185EXPORT_SYMBOL(pcmcia_access_configuration_register);
197 186
198 187
199/** pcmcia_get_window 188int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
200 */ 189 memreq_t *req)
201int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle,
202 int idx, win_req_t *req)
203{
204 window_t *win;
205 int w;
206
207 if (!s || !(s->state & SOCKET_PRESENT))
208 return -ENODEV;
209 for (w = idx; w < MAX_WIN; w++)
210 if (s->state & SOCKET_WIN_REQ(w))
211 break;
212 if (w == MAX_WIN)
213 return -EINVAL;
214 win = &s->win[w];
215 req->Base = win->ctl.res->start;
216 req->Size = win->ctl.res->end - win->ctl.res->start + 1;
217 req->AccessSpeed = win->ctl.speed;
218 req->Attributes = 0;
219 if (win->ctl.flags & MAP_ATTRIB)
220 req->Attributes |= WIN_MEMORY_TYPE_AM;
221 if (win->ctl.flags & MAP_ACTIVE)
222 req->Attributes |= WIN_ENABLE;
223 if (win->ctl.flags & MAP_16BIT)
224 req->Attributes |= WIN_DATA_WIDTH_16;
225 if (win->ctl.flags & MAP_USE_WAIT)
226 req->Attributes |= WIN_USE_WAIT;
227 *handle = win;
228 return 0;
229} /* pcmcia_get_window */
230EXPORT_SYMBOL(pcmcia_get_window);
231
232
233/** pcmcia_get_mem_page
234 *
235 * Change the card address of an already open memory window.
236 */
237int pcmcia_get_mem_page(window_handle_t win, memreq_t *req)
238{ 190{
239 if ((win == NULL) || (win->magic != WINDOW_MAGIC)) 191 struct pcmcia_socket *s = p_dev->socket;
240 return -EINVAL;
241 req->Page = 0;
242 req->CardOffset = win->ctl.card_start;
243 return 0;
244} /* pcmcia_get_mem_page */
245EXPORT_SYMBOL(pcmcia_get_mem_page);
246
247 192
248int pcmcia_map_mem_page(window_handle_t win, memreq_t *req) 193 wh--;
249{ 194 if (wh >= MAX_WIN)
250 struct pcmcia_socket *s;
251 if ((win == NULL) || (win->magic != WINDOW_MAGIC))
252 return -EINVAL; 195 return -EINVAL;
253 s = win->sock;
254 if (req->Page != 0) { 196 if (req->Page != 0) {
255 ds_dbg(s, 0, "failure: requested page is zero\n"); 197 dev_dbg(&s->dev, "failure: requested page is zero\n");
256 return -EINVAL; 198 return -EINVAL;
257 } 199 }
258 win->ctl.card_start = req->CardOffset; 200 s->win[wh].card_start = req->CardOffset;
259 if (s->ops->set_mem_map(s, &win->ctl) != 0) { 201 if (s->ops->set_mem_map(s, &s->win[wh]) != 0) {
260 ds_dbg(s, 0, "failed to set_mem_map\n"); 202 dev_dbg(&s->dev, "failed to set_mem_map\n");
261 return -EIO; 203 return -EIO;
262 } 204 }
263 return 0; 205 return 0;
@@ -278,10 +220,14 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
278 s = p_dev->socket; 220 s = p_dev->socket;
279 c = p_dev->function_config; 221 c = p_dev->function_config;
280 222
281 if (!(s->state & SOCKET_PRESENT)) 223 if (!(s->state & SOCKET_PRESENT)) {
224 dev_dbg(&s->dev, "No card present\n");
282 return -ENODEV; 225 return -ENODEV;
283 if (!(c->state & CONFIG_LOCKED)) 226 }
227 if (!(c->state & CONFIG_LOCKED)) {
228 dev_dbg(&s->dev, "Configuration isnt't locked\n");
284 return -EACCES; 229 return -EACCES;
230 }
285 231
286 if (mod->Attributes & CONF_IRQ_CHANGE_VALID) { 232 if (mod->Attributes & CONF_IRQ_CHANGE_VALID) {
287 if (mod->Attributes & CONF_ENABLE_IRQ) { 233 if (mod->Attributes & CONF_ENABLE_IRQ) {
@@ -295,7 +241,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
295 } 241 }
296 242
297 if (mod->Attributes & CONF_VCC_CHANGE_VALID) { 243 if (mod->Attributes & CONF_VCC_CHANGE_VALID) {
298 ds_dbg(s, 0, "changing Vcc is not allowed at this time\n"); 244 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
299 return -EINVAL; 245 return -EINVAL;
300 } 246 }
301 247
@@ -303,7 +249,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
303 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && 249 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
304 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 250 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
305 if (mod->Vpp1 != mod->Vpp2) { 251 if (mod->Vpp1 != mod->Vpp2) {
306 ds_dbg(s, 0, "Vpp1 and Vpp2 must be the same\n"); 252 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
307 return -EINVAL; 253 return -EINVAL;
308 } 254 }
309 s->socket.Vpp = mod->Vpp1; 255 s->socket.Vpp = mod->Vpp1;
@@ -314,7 +260,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
314 } 260 }
315 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || 261 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
316 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 262 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
317 ds_dbg(s, 0, "changing Vcc is not allowed at this time\n"); 263 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
318 return -EINVAL; 264 return -EINVAL;
319 } 265 }
320 266
@@ -425,11 +371,11 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
425 if (c->state & CONFIG_LOCKED) 371 if (c->state & CONFIG_LOCKED)
426 return -EACCES; 372 return -EACCES;
427 if (c->irq.Attributes != req->Attributes) { 373 if (c->irq.Attributes != req->Attributes) {
428 ds_dbg(s, 0, "IRQ attributes must match assigned ones\n"); 374 dev_dbg(&s->dev, "IRQ attributes must match assigned ones\n");
429 return -EINVAL; 375 return -EINVAL;
430 } 376 }
431 if (s->irq.AssignedIRQ != req->AssignedIRQ) { 377 if (s->irq.AssignedIRQ != req->AssignedIRQ) {
432 ds_dbg(s, 0, "IRQ must match assigned one\n"); 378 dev_dbg(&s->dev, "IRQ must match assigned one\n");
433 return -EINVAL; 379 return -EINVAL;
434 } 380 }
435 if (--s->irq.Config == 0) { 381 if (--s->irq.Config == 0) {
@@ -437,8 +383,8 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
437 s->irq.AssignedIRQ = 0; 383 s->irq.AssignedIRQ = 0;
438 } 384 }
439 385
440 if (req->Attributes & IRQ_HANDLE_PRESENT) { 386 if (req->Handler) {
441 free_irq(req->AssignedIRQ, req->Instance); 387 free_irq(req->AssignedIRQ, p_dev->priv);
442 } 388 }
443 389
444#ifdef CONFIG_PCMCIA_PROBE 390#ifdef CONFIG_PCMCIA_PROBE
@@ -449,30 +395,34 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
449} /* pcmcia_release_irq */ 395} /* pcmcia_release_irq */
450 396
451 397
452int pcmcia_release_window(window_handle_t win) 398int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
453{ 399{
454 struct pcmcia_socket *s; 400 struct pcmcia_socket *s = p_dev->socket;
401 pccard_mem_map *win;
455 402
456 if ((win == NULL) || (win->magic != WINDOW_MAGIC)) 403 wh--;
404 if (wh >= MAX_WIN)
457 return -EINVAL; 405 return -EINVAL;
458 s = win->sock; 406
459 if (!(win->handle->_win & CLIENT_WIN_REQ(win->index))) 407 win = &s->win[wh];
408
409 if (!(p_dev->_win & CLIENT_WIN_REQ(wh))) {
410 dev_dbg(&s->dev, "not releasing unknown window\n");
460 return -EINVAL; 411 return -EINVAL;
412 }
461 413
462 /* Shut down memory window */ 414 /* Shut down memory window */
463 win->ctl.flags &= ~MAP_ACTIVE; 415 win->flags &= ~MAP_ACTIVE;
464 s->ops->set_mem_map(s, &win->ctl); 416 s->ops->set_mem_map(s, win);
465 s->state &= ~SOCKET_WIN_REQ(win->index); 417 s->state &= ~SOCKET_WIN_REQ(wh);
466 418
467 /* Release system memory */ 419 /* Release system memory */
468 if (win->ctl.res) { 420 if (win->res) {
469 release_resource(win->ctl.res); 421 release_resource(win->res);
470 kfree(win->ctl.res); 422 kfree(win->res);
471 win->ctl.res = NULL; 423 win->res = NULL;
472 } 424 }
473 win->handle->_win &= ~CLIENT_WIN_REQ(win->index); 425 p_dev->_win &= ~CLIENT_WIN_REQ(wh);
474
475 win->magic = 0;
476 426
477 return 0; 427 return 0;
478} /* pcmcia_release_window */ 428} /* pcmcia_release_window */
@@ -492,12 +442,14 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
492 return -ENODEV; 442 return -ENODEV;
493 443
494 if (req->IntType & INT_CARDBUS) { 444 if (req->IntType & INT_CARDBUS) {
495 ds_dbg(p_dev->socket, 0, "IntType may not be INT_CARDBUS\n"); 445 dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n");
496 return -EINVAL; 446 return -EINVAL;
497 } 447 }
498 c = p_dev->function_config; 448 c = p_dev->function_config;
499 if (c->state & CONFIG_LOCKED) 449 if (c->state & CONFIG_LOCKED) {
450 dev_dbg(&s->dev, "Configuration is locked\n");
500 return -EACCES; 451 return -EACCES;
452 }
501 453
502 /* Do power control. We don't allow changes in Vcc. */ 454 /* Do power control. We don't allow changes in Vcc. */
503 s->socket.Vpp = req->Vpp; 455 s->socket.Vpp = req->Vpp;
@@ -609,40 +561,44 @@ int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req)
609 struct pcmcia_socket *s = p_dev->socket; 561 struct pcmcia_socket *s = p_dev->socket;
610 config_t *c; 562 config_t *c;
611 563
612 if (!(s->state & SOCKET_PRESENT)) 564 if (!(s->state & SOCKET_PRESENT)) {
565 dev_dbg(&s->dev, "No card present\n");
613 return -ENODEV; 566 return -ENODEV;
567 }
614 568
615 if (!req) 569 if (!req)
616 return -EINVAL; 570 return -EINVAL;
617 c = p_dev->function_config; 571 c = p_dev->function_config;
618 if (c->state & CONFIG_LOCKED) 572 if (c->state & CONFIG_LOCKED) {
573 dev_dbg(&s->dev, "Configuration is locked\n");
619 return -EACCES; 574 return -EACCES;
575 }
620 if (c->state & CONFIG_IO_REQ) { 576 if (c->state & CONFIG_IO_REQ) {
621 ds_dbg(s, 0, "IO already configured\n"); 577 dev_dbg(&s->dev, "IO already configured\n");
622 return -EBUSY; 578 return -EBUSY;
623 } 579 }
624 if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) { 580 if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) {
625 ds_dbg(s, 0, "bad attribute setting for IO region 1\n"); 581 dev_dbg(&s->dev, "bad attribute setting for IO region 1\n");
626 return -EINVAL; 582 return -EINVAL;
627 } 583 }
628 if ((req->NumPorts2 > 0) && 584 if ((req->NumPorts2 > 0) &&
629 (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) { 585 (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) {
630 ds_dbg(s, 0, "bad attribute setting for IO region 2\n"); 586 dev_dbg(&s->dev, "bad attribute setting for IO region 2\n");
631 return -EINVAL; 587 return -EINVAL;
632 } 588 }
633 589
634 ds_dbg(s, 1, "trying to allocate resource 1\n"); 590 dev_dbg(&s->dev, "trying to allocate resource 1\n");
635 if (alloc_io_space(s, req->Attributes1, &req->BasePort1, 591 if (alloc_io_space(s, req->Attributes1, &req->BasePort1,
636 req->NumPorts1, req->IOAddrLines)) { 592 req->NumPorts1, req->IOAddrLines)) {
637 ds_dbg(s, 0, "allocation of resource 1 failed\n"); 593 dev_dbg(&s->dev, "allocation of resource 1 failed\n");
638 return -EBUSY; 594 return -EBUSY;
639 } 595 }
640 596
641 if (req->NumPorts2) { 597 if (req->NumPorts2) {
642 ds_dbg(s, 1, "trying to allocate resource 2\n"); 598 dev_dbg(&s->dev, "trying to allocate resource 2\n");
643 if (alloc_io_space(s, req->Attributes2, &req->BasePort2, 599 if (alloc_io_space(s, req->Attributes2, &req->BasePort2,
644 req->NumPorts2, req->IOAddrLines)) { 600 req->NumPorts2, req->IOAddrLines)) {
645 ds_dbg(s, 0, "allocation of resource 2 failed\n"); 601 dev_dbg(&s->dev, "allocation of resource 2 failed\n");
646 release_io_space(s, req->BasePort1, req->NumPorts1); 602 release_io_space(s, req->BasePort1, req->NumPorts1);
647 return -EBUSY; 603 return -EBUSY;
648 } 604 }
@@ -680,13 +636,17 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
680 int ret = -EINVAL, irq = 0; 636 int ret = -EINVAL, irq = 0;
681 int type; 637 int type;
682 638
683 if (!(s->state & SOCKET_PRESENT)) 639 if (!(s->state & SOCKET_PRESENT)) {
640 dev_dbg(&s->dev, "No card present\n");
684 return -ENODEV; 641 return -ENODEV;
642 }
685 c = p_dev->function_config; 643 c = p_dev->function_config;
686 if (c->state & CONFIG_LOCKED) 644 if (c->state & CONFIG_LOCKED) {
645 dev_dbg(&s->dev, "Configuration is locked\n");
687 return -EACCES; 646 return -EACCES;
647 }
688 if (c->state & CONFIG_IRQ_REQ) { 648 if (c->state & CONFIG_IRQ_REQ) {
689 ds_dbg(s, 0, "IRQ already configured\n"); 649 dev_dbg(&s->dev, "IRQ already configured\n");
690 return -EBUSY; 650 return -EBUSY;
691 } 651 }
692 652
@@ -704,7 +664,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
704 /* if the underlying IRQ infrastructure allows for it, only allocate 664 /* if the underlying IRQ infrastructure allows for it, only allocate
705 * the IRQ, but do not enable it 665 * the IRQ, but do not enable it
706 */ 666 */
707 if (!(req->Attributes & IRQ_HANDLE_PRESENT)) 667 if (!(req->Handler))
708 type |= IRQ_NOAUTOEN; 668 type |= IRQ_NOAUTOEN;
709#endif /* IRQ_NOAUTOEN */ 669#endif /* IRQ_NOAUTOEN */
710 670
@@ -714,7 +674,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
714 } else { 674 } else {
715 int try; 675 int try;
716 u32 mask = s->irq_mask; 676 u32 mask = s->irq_mask;
717 void *data = &p_dev->dev.driver; /* something unique to this device */ 677 void *data = p_dev; /* something unique to this device */
718 678
719 for (try = 0; try < 64; try++) { 679 for (try = 0; try < 64; try++) {
720 irq = try % 32; 680 irq = try % 32;
@@ -731,12 +691,12 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
731 * registering a dummy handle works, i.e. if the IRQ isn't 691 * registering a dummy handle works, i.e. if the IRQ isn't
732 * marked as used by the kernel resource management core */ 692 * marked as used by the kernel resource management core */
733 ret = request_irq(irq, 693 ret = request_irq(irq,
734 (req->Attributes & IRQ_HANDLE_PRESENT) ? req->Handler : test_action, 694 (req->Handler) ? req->Handler : test_action,
735 type, 695 type,
736 p_dev->devname, 696 p_dev->devname,
737 (req->Attributes & IRQ_HANDLE_PRESENT) ? req->Instance : data); 697 (req->Handler) ? p_dev->priv : data);
738 if (!ret) { 698 if (!ret) {
739 if (!(req->Attributes & IRQ_HANDLE_PRESENT)) 699 if (!req->Handler)
740 free_irq(irq, data); 700 free_irq(irq, data);
741 break; 701 break;
742 } 702 }
@@ -745,17 +705,22 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
745#endif 705#endif
746 /* only assign PCI irq if no IRQ already assigned */ 706 /* only assign PCI irq if no IRQ already assigned */
747 if (ret && !s->irq.AssignedIRQ) { 707 if (ret && !s->irq.AssignedIRQ) {
748 if (!s->pci_irq) 708 if (!s->pci_irq) {
709 dev_printk(KERN_INFO, &s->dev, "no IRQ found\n");
749 return ret; 710 return ret;
711 }
750 type = IRQF_SHARED; 712 type = IRQF_SHARED;
751 irq = s->pci_irq; 713 irq = s->pci_irq;
752 } 714 }
753 715
754 if (ret && (req->Attributes & IRQ_HANDLE_PRESENT)) { 716 if (ret && req->Handler) {
755 ret = request_irq(irq, req->Handler, type, 717 ret = request_irq(irq, req->Handler, type,
756 p_dev->devname, req->Instance); 718 p_dev->devname, p_dev->priv);
757 if (ret) 719 if (ret) {
720 dev_printk(KERN_INFO, &s->dev,
721 "request_irq() failed\n");
758 return ret; 722 return ret;
723 }
759 } 724 }
760 725
761 /* Make sure the fact the request type was overridden is passed back */ 726 /* Make sure the fact the request type was overridden is passed back */
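
/*
 * Illustrative sketch, not part of this patch: how a 16-bit PCMCIA driver
 * requests its interrupt once IRQ_HANDLE_PRESENT and irq_req_t.Instance are
 * gone.  The core now keys solely off req->Handler and always passes
 * p_dev->priv as the dev_id (see the request_irq()/free_irq() calls above).
 * "example_private", "example_interrupt" and "example_attach" are
 * hypothetical names.
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

struct example_private {
        unsigned long events;
};

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        struct example_private *priv = dev_id; /* == p_dev->priv */

        priv->events++;         /* acknowledge/handle the card here */
        return IRQ_HANDLED;
}

static int example_attach(struct pcmcia_device *p_dev)
{
        int ret;

        p_dev->priv = kzalloc(sizeof(struct example_private), GFP_KERNEL);
        if (!p_dev->priv)
                return -ENOMEM;

        p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
        p_dev->irq.Handler = example_interrupt; /* non-NULL => request + enable */

        ret = pcmcia_request_irq(p_dev, &p_dev->irq);
        if (ret)
                kfree(p_dev->priv);
        return ret;
}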
@@ -787,17 +752,19 @@ EXPORT_SYMBOL(pcmcia_request_irq);
787 * Request_window() establishes a mapping between card memory space 752 * Request_window() establishes a mapping between card memory space
788 * and system memory space. 753 * and system memory space.
789 */ 754 */
790int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_handle_t *wh) 755int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_handle_t *wh)
791{ 756{
792 struct pcmcia_socket *s = (*p_dev)->socket; 757 struct pcmcia_socket *s = p_dev->socket;
793 window_t *win; 758 pccard_mem_map *win;
794 u_long align; 759 u_long align;
795 int w; 760 int w;
796 761
797 if (!(s->state & SOCKET_PRESENT)) 762 if (!(s->state & SOCKET_PRESENT)) {
763 dev_dbg(&s->dev, "No card present\n");
798 return -ENODEV; 764 return -ENODEV;
765 }
799 if (req->Attributes & (WIN_PAGED | WIN_SHARED)) { 766 if (req->Attributes & (WIN_PAGED | WIN_SHARED)) {
800 ds_dbg(s, 0, "bad attribute setting for iomem region\n"); 767 dev_dbg(&s->dev, "bad attribute setting for iomem region\n");
801 return -EINVAL; 768 return -EINVAL;
802 } 769 }
803 770
@@ -808,12 +775,12 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h
808 (req->Attributes & WIN_STRICT_ALIGN)) ? 775 (req->Attributes & WIN_STRICT_ALIGN)) ?
809 req->Size : s->map_size); 776 req->Size : s->map_size);
810 if (req->Size & (s->map_size-1)) { 777 if (req->Size & (s->map_size-1)) {
811 ds_dbg(s, 0, "invalid map size\n"); 778 dev_dbg(&s->dev, "invalid map size\n");
812 return -EINVAL; 779 return -EINVAL;
813 } 780 }
814 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || 781 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
815 (req->Base & (align-1))) { 782 (req->Base & (align-1))) {
816 ds_dbg(s, 0, "invalid base address\n"); 783 dev_dbg(&s->dev, "invalid base address\n");
817 return -EINVAL; 784 return -EINVAL;
818 } 785 }
819 if (req->Base) 786 if (req->Base)
@@ -823,52 +790,48 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h
823 for (w = 0; w < MAX_WIN; w++) 790 for (w = 0; w < MAX_WIN; w++)
824 if (!(s->state & SOCKET_WIN_REQ(w))) break; 791 if (!(s->state & SOCKET_WIN_REQ(w))) break;
825 if (w == MAX_WIN) { 792 if (w == MAX_WIN) {
826 ds_dbg(s, 0, "all windows are used already\n"); 793 dev_dbg(&s->dev, "all windows are used already\n");
827 return -EINVAL; 794 return -EINVAL;
828 } 795 }
829 796
830 win = &s->win[w]; 797 win = &s->win[w];
831 win->magic = WINDOW_MAGIC;
832 win->index = w;
833 win->handle = *p_dev;
834 win->sock = s;
835 798
836 if (!(s->features & SS_CAP_STATIC_MAP)) { 799 if (!(s->features & SS_CAP_STATIC_MAP)) {
837 win->ctl.res = pcmcia_find_mem_region(req->Base, req->Size, align, 800 win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
838 (req->Attributes & WIN_MAP_BELOW_1MB), s); 801 (req->Attributes & WIN_MAP_BELOW_1MB), s);
839 if (!win->ctl.res) { 802 if (!win->res) {
840 ds_dbg(s, 0, "allocating mem region failed\n"); 803 dev_dbg(&s->dev, "allocating mem region failed\n");
841 return -EINVAL; 804 return -EINVAL;
842 } 805 }
843 } 806 }
844 (*p_dev)->_win |= CLIENT_WIN_REQ(w); 807 p_dev->_win |= CLIENT_WIN_REQ(w);
845 808
846 /* Configure the socket controller */ 809 /* Configure the socket controller */
847 win->ctl.map = w+1; 810 win->map = w+1;
848 win->ctl.flags = 0; 811 win->flags = 0;
849 win->ctl.speed = req->AccessSpeed; 812 win->speed = req->AccessSpeed;
850 if (req->Attributes & WIN_MEMORY_TYPE) 813 if (req->Attributes & WIN_MEMORY_TYPE)
851 win->ctl.flags |= MAP_ATTRIB; 814 win->flags |= MAP_ATTRIB;
852 if (req->Attributes & WIN_ENABLE) 815 if (req->Attributes & WIN_ENABLE)
853 win->ctl.flags |= MAP_ACTIVE; 816 win->flags |= MAP_ACTIVE;
854 if (req->Attributes & WIN_DATA_WIDTH_16) 817 if (req->Attributes & WIN_DATA_WIDTH_16)
855 win->ctl.flags |= MAP_16BIT; 818 win->flags |= MAP_16BIT;
856 if (req->Attributes & WIN_USE_WAIT) 819 if (req->Attributes & WIN_USE_WAIT)
857 win->ctl.flags |= MAP_USE_WAIT; 820 win->flags |= MAP_USE_WAIT;
858 win->ctl.card_start = 0; 821 win->card_start = 0;
859 if (s->ops->set_mem_map(s, &win->ctl) != 0) { 822 if (s->ops->set_mem_map(s, win) != 0) {
860 ds_dbg(s, 0, "failed to set memory mapping\n"); 823 dev_dbg(&s->dev, "failed to set memory mapping\n");
861 return -EIO; 824 return -EIO;
862 } 825 }
863 s->state |= SOCKET_WIN_REQ(w); 826 s->state |= SOCKET_WIN_REQ(w);
864 827
865 /* Return window handle */ 828 /* Return window handle */
866 if (s->features & SS_CAP_STATIC_MAP) { 829 if (s->features & SS_CAP_STATIC_MAP) {
867 req->Base = win->ctl.static_start; 830 req->Base = win->static_start;
868 } else { 831 } else {
869 req->Base = win->ctl.res->start; 832 req->Base = win->res->start;
870 } 833 }
871 *wh = win; 834 *wh = w + 1;
872 835
873 return 0; 836 return 0;
874} /* pcmcia_request_window */ 837} /* pcmcia_request_window */
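
/*
 * Illustrative sketch, not part of this patch: requesting and releasing a
 * memory window with the reworked API, where window_handle_t is now a plain
 * index (w + 1) rather than a pointer into the socket.  The 0x1000 size and
 * the name "example_map_attr_mem" are arbitrary assumptions.
 */
#include <linux/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static void __iomem *example_map_attr_mem(struct pcmcia_device *p_dev,
                                          window_handle_t *wh)
{
        win_req_t req;
        void __iomem *mem;

        req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
        req.Base = 0;           /* let the core pick a base address */
        req.Size = 0x1000;      /* must be a multiple of s->map_size */
        req.AccessSpeed = 0;

        if (pcmcia_request_window(p_dev, &req, wh))
                return NULL;

        mem = ioremap(req.Base, req.Size);
        if (!mem)
                pcmcia_release_window(p_dev, *wh);
        return mem;
}
/* teardown: iounmap(mem); pcmcia_release_window(p_dev, wh); */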
@@ -879,19 +842,46 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev) {
879 pcmcia_release_io(p_dev, &p_dev->io); 842 pcmcia_release_io(p_dev, &p_dev->io);
880 pcmcia_release_irq(p_dev, &p_dev->irq); 843 pcmcia_release_irq(p_dev, &p_dev->irq);
881 if (p_dev->win) 844 if (p_dev->win)
882 pcmcia_release_window(p_dev->win); 845 pcmcia_release_window(p_dev, p_dev->win);
883} 846}
884EXPORT_SYMBOL(pcmcia_disable_device); 847EXPORT_SYMBOL(pcmcia_disable_device);
885 848
886 849
887struct pcmcia_cfg_mem { 850struct pcmcia_cfg_mem {
888 tuple_t tuple; 851 struct pcmcia_device *p_dev;
852 void *priv_data;
853 int (*conf_check) (struct pcmcia_device *p_dev,
854 cistpl_cftable_entry_t *cfg,
855 cistpl_cftable_entry_t *dflt,
856 unsigned int vcc,
857 void *priv_data);
889 cisparse_t parse; 858 cisparse_t parse;
890 u8 buf[256];
891 cistpl_cftable_entry_t dflt; 859 cistpl_cftable_entry_t dflt;
892}; 860};
893 861
894/** 862/**
863 * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
864 *
865 * pcmcia_do_loop_config() is the internal callback for the call from
866 * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred
867 * by a struct pcmcia_cfg_mem.
868 */
869static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv)
870{
871 cistpl_cftable_entry_t *cfg = &parse->cftable_entry;
872 struct pcmcia_cfg_mem *cfg_mem = priv;
873
874 /* default values */
875 cfg_mem->p_dev->conf.ConfigIndex = cfg->index;
876 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
877 cfg_mem->dflt = *cfg;
878
879 return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt,
880 cfg_mem->p_dev->socket->socket.Vcc,
881 cfg_mem->priv_data);
882}
883
884/**
895 * pcmcia_loop_config() - loop over configuration options 885 * pcmcia_loop_config() - loop over configuration options
896 * @p_dev: the struct pcmcia_device which we need to loop for. 886 * @p_dev: the struct pcmcia_device which we need to loop for.
897 * @conf_check: function to call for each configuration option. 887 * @conf_check: function to call for each configuration option.
@@ -913,48 +903,174 @@ int pcmcia_loop_config(struct pcmcia_device *p_dev,
913 void *priv_data) 903 void *priv_data)
914{ 904{
915 struct pcmcia_cfg_mem *cfg_mem; 905 struct pcmcia_cfg_mem *cfg_mem;
916
917 tuple_t *tuple;
918 int ret; 906 int ret;
919 unsigned int vcc;
920 907
921 cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL); 908 cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
922 if (cfg_mem == NULL) 909 if (cfg_mem == NULL)
923 return -ENOMEM; 910 return -ENOMEM;
924 911
925 /* get the current Vcc setting */ 912 cfg_mem->p_dev = p_dev;
926 vcc = p_dev->socket->socket.Vcc; 913 cfg_mem->conf_check = conf_check;
914 cfg_mem->priv_data = priv_data;
927 915
928 tuple = &cfg_mem->tuple; 916 ret = pccard_loop_tuple(p_dev->socket, p_dev->func,
929 tuple->TupleData = cfg_mem->buf; 917 CISTPL_CFTABLE_ENTRY, &cfg_mem->parse,
930 tuple->TupleDataMax = 255; 918 cfg_mem, pcmcia_do_loop_config);
931 tuple->TupleOffset = 0;
932 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
933 tuple->Attributes = 0;
934 919
935 ret = pcmcia_get_first_tuple(p_dev, tuple); 920 kfree(cfg_mem);
936 while (!ret) { 921 return ret;
937 cistpl_cftable_entry_t *cfg = &cfg_mem->parse.cftable_entry; 922}
923EXPORT_SYMBOL(pcmcia_loop_config);
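
/*
 * Illustrative sketch, not part of this patch: a typical conf_check callback
 * and the matching pcmcia_loop_config() call.  Returning 0 from the callback
 * (here: a successful pcmcia_request_io()) stops the loop.  The names
 * "example_config_check" and "example_config" are hypothetical.
 */
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int example_config_check(struct pcmcia_device *p_dev,
                                cistpl_cftable_entry_t *cfg,
                                cistpl_cftable_entry_t *dflt,
                                unsigned int vcc,
                                void *priv_data)
{
        if (cfg->index == 0 || cfg->io.nwin == 0)
                return -ENODEV;

        /* try the first I/O window this entry offers */
        p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
        p_dev->io.BasePort1 = cfg->io.win[0].base;
        p_dev->io.NumPorts1 = cfg->io.win[0].len;

        return pcmcia_request_io(p_dev, &p_dev->io);
}

static int example_config(struct pcmcia_device *p_dev)
{
        return pcmcia_loop_config(p_dev, example_config_check, NULL);
}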
924
925
926struct pcmcia_loop_mem {
927 struct pcmcia_device *p_dev;
928 void *priv_data;
929 int (*loop_tuple) (struct pcmcia_device *p_dev,
930 tuple_t *tuple,
931 void *priv_data);
932};
933
934/**
 935 * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_tuple()
 936 *
 937 * pcmcia_do_loop_tuple() is the internal callback for the call from
 938 * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred
 939 * by a struct pcmcia_loop_mem.
940 */
941static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv)
942{
943 struct pcmcia_loop_mem *loop = priv;
944
945 return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data);
946};
947
948/**
949 * pcmcia_loop_tuple() - loop over tuples in the CIS
950 * @p_dev: the struct pcmcia_device which we need to loop for.
951 * @code: which CIS code shall we look for?
952 * @priv_data: private data to be passed to the loop_tuple function.
 953 * @loop_tuple: function to call for each CIS entry of type @code. It
954 * gets passed the raw tuple and @priv_data.
955 *
 956 * pcmcia_loop_tuple() loops over all CIS entries of type @code, and
 957 * calls the @loop_tuple function for each entry. If the call to @loop_tuple
 958 * returns 0, the loop exits. Returns 0 on success or an error code otherwise.
959 */
960int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
961 int (*loop_tuple) (struct pcmcia_device *p_dev,
962 tuple_t *tuple,
963 void *priv_data),
964 void *priv_data)
965{
966 struct pcmcia_loop_mem loop = {
967 .p_dev = p_dev,
968 .loop_tuple = loop_tuple,
969 .priv_data = priv_data};
938 970
939 if (pcmcia_get_tuple_data(p_dev, tuple)) 971 return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
940 goto next_entry; 972 &loop, pcmcia_do_loop_tuple);
973};
974EXPORT_SYMBOL(pcmcia_loop_tuple);
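
/*
 * Illustrative sketch, not part of this patch: using pcmcia_loop_tuple()
 * directly.  The callback receives the raw tuple_t; returning 0 ends the
 * loop, any other value moves on to the next tuple of the requested code.
 * "example_check_vers1" and "example_first_byte" are hypothetical helpers.
 */
#include <pcmcia/cs_types.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int example_check_vers1(struct pcmcia_device *p_dev, tuple_t *tuple,
                               void *priv_data)
{
        u8 *first_byte = priv_data;

        if (tuple->TupleDataLen < 1)
                return -EINVAL;         /* keep looking */

        *first_byte = tuple->TupleData[0];
        return 0;                       /* found it, stop the loop */
}

static int example_first_byte(struct pcmcia_device *p_dev, u8 *out)
{
        return pcmcia_loop_tuple(p_dev, CISTPL_VERS_1, example_check_vers1, out);
}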
941 975
942 if (pcmcia_parse_tuple(tuple, &cfg_mem->parse))
943 goto next_entry;
944 976
945 /* default values */ 977struct pcmcia_loop_get {
946 p_dev->conf.ConfigIndex = cfg->index; 978 size_t len;
947 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) 979 cisdata_t **buf;
948 cfg_mem->dflt = *cfg; 980};
949 981
950 ret = conf_check(p_dev, cfg, &cfg_mem->dflt, vcc, priv_data); 982/**
951 if (!ret) 983 * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
952 break; 984 *
985 * pcmcia_do_get_tuple() is the internal callback for the call from
986 * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in
987 * the first tuple, return 0 unconditionally. Create a memory buffer large
988 * enough to hold the content of the tuple, and fill it with the tuple data.
 989 * The caller is responsible for freeing the buffer.
990 */
991static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
992 void *priv)
993{
994 struct pcmcia_loop_get *get = priv;
995
996 *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL);
997 if (*get->buf) {
998 get->len = tuple->TupleDataLen;
999 memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen);
1000 } else
1001 dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
1002 return 0;
1003};
953 1004
954next_entry: 1005/**
955 ret = pcmcia_get_next_tuple(p_dev, tuple); 1006 * pcmcia_get_tuple() - get first tuple from CIS
1007 * @p_dev: the struct pcmcia_device which we need to loop for.
1008 * @code: which CIS code shall we look for?
1009 * @buf: pointer through which the allocated buffer is returned.
1010 *
1011 * pcmcia_get_tuple() gets the content of the first CIS entry of type @code.
1012 * It returns the buffer length (or zero). The caller is responsible for
1013 * freeing the buffer passed in @buf.
1014 */
1015size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
1016 unsigned char **buf)
1017{
1018 struct pcmcia_loop_get get = {
1019 .len = 0,
1020 .buf = buf,
1021 };
1022
1023 *get.buf = NULL;
1024 pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
1025
1026 return get.len;
1027};
1028EXPORT_SYMBOL(pcmcia_get_tuple);
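
/*
 * Illustrative sketch, not part of this patch: pcmcia_get_tuple() hands back
 * a kzalloc'ed copy of the first matching tuple; the caller owns and frees
 * the buffer.  "example_dump_vers1" is a hypothetical helper.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static void example_dump_vers1(struct pcmcia_device *p_dev)
{
        unsigned char *buf = NULL;
        size_t len;

        len = pcmcia_get_tuple(p_dev, CISTPL_VERS_1, &buf);
        if (len)
                print_hex_dump_bytes("vers_1: ", DUMP_PREFIX_OFFSET, buf, len);

        kfree(buf);     /* buf stays NULL if no tuple was found */
}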
1029
1030
1031/**
1032 * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
1033 *
1034 * pcmcia_do_get_mac() is the internal callback for the call from
1035 * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the
1036 * tuple contains a proper LAN_NODE_ID of length 6, and copy the data
1037 * to struct net_device->dev_addr[i].
1038 */
1039static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
1040 void *priv)
1041{
1042 struct net_device *dev = priv;
1043 int i;
1044
1045 if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
1046 return -EINVAL;
1047 if (tuple->TupleDataLen < ETH_ALEN + 2) {
1048 dev_warn(&p_dev->dev, "Invalid CIS tuple length for "
1049 "LAN_NODE_ID\n");
1050 return -EINVAL;
956 } 1051 }
957 1052
958 return ret; 1053 if (tuple->TupleData[1] != ETH_ALEN) {
959} 1054 dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
960EXPORT_SYMBOL(pcmcia_loop_config); 1055 return -EINVAL;
1056 }
1057 for (i = 0; i < 6; i++)
1058 dev->dev_addr[i] = tuple->TupleData[i+2];
1059 return 0;
1060};
1061
1062/**
1063 * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
1064 * @p_dev: the struct pcmcia_device for which we want the address.
1065 * @dev: a properly prepared struct net_device to store the info to.
1066 *
1067 * pcmcia_get_mac_from_cis() reads out the hardware MAC address from
1068 * CISTPL_FUNCE and stores it into the dev_addr field of the passed-in
1069 * struct net_device, which must be set up properly by the driver (see examples!).
1070 */
1071int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
1072{
1073 return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
1074};
1075EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
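
/*
 * Illustrative sketch, not part of this patch: a network driver fetching its
 * MAC address during configuration.  @dev must already be allocated (e.g.
 * via alloc_etherdev()); "example_set_mac" is a hypothetical helper.
 */
#include <linux/device.h>
#include <linux/netdevice.h>
#include <pcmcia/ds.h>

static int example_set_mac(struct pcmcia_device *p_dev, struct net_device *dev)
{
        if (pcmcia_get_mac_from_cis(p_dev, dev) == 0) {
                dev_info(&p_dev->dev, "MAC from CISTPL_FUNCE: %pM\n",
                         dev->dev_addr);
                return 0;
        }

        /* no LAN_NODE_ID tuple: fall back to card registers or an EEPROM */
        return -ENODEV;
}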
1076
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 70a33468bcd0..e1741cd875aa 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -213,7 +213,8 @@ static irqreturn_t pd6729_interrupt(int irq, void *dev)
213 213
214 if (csc & I365_CSC_DETECT) { 214 if (csc & I365_CSC_DETECT) {
215 events |= SS_DETECT; 215 events |= SS_DETECT;
216 dprintk("Card detected in socket %i!\n", i); 216 dev_vdbg(&socket[i].socket.dev,
217 "Card detected in socket %i!\n", i);
217 } 218 }
218 219
219 if (indirect_read(&socket[i], I365_INTCTL) 220 if (indirect_read(&socket[i], I365_INTCTL)
@@ -331,11 +332,11 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
331 reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */ 332 reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */
332 333
333 if (state->flags & SS_PWR_AUTO) { 334 if (state->flags & SS_PWR_AUTO) {
334 dprintk("Auto power\n"); 335 dev_dbg(&sock->dev, "Auto power\n");
335 reg |= I365_PWR_AUTO; /* automatic power mngmnt */ 336 reg |= I365_PWR_AUTO; /* automatic power mngmnt */
336 } 337 }
337 if (state->flags & SS_OUTPUT_ENA) { 338 if (state->flags & SS_OUTPUT_ENA) {
338 dprintk("Power Enabled\n"); 339 dev_dbg(&sock->dev, "Power Enabled\n");
339 reg |= I365_PWR_OUT; /* enable power */ 340 reg |= I365_PWR_OUT; /* enable power */
340 } 341 }
341 342
@@ -343,40 +344,44 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
343 case 0: 344 case 0:
344 break; 345 break;
345 case 33: 346 case 33:
346 dprintk("setting voltage to Vcc to 3.3V on socket %i\n", 347 dev_dbg(&sock->dev,
348 "setting voltage to Vcc to 3.3V on socket %i\n",
347 socket->number); 349 socket->number);
348 reg |= I365_VCC_5V; 350 reg |= I365_VCC_5V;
349 indirect_setbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); 351 indirect_setbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V);
350 break; 352 break;
351 case 50: 353 case 50:
352 dprintk("setting voltage to Vcc to 5V on socket %i\n", 354 dev_dbg(&sock->dev,
355 "setting voltage to Vcc to 5V on socket %i\n",
353 socket->number); 356 socket->number);
354 reg |= I365_VCC_5V; 357 reg |= I365_VCC_5V;
355 indirect_resetbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); 358 indirect_resetbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V);
356 break; 359 break;
357 default: 360 default:
358 dprintk("pd6729: pd6729_set_socket called with " 361 dev_dbg(&sock->dev,
359 "invalid VCC power value: %i\n", 362 "pd6729_set_socket called with invalid VCC power "
360 state->Vcc); 363 "value: %i\n", state->Vcc);
361 return -EINVAL; 364 return -EINVAL;
362 } 365 }
363 366
364 switch (state->Vpp) { 367 switch (state->Vpp) {
365 case 0: 368 case 0:
366 dprintk("not setting Vpp on socket %i\n", socket->number); 369 dev_dbg(&sock->dev, "not setting Vpp on socket %i\n",
370 socket->number);
367 break; 371 break;
368 case 33: 372 case 33:
369 case 50: 373 case 50:
370 dprintk("setting Vpp to Vcc for socket %i\n", socket->number); 374 dev_dbg(&sock->dev, "setting Vpp to Vcc for socket %i\n",
375 socket->number);
371 reg |= I365_VPP1_5V; 376 reg |= I365_VPP1_5V;
372 break; 377 break;
373 case 120: 378 case 120:
374 dprintk("setting Vpp to 12.0\n"); 379 dev_dbg(&sock->dev, "setting Vpp to 12.0\n");
375 reg |= I365_VPP1_12V; 380 reg |= I365_VPP1_12V;
376 break; 381 break;
377 default: 382 default:
378 dprintk("pd6729: pd6729_set_socket called with invalid VPP power value: %i\n", 383 dev_dbg(&sock->dev, "pd6729: pd6729_set_socket called with "
379 state->Vpp); 384 "invalid VPP power value: %i\n", state->Vpp);
380 return -EINVAL; 385 return -EINVAL;
381 } 386 }
382 387
@@ -438,7 +443,7 @@ static int pd6729_set_io_map(struct pcmcia_socket *sock,
438 443
439 /* Check error conditions */ 444 /* Check error conditions */
440 if (map > 1) { 445 if (map > 1) {
441 dprintk("pd6729_set_io_map with invalid map"); 446 dev_dbg(&sock->dev, "pd6729_set_io_map with invalid map\n");
442 return -EINVAL; 447 return -EINVAL;
443 } 448 }
444 449
@@ -446,7 +451,7 @@ static int pd6729_set_io_map(struct pcmcia_socket *sock,
446 if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map)) 451 if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map))
447 indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); 452 indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map));
448 453
449 /* dprintk("set_io_map: Setting range to %x - %x\n", 454 /* dev_dbg(&sock->dev, "set_io_map: Setting range to %x - %x\n",
450 io->start, io->stop);*/ 455 io->start, io->stop);*/
451 456
452 /* write the new values */ 457 /* write the new values */
@@ -478,12 +483,12 @@ static int pd6729_set_mem_map(struct pcmcia_socket *sock,
478 483
479 map = mem->map; 484 map = mem->map;
480 if (map > 4) { 485 if (map > 4) {
481 printk("pd6729_set_mem_map: invalid map"); 486 dev_warn(&sock->dev, "invalid map requested\n");
482 return -EINVAL; 487 return -EINVAL;
483 } 488 }
484 489
485 if ((mem->res->start > mem->res->end) || (mem->speed > 1000)) { 490 if ((mem->res->start > mem->res->end) || (mem->speed > 1000)) {
486 printk("pd6729_set_mem_map: invalid address / speed"); 491 dev_warn(&sock->dev, "invalid invalid address / speed\n");
487 return -EINVAL; 492 return -EINVAL;
488 } 493 }
489 494
@@ -529,12 +534,12 @@ static int pd6729_set_mem_map(struct pcmcia_socket *sock,
529 if (mem->flags & MAP_WRPROT) 534 if (mem->flags & MAP_WRPROT)
530 i |= I365_MEM_WRPROT; 535 i |= I365_MEM_WRPROT;
531 if (mem->flags & MAP_ATTRIB) { 536 if (mem->flags & MAP_ATTRIB) {
532 /* dprintk("requesting attribute memory for socket %i\n", 537 /* dev_dbg(&sock->dev, "requesting attribute memory for "
533 socket->number);*/ 538 "socket %i\n", socket->number);*/
534 i |= I365_MEM_REG; 539 i |= I365_MEM_REG;
535 } else { 540 } else {
536 /* dprintk("requesting normal memory for socket %i\n", 541 /* dev_dbg(&sock->dev, "requesting normal memory for "
537 socket->number);*/ 542 "socket %i\n", socket->number);*/
538 } 543 }
539 indirect_write16(socket, base + I365_W_OFF, i); 544 indirect_write16(socket, base + I365_W_OFF, i);
540 545
@@ -577,7 +582,7 @@ static struct pccard_operations pd6729_operations = {
577 582
578static irqreturn_t pd6729_test(int irq, void *dev) 583static irqreturn_t pd6729_test(int irq, void *dev)
579{ 584{
580 dprintk("-> hit on irq %d\n", irq); 585 pr_devel("-> hit on irq %d\n", irq);
581 return IRQ_HANDLED; 586 return IRQ_HANDLED;
582} 587}
583 588
@@ -642,13 +647,13 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
642 goto err_out_free_mem; 647 goto err_out_free_mem;
643 648
644 if (!pci_resource_start(dev, 0)) { 649 if (!pci_resource_start(dev, 0)) {
645 printk(KERN_INFO "pd6729: refusing to load the driver " 650 dev_warn(&dev->dev, "refusing to load the driver as the "
646 "as the io_base is 0.\n"); 651 "io_base is NULL.\n");
647 goto err_out_free_mem; 652 goto err_out_free_mem;
648 } 653 }
649 654
650 printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge " 655 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
651 "at 0x%llx on irq %d\n", 656 "on irq %d\n",
652 (unsigned long long)pci_resource_start(dev, 0), dev->irq); 657 (unsigned long long)pci_resource_start(dev, 0), dev->irq);
653 /* 658 /*
654 * Since we have no memory BARs some firmware may not 659 * Since we have no memory BARs some firmware may not
@@ -656,14 +661,14 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
656 */ 661 */
657 pci_read_config_byte(dev, PCI_COMMAND, &configbyte); 662 pci_read_config_byte(dev, PCI_COMMAND, &configbyte);
658 if (!(configbyte & PCI_COMMAND_MEMORY)) { 663 if (!(configbyte & PCI_COMMAND_MEMORY)) {
659 printk(KERN_DEBUG "pd6729: Enabling PCI_COMMAND_MEMORY.\n"); 664 dev_dbg(&dev->dev, "pd6729: Enabling PCI_COMMAND_MEMORY.\n");
660 configbyte |= PCI_COMMAND_MEMORY; 665 configbyte |= PCI_COMMAND_MEMORY;
661 pci_write_config_byte(dev, PCI_COMMAND, configbyte); 666 pci_write_config_byte(dev, PCI_COMMAND, configbyte);
662 } 667 }
663 668
664 ret = pci_request_regions(dev, "pd6729"); 669 ret = pci_request_regions(dev, "pd6729");
665 if (ret) { 670 if (ret) {
666 printk(KERN_INFO "pd6729: pci request region failed.\n"); 671 dev_warn(&dev->dev, "pci request region failed.\n");
667 goto err_out_disable; 672 goto err_out_disable;
668 } 673 }
669 674
@@ -672,7 +677,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
672 677
673 mask = pd6729_isa_scan(); 678 mask = pd6729_isa_scan();
674 if (irq_mode == 0 && mask == 0) { 679 if (irq_mode == 0 && mask == 0) {
675 printk(KERN_INFO "pd6729: no ISA interrupt is available.\n"); 680 dev_warn(&dev->dev, "no ISA interrupt is available.\n");
676 goto err_out_free_res; 681 goto err_out_free_res;
677 } 682 }
678 683
@@ -697,8 +702,8 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
697 /* Register the interrupt handler */ 702 /* Register the interrupt handler */
698 if ((ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED, 703 if ((ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED,
699 "pd6729", socket))) { 704 "pd6729", socket))) {
700 printk(KERN_ERR "pd6729: Failed to register irq %d, " 705 dev_err(&dev->dev, "Failed to register irq %d\n",
701 "aborting\n", dev->irq); 706 dev->irq);
702 goto err_out_free_res; 707 goto err_out_free_res;
703 } 708 }
704 } else { 709 } else {
@@ -713,8 +718,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
713 for (i = 0; i < MAX_SOCKETS; i++) { 718 for (i = 0; i < MAX_SOCKETS; i++) {
714 ret = pcmcia_register_socket(&socket[i].socket); 719 ret = pcmcia_register_socket(&socket[i].socket);
715 if (ret) { 720 if (ret) {
716 printk(KERN_INFO "pd6729: pcmcia_register_socket " 721 dev_warn(&dev->dev, "pcmcia_register_socket failed.\n");
717 "failed.\n");
718 for (j = 0; j < i ; j++) 722 for (j = 0; j < i ; j++)
719 pcmcia_unregister_socket(&socket[j].socket); 723 pcmcia_unregister_socket(&socket[j].socket);
720 goto err_out_free_res2; 724 goto err_out_free_res2;
diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h
index f392e458cdfd..41418d394c55 100644
--- a/drivers/pcmcia/pd6729.h
+++ b/drivers/pcmcia/pd6729.h
@@ -1,13 +1,6 @@
1#ifndef _INCLUDE_GUARD_PD6729_H_ 1#ifndef _INCLUDE_GUARD_PD6729_H_
2#define _INCLUDE_GUARD_PD6729_H_ 2#define _INCLUDE_GUARD_PD6729_H_
3 3
4/* Debuging defines */
5#ifdef NOTRACE
6#define dprintk(fmt, args...) printk(fmt , ## args)
7#else
8#define dprintk(fmt, args...) do {} while (0)
9#endif
10
11/* Flags for I365_GENCTL */ 4/* Flags for I365_GENCTL */
12#define I365_DF_VS1 0x40 /* DF-step Voltage Sense */ 5#define I365_DF_VS1 0x40 /* DF-step Voltage Sense */
13#define I365_DF_VS2 0x80 6#define I365_DF_VS2 0x80
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 0e35acb1366b..84dde7768ad5 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -228,9 +228,43 @@ static const char *skt_names[] = {
228#define SKT_DEV_INFO_SIZE(n) \ 228#define SKT_DEV_INFO_SIZE(n) \
229 (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) 229 (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket))
230 230
231int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt)
232{
233 skt->res_skt.start = _PCMCIA(skt->nr);
234 skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1;
235 skt->res_skt.name = skt_names[skt->nr];
236 skt->res_skt.flags = IORESOURCE_MEM;
237
238 skt->res_io.start = _PCMCIAIO(skt->nr);
239 skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
240 skt->res_io.name = "io";
241 skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
242
243 skt->res_mem.start = _PCMCIAMem(skt->nr);
244 skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1;
245 skt->res_mem.name = "memory";
246 skt->res_mem.flags = IORESOURCE_MEM;
247
248 skt->res_attr.start = _PCMCIAAttr(skt->nr);
249 skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
250 skt->res_attr.name = "attribute";
251 skt->res_attr.flags = IORESOURCE_MEM;
252
253 return soc_pcmcia_add_one(skt);
254}
255
256void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
257{
258 /* Provide our PXA2xx specific timing routines. */
259 ops->set_timing = pxa2xx_pcmcia_set_timing;
260#ifdef CONFIG_CPU_FREQ
261 ops->frequency_change = pxa2xx_pcmcia_frequency_change;
262#endif
263}
264
231int __pxa2xx_drv_pcmcia_probe(struct device *dev) 265int __pxa2xx_drv_pcmcia_probe(struct device *dev)
232{ 266{
233 int i, ret; 267 int i, ret = 0;
234 struct pcmcia_low_level *ops; 268 struct pcmcia_low_level *ops;
235 struct skt_dev_info *sinfo; 269 struct skt_dev_info *sinfo;
236 struct soc_pcmcia_socket *skt; 270 struct soc_pcmcia_socket *skt;
@@ -240,6 +274,8 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev)
240 274
241 ops = (struct pcmcia_low_level *)dev->platform_data; 275 ops = (struct pcmcia_low_level *)dev->platform_data;
242 276
277 pxa2xx_drv_pcmcia_ops(ops);
278
243 sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL); 279 sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
244 if (!sinfo) 280 if (!sinfo)
245 return -ENOMEM; 281 return -ENOMEM;
@@ -250,40 +286,25 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev)
250 for (i = 0; i < ops->nr; i++) { 286 for (i = 0; i < ops->nr; i++) {
251 skt = &sinfo->skt[i]; 287 skt = &sinfo->skt[i];
252 288
253 skt->nr = ops->first + i; 289 skt->nr = ops->first + i;
254 skt->irq = NO_IRQ; 290 skt->ops = ops;
255 291 skt->socket.owner = ops->owner;
256 skt->res_skt.start = _PCMCIA(skt->nr); 292 skt->socket.dev.parent = dev;
257 skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; 293 skt->socket.pci_irq = NO_IRQ;
258 skt->res_skt.name = skt_names[skt->nr];
259 skt->res_skt.flags = IORESOURCE_MEM;
260
261 skt->res_io.start = _PCMCIAIO(skt->nr);
262 skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
263 skt->res_io.name = "io";
264 skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
265 294
266 skt->res_mem.start = _PCMCIAMem(skt->nr); 295 ret = pxa2xx_drv_pcmcia_add_one(skt);
267 skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; 296 if (ret)
268 skt->res_mem.name = "memory"; 297 break;
269 skt->res_mem.flags = IORESOURCE_MEM;
270
271 skt->res_attr.start = _PCMCIAAttr(skt->nr);
272 skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
273 skt->res_attr.name = "attribute";
274 skt->res_attr.flags = IORESOURCE_MEM;
275 } 298 }
276 299
277 /* Provide our PXA2xx specific timing routines. */ 300 if (ret) {
278 ops->set_timing = pxa2xx_pcmcia_set_timing; 301 while (--i >= 0)
279#ifdef CONFIG_CPU_FREQ 302 soc_pcmcia_remove_one(&sinfo->skt[i]);
280 ops->frequency_change = pxa2xx_pcmcia_frequency_change; 303 kfree(sinfo);
281#endif 304 } else {
282
283 ret = soc_common_drv_pcmcia_probe(dev, ops, sinfo);
284
285 if (!ret)
286 pxa2xx_configure_sockets(dev); 305 pxa2xx_configure_sockets(dev);
306 dev_set_drvdata(dev, sinfo);
307 }
287 308
288 return ret; 309 return ret;
289} 310}
@@ -297,7 +318,16 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
297 318
298static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) 319static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
299{ 320{
300 return soc_common_drv_pcmcia_remove(&dev->dev); 321 struct skt_dev_info *sinfo = platform_get_drvdata(dev);
322 int i;
323
324 platform_set_drvdata(dev, NULL);
325
326 for (i = 0; i < sinfo->nskt; i++)
327 soc_pcmcia_remove_one(&sinfo->skt[i]);
328
329 kfree(sinfo);
330 return 0;
301} 331}
302 332
303static int pxa2xx_drv_pcmcia_suspend(struct device *dev) 333static int pxa2xx_drv_pcmcia_suspend(struct device *dev)
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index 235d681652c3..cb5efaec886f 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,6 @@
1/* temporary measure */ 1/* temporary measure */
2extern int __pxa2xx_drv_pcmcia_probe(struct device *); 2extern int __pxa2xx_drv_pcmcia_probe(struct device *);
3 3
4int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
5void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
6
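
/*
 * Illustrative sketch, not part of this patch: how a SA-1111 based PXA board
 * file is expected to use the two helpers declared above.  The "board_*"
 * names and the empty configure_socket body are hypothetical placeholders;
 * the soc_common.h / sa1111_generic.h declarations are assumed to match the
 * rest of this series.
 */
#include <linux/module.h>
#include <pcmcia/ss.h>
#include "soc_common.h"
#include "pxa2xx_base.h"
#include "sa1111_generic.h"

static int board_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
                                         const socket_state_t *state)
{
        /* board-specific Vcc/Vpp and reset control would go here */
        return 0;
}

static struct pcmcia_low_level board_pcmcia_ops = {
        .owner                  = THIS_MODULE,
        .configure_socket       = board_pcmcia_configure_socket,
        .first                  = 0,
        .nr                     = 2,
};

int board_pcmcia_init(struct sa1111_dev *sadev)
{
        /* fill in the PXA2xx timing/cpufreq callbacks ... */
        pxa2xx_drv_pcmcia_ops(&board_pcmcia_ops);

        /* ... then register each socket through the SA-1111 helper */
        return sa1111_pcmcia_add(sadev, &board_pcmcia_ops,
                                 pxa2xx_drv_pcmcia_add_one);
}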
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 5143a760153b..05913d0bbdbe 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -44,7 +44,7 @@ static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
44 return ret; 44 return ret;
45 gpio_direction_output(GPIO_PCMCIA_RESET, 0); 45 gpio_direction_output(GPIO_PCMCIA_RESET, 0);
46 46
47 skt->irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT; 47 skt->socket.pci_irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
48 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 48 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
49 if (!ret) 49 if (!ret)
50 gpio_free(GPIO_PCMCIA_RESET); 50 gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index a7b943d01e34..5662646b84da 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -38,7 +38,7 @@ static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
38 return ret; 38 return ret;
39 gpio_direction_output(GPIO_PCMCIA_RESET, 0); 39 gpio_direction_output(GPIO_PCMCIA_RESET, 0);
40 40
41 skt->irq = PCMCIA_S0_RDYINT; 41 skt->socket.pci_irq = PCMCIA_S0_RDYINT;
42 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 42 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
43 if (!ret) 43 if (!ret)
44 gpio_free(GPIO_PCMCIA_RESET); 44 gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/drivers/pcmcia/pxa2xx_e740.c
index d09c0dc4a31a..8bfbd4dca131 100644
--- a/drivers/pcmcia/pxa2xx_e740.c
+++ b/drivers/pcmcia/pxa2xx_e740.c
@@ -38,7 +38,7 @@ static struct pcmcia_irqs cd_irqs[] = {
38 38
39static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 39static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
40{ 40{
41 skt->irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) : 41 skt->socket.pci_irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) :
42 IRQ_GPIO(GPIO_E740_PCMCIA_RDY1); 42 IRQ_GPIO(GPIO_E740_PCMCIA_RDY1);
43 43
44 return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1); 44 return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1);
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index 6cbb1b1f7cfd..b9f8c8fb42bd 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -32,6 +32,7 @@ static int
32lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, 32lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
33 const socket_state_t *state) 33 const socket_state_t *state)
34{ 34{
35 struct sa1111_pcmcia_socket *s = to_skt(skt);
35 unsigned int pa_dwr_mask, pa_dwr_set, misc_mask, misc_set; 36 unsigned int pa_dwr_mask, pa_dwr_set, misc_mask, misc_set;
36 int ret = 0; 37 int ret = 0;
37 38
@@ -149,7 +150,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
149 150
150 if (ret == 0) { 151 if (ret == 0) {
151 lubbock_set_misc_wr(misc_mask, misc_set); 152 lubbock_set_misc_wr(misc_mask, misc_set);
152 sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); 153 sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
153 } 154 }
154 155
155#if 1 156#if 1
@@ -175,7 +176,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
175 * Switch to 5V, Configure socket with 5V voltage 176 * Switch to 5V, Configure socket with 5V voltage
176 */ 177 */
177 lubbock_set_misc_wr(misc_mask, 0); 178 lubbock_set_misc_wr(misc_mask, 0);
178 sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, 0); 179 sa1111_set_io(s->dev, pa_dwr_mask, 0);
179 180
180 /* 181 /*
181 * It takes about 100ms to turn off Vcc. 182 * It takes about 100ms to turn off Vcc.
@@ -200,12 +201,8 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
200 201
201static struct pcmcia_low_level lubbock_pcmcia_ops = { 202static struct pcmcia_low_level lubbock_pcmcia_ops = {
202 .owner = THIS_MODULE, 203 .owner = THIS_MODULE,
203 .hw_init = sa1111_pcmcia_hw_init,
204 .hw_shutdown = sa1111_pcmcia_hw_shutdown,
205 .socket_state = sa1111_pcmcia_socket_state,
206 .configure_socket = lubbock_pcmcia_configure_socket, 204 .configure_socket = lubbock_pcmcia_configure_socket,
207 .socket_init = sa1111_pcmcia_socket_init, 205 .socket_init = sa1111_pcmcia_socket_init,
208 .socket_suspend = sa1111_pcmcia_socket_suspend,
209 .first = 0, 206 .first = 0,
210 .nr = 2, 207 .nr = 2,
211}; 208};
@@ -228,8 +225,9 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
228 /* Set CF Socket 1 power to standby mode. */ 225 /* Set CF Socket 1 power to standby mode. */
229 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); 226 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
230 227
231 sadev->dev.platform_data = &lubbock_pcmcia_ops; 228 pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
232 ret = __pxa2xx_drv_pcmcia_probe(&sadev->dev); 229 ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
230 pxa2xx_drv_pcmcia_add_one);
233 } 231 }
234 232
235 return ret; 233 return ret;
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 1138551ba8f6..92016fe932b4 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -44,7 +44,7 @@ static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
44 * before we enable them as outputs. 44 * before we enable them as outputs.
45 */ 45 */
46 46
47 skt->irq = (skt->nr == 0) ? MAINSTONE_S0_IRQ : MAINSTONE_S1_IRQ; 47 skt->socket.pci_irq = (skt->nr == 0) ? MAINSTONE_S0_IRQ : MAINSTONE_S1_IRQ;
48 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 48 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
49} 49}
50 50
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index 5ba9b3664a00..6fb6f7f0672e 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -45,7 +45,7 @@ static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
45 if (ret) 45 if (ret)
46 goto err4; 46 goto err4;
47 47
48 skt->irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY); 48 skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY);
49 return 0; 49 return 0;
50 50
51err4: 51err4:
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index e07b5c51ec5b..b07b247a399f 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -53,7 +53,7 @@ static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
53 if (ret) 53 if (ret)
54 goto err5; 54 goto err5;
55 55
56 skt->irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY); 56 skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY);
57 return 0; 57 return 0;
58 58
59err5: 59err5:
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index bc43f78f6f0b..0ea3b29440e6 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -66,7 +66,7 @@ static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
66 } 66 }
67 } 67 }
68 68
69 skt->irq = SCOOP_DEV[skt->nr].irq; 69 skt->socket.pci_irq = SCOOP_DEV[skt->nr].irq;
70 70
71 return 0; 71 return 0;
72} 72}
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index e0e5cb339b4a..b7e596620db1 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -53,7 +53,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
53 gpio_free(GPIO_PRDY); 53 gpio_free(GPIO_PRDY);
54 return -EINVAL; 54 return -EINVAL;
55 } 55 }
56 skt->irq = IRQ_GPIO(GPIO_PRDY); 56 skt->socket.pci_irq = IRQ_GPIO(GPIO_PRDY);
57 break; 57 break;
58 58
59#ifndef CONFIG_MACH_TRIZEPS_CONXS 59#ifndef CONFIG_MACH_TRIZEPS_CONXS
@@ -63,7 +63,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
63 break; 63 break;
64 } 64 }
65 /* release the reset of this card */ 65 /* release the reset of this card */
66 pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->irq); 66 pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->socket.pci_irq);
67 67
68 /* supplementary irqs for the socket */ 68
69 for (i = 0; i < ARRAY_SIZE(irqs); i++) { 69 for (i = 0; i < ARRAY_SIZE(irqs); i++) {
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index 17871360fe99..27be2e154df2 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -40,7 +40,7 @@ static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
40{ 40{
41 unsigned long flags; 41 unsigned long flags;
42 42
43 skt->irq = gpio_to_irq(VIPER_CF_RDY_GPIO); 43 skt->socket.pci_irq = gpio_to_irq(VIPER_CF_RDY_GPIO);
44 44
45 if (gpio_request(VIPER_CF_CD_GPIO, "CF detect")) 45 if (gpio_request(VIPER_CF_CD_GPIO, "CF detect"))
46 goto err_request_cd; 46 goto err_request_cd;
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index e592e0e0d7ed..de0e770ce6a3 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -18,6 +18,7 @@
18#include <pcmcia/cs_types.h> 18#include <pcmcia/cs_types.h>
19#include <pcmcia/ss.h> 19#include <pcmcia/ss.h>
20#include <pcmcia/cs.h> 20#include <pcmcia/cs.h>
21#include <pcmcia/cistpl.h>
21#include "cs_internal.h" 22#include "cs_internal.h"
22 23
23 24
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index ac8aa09ba0da..fd013a1ef47a 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -27,7 +27,7 @@ static struct pcmcia_irqs irqs[] = {
27 27
28static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 28static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
29{ 29{
30 skt->irq = ASSABET_IRQ_GPIO_CF_IRQ; 30 skt->socket.pci_irq = ASSABET_IRQ_GPIO_CF_IRQ;
31 31
32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
33} 33}
diff --git a/drivers/pcmcia/sa1100_badge4.c b/drivers/pcmcia/sa1100_badge4.c
index 1ca9737ea79e..1ce53f493bef 100644
--- a/drivers/pcmcia/sa1100_badge4.c
+++ b/drivers/pcmcia/sa1100_badge4.c
@@ -127,13 +127,10 @@ badge4_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state
127 127
128static struct pcmcia_low_level badge4_pcmcia_ops = { 128static struct pcmcia_low_level badge4_pcmcia_ops = {
129 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
130 .hw_init = sa1111_pcmcia_hw_init,
131 .hw_shutdown = sa1111_pcmcia_hw_shutdown,
132 .socket_state = sa1111_pcmcia_socket_state,
133 .configure_socket = badge4_pcmcia_configure_socket, 130 .configure_socket = badge4_pcmcia_configure_socket,
134
135 .socket_init = sa1111_pcmcia_socket_init, 131 .socket_init = sa1111_pcmcia_socket_init,
136 .socket_suspend = sa1111_pcmcia_socket_suspend, 132 .first = 0,
133 .nr = 2,
137}; 134};
138 135
139int pcmcia_badge4_init(struct device *dev) 136int pcmcia_badge4_init(struct device *dev)
@@ -146,7 +143,9 @@ int pcmcia_badge4_init(struct device *dev)
146 __func__, 143 __func__,
147 badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc); 144 badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
148 145
149 ret = sa11xx_drv_pcmcia_probe(dev, &badge4_pcmcia_ops, 0, 2); 146 sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
147 ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
148 sa11xx_drv_pcmcia_add_one);
150 } 149 }
151 150
152 return ret; 151 return ret;
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index 63e6bc431a0d..9bf088b17275 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -27,7 +27,7 @@ static struct pcmcia_irqs irqs[] = {
27 27
28static int cerf_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 28static int cerf_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
29{ 29{
30 skt->irq = CERF_IRQ_GPIO_CF_IRQ; 30 skt->socket.pci_irq = CERF_IRQ_GPIO_CF_IRQ;
31 31
32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
33} 33}
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 2d0e99751530..11cc3ba1260a 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -83,7 +83,16 @@ static int sa11x0_drv_pcmcia_probe(struct platform_device *dev)
83 83
84static int sa11x0_drv_pcmcia_remove(struct platform_device *dev) 84static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
85{ 85{
86 return soc_common_drv_pcmcia_remove(&dev->dev); 86 struct skt_dev_info *sinfo = platform_get_drvdata(dev);
87 int i;
88
89 platform_set_drvdata(dev, NULL);
90
91 for (i = 0; i < sinfo->nskt; i++)
92 soc_pcmcia_remove_one(&sinfo->skt[i]);
93
94 kfree(sinfo);
95 return 0;
87} 96}
88 97
89static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev, 98static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev,
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c
index 0cc3748f3758..3a121ac697d6 100644
--- a/drivers/pcmcia/sa1100_h3600.c
+++ b/drivers/pcmcia/sa1100_h3600.c
@@ -25,8 +25,8 @@ static struct pcmcia_irqs irqs[] = {
25 25
26static int h3600_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 26static int h3600_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
27{ 27{
28 skt->irq = skt->nr ? IRQ_GPIO_H3600_PCMCIA_IRQ1 28 skt->socket.pci_irq = skt->nr ? IRQ_GPIO_H3600_PCMCIA_IRQ1
29 : IRQ_GPIO_H3600_PCMCIA_IRQ0; 29 : IRQ_GPIO_H3600_PCMCIA_IRQ0;
30 30
31 31
32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
diff --git a/drivers/pcmcia/sa1100_jornada720.c b/drivers/pcmcia/sa1100_jornada720.c
index 7eedb42f800c..6bcabee6bde4 100644
--- a/drivers/pcmcia/sa1100_jornada720.c
+++ b/drivers/pcmcia/sa1100_jornada720.c
@@ -22,25 +22,10 @@
22#define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3) 22#define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3)
23#define SOCKET1_3V GPIO_GPIO3 23#define SOCKET1_3V GPIO_GPIO3
24 24
25static int jornada720_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
26{
27 unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
28
29 /*
30 * What is all this crap for?
31 */
32 GRER |= 0x00000002;
33 /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
34 sa1111_set_io_dir(SA1111_DEV(skt->dev), pin, 0, 0);
35 sa1111_set_io(SA1111_DEV(skt->dev), pin, 0);
36 sa1111_set_sleep_io(SA1111_DEV(skt->dev), pin, 0);
37
38 return sa1111_pcmcia_hw_init(skt);
39}
40
41static int 25static int
42jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) 26jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
43{ 27{
28 struct sa1111_pcmcia_socket *s = to_skt(skt);
44 unsigned int pa_dwr_mask, pa_dwr_set; 29 unsigned int pa_dwr_mask, pa_dwr_set;
45 int ret; 30 int ret;
46 31
@@ -97,7 +82,7 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
97 unsigned long flags; 82 unsigned long flags;
98 83
99 local_irq_save(flags); 84 local_irq_save(flags);
100 sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); 85 sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
101 local_irq_restore(flags); 86 local_irq_restore(flags);
102 } 87 }
103 88
@@ -106,21 +91,30 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
106 91
107static struct pcmcia_low_level jornada720_pcmcia_ops = { 92static struct pcmcia_low_level jornada720_pcmcia_ops = {
108 .owner = THIS_MODULE, 93 .owner = THIS_MODULE,
109 .hw_init = jornada720_pcmcia_hw_init,
110 .hw_shutdown = sa1111_pcmcia_hw_shutdown,
111 .socket_state = sa1111_pcmcia_socket_state,
112 .configure_socket = jornada720_pcmcia_configure_socket, 94 .configure_socket = jornada720_pcmcia_configure_socket,
113
114 .socket_init = sa1111_pcmcia_socket_init, 95 .socket_init = sa1111_pcmcia_socket_init,
115 .socket_suspend = sa1111_pcmcia_socket_suspend, 96 .first = 0,
97 .nr = 2,
116}; 98};
117 99
118int __devinit pcmcia_jornada720_init(struct device *dev) 100int __devinit pcmcia_jornada720_init(struct device *dev)
119{ 101{
120 int ret = -ENODEV; 102 int ret = -ENODEV;
121 103
122 if (machine_is_jornada720()) 104 if (machine_is_jornada720()) {
123 ret = sa11xx_drv_pcmcia_probe(dev, &jornada720_pcmcia_ops, 0, 2); 105 unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
106
107 GRER |= 0x00000002;
108
109 /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
110 sa1111_set_io_dir(dev, pin, 0, 0);
111 sa1111_set_io(dev, pin, 0);
112 sa1111_set_sleep_io(dev, pin, 0);
113
114 sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
115 ret = sa1111_pcmcia_add(dev, &jornada720_pcmcia_ops,
116 sa11xx_drv_pcmcia_add_one);
117 }
124 118
125 return ret; 119 return ret;
126} 120}
diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c
index 0c76d337815b..c95639b5f2a0 100644
--- a/drivers/pcmcia/sa1100_neponset.c
+++ b/drivers/pcmcia/sa1100_neponset.c
@@ -43,6 +43,7 @@
43static int 43static int
44neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) 44neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
45{ 45{
46 struct sa1111_pcmcia_socket *s = to_skt(skt);
46 unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set; 47 unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set;
47 int ret; 48 int ret;
48 49
@@ -99,7 +100,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta
99 NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set; 100 NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set;
100 101
101 local_irq_restore(flags); 102 local_irq_restore(flags);
102 sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); 103 sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
103 } 104 }
104 105
105 return 0; 106 return 0;
@@ -115,12 +116,10 @@ static void neponset_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
115 116
116static struct pcmcia_low_level neponset_pcmcia_ops = { 117static struct pcmcia_low_level neponset_pcmcia_ops = {
117 .owner = THIS_MODULE, 118 .owner = THIS_MODULE,
118 .hw_init = sa1111_pcmcia_hw_init,
119 .hw_shutdown = sa1111_pcmcia_hw_shutdown,
120 .socket_state = sa1111_pcmcia_socket_state,
121 .configure_socket = neponset_pcmcia_configure_socket, 119 .configure_socket = neponset_pcmcia_configure_socket,
122 .socket_init = neponset_pcmcia_socket_init, 120 .socket_init = neponset_pcmcia_socket_init,
123 .socket_suspend = sa1111_pcmcia_socket_suspend, 121 .first = 0,
122 .nr = 2,
124}; 123};
125 124
126int pcmcia_neponset_init(struct sa1111_dev *sadev) 125int pcmcia_neponset_init(struct sa1111_dev *sadev)
@@ -135,7 +134,9 @@ int pcmcia_neponset_init(struct sa1111_dev *sadev)
135 sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); 134 sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
136 sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); 135 sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
137 sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); 136 sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
138 ret = sa11xx_drv_pcmcia_probe(&sadev->dev, &neponset_pcmcia_ops, 0, 2); 137 sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
138 ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
139 sa11xx_drv_pcmcia_add_one);
139 } 140 }
140 141
141 return ret; 142 return ret;
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c
index 46d8c1977c2a..c4d51867a050 100644
--- a/drivers/pcmcia/sa1100_shannon.c
+++ b/drivers/pcmcia/sa1100_shannon.c
@@ -28,7 +28,7 @@ static int shannon_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
28 GAFR &= ~(SHANNON_GPIO_EJECT_0 | SHANNON_GPIO_EJECT_1 | 28 GAFR &= ~(SHANNON_GPIO_EJECT_0 | SHANNON_GPIO_EJECT_1 |
29 SHANNON_GPIO_RDY_0 | SHANNON_GPIO_RDY_1); 29 SHANNON_GPIO_RDY_0 | SHANNON_GPIO_RDY_1);
30 30
31 skt->irq = skt->nr ? SHANNON_IRQ_GPIO_RDY_1 : SHANNON_IRQ_GPIO_RDY_0; 31 skt->socket.pci_irq = skt->nr ? SHANNON_IRQ_GPIO_RDY_1 : SHANNON_IRQ_GPIO_RDY_0;
32 32
33 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 33 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
34} 34}
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index 33a08ae09fdf..05bd504e6f18 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -28,7 +28,7 @@ static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 
     clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
 
-    skt->irq = IRQ_GPIO_CF_IRQ;
+    skt->socket.pci_irq = IRQ_GPIO_CF_IRQ;
 
     return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
 }
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 4be4e172ffa1..de6bc333d299 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -28,23 +28,20 @@ static struct pcmcia_irqs irqs[] = {
     { 1, IRQ_S1_BVD1_STSCHG, "SA1111 CF BVD1" },
 };
 
-int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
+static int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 {
-    if (skt->irq == NO_IRQ)
-        skt->irq = skt->nr ? IRQ_S1_READY_NINT : IRQ_S0_READY_NINT;
-
     return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
 }
 
-void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
+static void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
 {
     soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
 }
 
 void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
 {
-    struct sa1111_dev *sadev = SA1111_DEV(skt->dev);
-    unsigned long status = sa1111_readl(sadev->mapbase + SA1111_PCSR);
+    struct sa1111_pcmcia_socket *s = to_skt(skt);
+    unsigned long status = sa1111_readl(s->dev->mapbase + SA1111_PCSR);
 
     switch (skt->nr) {
     case 0:
@@ -71,7 +68,7 @@ void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_sta
 
 int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
 {
-    struct sa1111_dev *sadev = SA1111_DEV(skt->dev);
+    struct sa1111_pcmcia_socket *s = to_skt(skt);
     unsigned int pccr_skt_mask, pccr_set_mask, val;
     unsigned long flags;
 
@@ -100,10 +97,10 @@ int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
     pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT;
 
     local_irq_save(flags);
-    val = sa1111_readl(sadev->mapbase + SA1111_PCCR);
+    val = sa1111_readl(s->dev->mapbase + SA1111_PCCR);
     val &= ~pccr_skt_mask;
     val |= pccr_set_mask & pccr_skt_mask;
-    sa1111_writel(val, sadev->mapbase + SA1111_PCCR);
+    sa1111_writel(val, s->dev->mapbase + SA1111_PCCR);
     local_irq_restore(flags);
 
     return 0;
@@ -114,15 +111,51 @@ void sa1111_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
     soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs));
 }
 
-void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+static void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
 {
     soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs));
 }
 
+int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
+    int (*add)(struct soc_pcmcia_socket *))
+{
+    struct sa1111_pcmcia_socket *s;
+    int i, ret = 0;
+
+    ops->hw_init = sa1111_pcmcia_hw_init;
+    ops->hw_shutdown = sa1111_pcmcia_hw_shutdown;
+    ops->socket_state = sa1111_pcmcia_socket_state;
+    ops->socket_suspend = sa1111_pcmcia_socket_suspend;
+
+    for (i = 0; i < ops->nr; i++) {
+        s = kzalloc(sizeof(*s), GFP_KERNEL);
+        if (!s)
+            return -ENOMEM;
+
+        s->soc.nr = ops->first + i;
+        s->soc.ops = ops;
+        s->soc.socket.owner = ops->owner;
+        s->soc.socket.dev.parent = &dev->dev;
+        s->soc.socket.pci_irq = s->soc.nr ? IRQ_S1_READY_NINT : IRQ_S0_READY_NINT;
+        s->dev = dev;
+
+        ret = add(&s->soc);
+        if (ret == 0) {
+            s->next = dev_get_drvdata(&dev->dev);
+            dev_set_drvdata(&dev->dev, s);
+        } else
+            kfree(s);
+    }
+
+    return ret;
+}
+
 static int pcmcia_probe(struct sa1111_dev *dev)
 {
     void __iomem *base;
 
+    dev_set_drvdata(&dev->dev, NULL);
+
     if (!request_mem_region(dev->res.start, 512,
             SA1111_DRIVER_NAME(dev)))
         return -EBUSY;
@@ -152,7 +185,15 @@ static int pcmcia_probe(struct sa1111_dev *dev)
 
 static int __devexit pcmcia_remove(struct sa1111_dev *dev)
 {
-    soc_common_drv_pcmcia_remove(&dev->dev);
+    struct sa1111_pcmcia_socket *next, *s = dev_get_drvdata(&dev->dev);
+
+    dev_set_drvdata(&dev->dev, NULL);
+
+    for (; next = s->next, s; s = next) {
+        soc_pcmcia_remove_one(&s->soc);
+        kfree(s);
+    }
+
     release_mem_region(dev->res.start, 512);
     return 0;
 }
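
The removal loop in pcmcia_remove() above walks the singly linked list that sa1111_pcmcia_add() threads through the device's drvdata. Note that the comma expression `next = s->next, s` reads s->next before testing s, so it relies on drvdata being non-NULL when remove runs. A sketch of an equivalent walk written the more defensive way, using the same helpers introduced in this file:

    struct sa1111_pcmcia_socket *s = dev_get_drvdata(&dev->dev);

    dev_set_drvdata(&dev->dev, NULL);
    while (s) {
        struct sa1111_pcmcia_socket *next = s->next;

        soc_pcmcia_remove_one(&s->soc);   /* unregister before freeing */
        kfree(s);
        s = next;
    }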
diff --git a/drivers/pcmcia/sa1111_generic.h b/drivers/pcmcia/sa1111_generic.h
index 10ced4a210d7..02dc8577cdaf 100644
--- a/drivers/pcmcia/sa1111_generic.h
+++ b/drivers/pcmcia/sa1111_generic.h
@@ -1,12 +1,23 @@
 #include "soc_common.h"
 #include "sa11xx_base.h"
 
-extern int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *);
-extern void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *);
+struct sa1111_pcmcia_socket {
+    struct soc_pcmcia_socket soc;
+    struct sa1111_dev *dev;
+    struct sa1111_pcmcia_socket *next;
+};
+
+static inline struct sa1111_pcmcia_socket *to_skt(struct soc_pcmcia_socket *s)
+{
+    return container_of(s, struct sa1111_pcmcia_socket, soc);
+}
+
+int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
+    int (*add)(struct soc_pcmcia_socket *));
+
 extern void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *, struct pcmcia_state *);
 extern int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *, const socket_state_t *);
 extern void sa1111_pcmcia_socket_init(struct soc_pcmcia_socket *);
-extern void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *);
 
 extern int pcmcia_badge4_init(struct device *);
 extern int pcmcia_jornada720_init(struct device *);
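
to_skt() above is the usual container_of() idiom: the soc_common core only ever hands back the embedded soc_pcmcia_socket, and the SA-1111 code recovers its wrapper, and through it the owning sa1111_dev, from that pointer. An illustrative helper, not part of the patch (the function name is made up; sa1111_readl(), mapbase and SA1111_PCSR are the ones already used in sa1111_generic.c):

static unsigned long sa1111_skt_status(struct soc_pcmcia_socket *skt)
{
    struct sa1111_pcmcia_socket *s = to_skt(skt);

    /* reach the SA-1111 device that owns this socket */
    return sa1111_readl(s->dev->mapbase + SA1111_PCSR);
}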
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index e15d59f2d8a9..fc9a6527019b 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -171,12 +171,58 @@ static const char *skt_names[] = {
 #define SKT_DEV_INFO_SIZE(n) \
     (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket))
 
+int sa11xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt)
+{
+    skt->res_skt.start = _PCMCIA(skt->nr);
+    skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1;
+    skt->res_skt.name = skt_names[skt->nr];
+    skt->res_skt.flags = IORESOURCE_MEM;
+
+    skt->res_io.start = _PCMCIAIO(skt->nr);
+    skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
+    skt->res_io.name = "io";
+    skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+    skt->res_mem.start = _PCMCIAMem(skt->nr);
+    skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1;
+    skt->res_mem.name = "memory";
+    skt->res_mem.flags = IORESOURCE_MEM;
+
+    skt->res_attr.start = _PCMCIAAttr(skt->nr);
+    skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
+    skt->res_attr.name = "attribute";
+    skt->res_attr.flags = IORESOURCE_MEM;
+
+    return soc_pcmcia_add_one(skt);
+}
+EXPORT_SYMBOL(sa11xx_drv_pcmcia_add_one);
+
+void sa11xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
+{
+    /*
+     * set default MECR calculation if the board specific
+     * code did not specify one...
+     */
+    if (!ops->get_timing)
+        ops->get_timing = sa1100_pcmcia_default_mecr_timing;
+
+    /* Provide our SA11x0 specific timing routines. */
+    ops->set_timing = sa1100_pcmcia_set_timing;
+    ops->show_timing = sa1100_pcmcia_show_timing;
+#ifdef CONFIG_CPU_FREQ
+    ops->frequency_change = sa1100_pcmcia_frequency_change;
+#endif
+}
+EXPORT_SYMBOL(sa11xx_drv_pcmcia_ops);
+
 int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
     int first, int nr)
 {
     struct skt_dev_info *sinfo;
     struct soc_pcmcia_socket *skt;
-    int i;
+    int i, ret = 0;
+
+    sa11xx_drv_pcmcia_ops(ops);
 
     sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
     if (!sinfo)
@@ -188,45 +234,26 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
     for (i = 0; i < nr; i++) {
         skt = &sinfo->skt[i];
 
         skt->nr = first + i;
-        skt->irq = NO_IRQ;
-
-        skt->res_skt.start = _PCMCIA(skt->nr);
-        skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1;
-        skt->res_skt.name = skt_names[skt->nr];
-        skt->res_skt.flags = IORESOURCE_MEM;
-
-        skt->res_io.start = _PCMCIAIO(skt->nr);
-        skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
-        skt->res_io.name = "io";
-        skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+        skt->ops = ops;
+        skt->socket.owner = ops->owner;
+        skt->socket.dev.parent = dev;
+        skt->socket.pci_irq = NO_IRQ;
 
-        skt->res_mem.start = _PCMCIAMem(skt->nr);
-        skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1;
-        skt->res_mem.name = "memory";
-        skt->res_mem.flags = IORESOURCE_MEM;
-
-        skt->res_attr.start = _PCMCIAAttr(skt->nr);
-        skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
-        skt->res_attr.name = "attribute";
-        skt->res_attr.flags = IORESOURCE_MEM;
+        ret = sa11xx_drv_pcmcia_add_one(skt);
+        if (ret)
+            break;
     }
 
-    /*
-     * set default MECR calculation if the board specific
-     * code did not specify one...
-     */
-    if (!ops->get_timing)
-        ops->get_timing = sa1100_pcmcia_default_mecr_timing;
-
-    /* Provide our SA11x0 specific timing routines. */
-    ops->set_timing = sa1100_pcmcia_set_timing;
-    ops->show_timing = sa1100_pcmcia_show_timing;
-#ifdef CONFIG_CPU_FREQ
-    ops->frequency_change = sa1100_pcmcia_frequency_change;
-#endif
+    if (ret) {
+        while (--i >= 0)
+            soc_pcmcia_remove_one(&sinfo->skt[i]);
+        kfree(sinfo);
+    } else {
+        dev_set_drvdata(dev, sinfo);
+    }
 
-    return soc_common_drv_pcmcia_probe(dev, ops, sinfo);
+    return ret;
 }
 EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe);
 
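
The reworked sa11xx_drv_pcmcia_probe() above registers sockets one at a time and, on a partial failure, unwinds only the sockets that were actually added (the `while (--i >= 0)` loop). The same shape in isolation, with placeholder item/add_item/remove_item names (not kernel APIs):

struct item;
int add_item(struct item *it);
void remove_item(struct item *it);

int register_all(struct item *items, int nr)
{
    int i, ret = 0;

    for (i = 0; i < nr; i++) {
        ret = add_item(&items[i]);
        if (ret)
            break;                  /* items[i] was not added */
    }

    if (ret)
        while (--i >= 0)            /* undo only what succeeded */
            remove_item(&items[i]);

    return ret;
}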
diff --git a/drivers/pcmcia/sa11xx_base.h b/drivers/pcmcia/sa11xx_base.h
index 7bc208280527..3d76d720f463 100644
--- a/drivers/pcmcia/sa11xx_base.h
+++ b/drivers/pcmcia/sa11xx_base.h
@@ -118,6 +118,8 @@ static inline unsigned int sa1100_pcmcia_cmd_time(unsigned int cpu_clock_khz,
 }
 
 
+int sa11xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
+void sa11xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
 extern int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr);
 
 #endif  /* !defined(_PCMCIA_SA1100_H) */
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index ef7e9e58782b..6f1a86b43c60 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -144,10 +144,10 @@ soc_common_pcmcia_config_skt(struct soc_pcmcia_socket *skt, socket_state_t *stat
      */
     if (skt->irq_state != 1 && state->io_irq) {
         skt->irq_state = 1;
-        set_irq_type(skt->irq, IRQ_TYPE_EDGE_FALLING);
+        set_irq_type(skt->socket.pci_irq, IRQ_TYPE_EDGE_FALLING);
     } else if (skt->irq_state == 1 && state->io_irq == 0) {
         skt->irq_state = 0;
-        set_irq_type(skt->irq, IRQ_TYPE_NONE);
+        set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE);
     }
 
     skt->cs_state = *state;
@@ -492,7 +492,8 @@ static ssize_t show_status(struct device *dev, struct device_attribute *attr, ch
 
     p+=sprintf(p, "Vcc : %d\n", skt->cs_state.Vcc);
     p+=sprintf(p, "Vpp : %d\n", skt->cs_state.Vpp);
-    p+=sprintf(p, "IRQ : %d (%d)\n", skt->cs_state.io_irq, skt->irq);
+    p+=sprintf(p, "IRQ : %d (%d)\n", skt->cs_state.io_irq,
+        skt->socket.pci_irq);
     if (skt->ops->show_timing)
         p+=skt->ops->show_timing(skt, p);
 
@@ -574,7 +575,7 @@ void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt,
 EXPORT_SYMBOL(soc_pcmcia_enable_irqs);
 
 
-LIST_HEAD(soc_pcmcia_sockets);
+static LIST_HEAD(soc_pcmcia_sockets);
 static DEFINE_MUTEX(soc_pcmcia_sockets_lock);
 
 #ifdef CONFIG_CPU_FREQ
@@ -609,177 +610,137 @@ static int soc_pcmcia_cpufreq_register(void)
609 "notifier for PCMCIA (%d)\n", ret); 610 "notifier for PCMCIA (%d)\n", ret);
610 return ret; 611 return ret;
611} 612}
613fs_initcall(soc_pcmcia_cpufreq_register);
612 614
613static void soc_pcmcia_cpufreq_unregister(void) 615static void soc_pcmcia_cpufreq_unregister(void)
614{ 616{
615 cpufreq_unregister_notifier(&soc_pcmcia_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); 617 cpufreq_unregister_notifier(&soc_pcmcia_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
616} 618}
619module_exit(soc_pcmcia_cpufreq_unregister);
617 620
618#else
619static int soc_pcmcia_cpufreq_register(void) { return 0; }
620static void soc_pcmcia_cpufreq_unregister(void) {}
621#endif 621#endif
622 622
623int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, 623void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt)
624 struct skt_dev_info *sinfo)
625{ 624{
626 struct soc_pcmcia_socket *skt;
627 int ret, i;
628
629 mutex_lock(&soc_pcmcia_sockets_lock); 625 mutex_lock(&soc_pcmcia_sockets_lock);
626 del_timer_sync(&skt->poll_timer);
630 627
631 /* 628 pcmcia_unregister_socket(&skt->socket);
632 * Initialise the per-socket structure.
633 */
634 for (i = 0; i < sinfo->nskt; i++) {
635 skt = &sinfo->skt[i];
636 629
637 skt->socket.ops = &soc_common_pcmcia_operations; 630 flush_scheduled_work();
638 skt->socket.owner = ops->owner;
639 skt->socket.dev.parent = dev;
640 631
641 init_timer(&skt->poll_timer); 632 skt->ops->hw_shutdown(skt);
642 skt->poll_timer.function = soc_common_pcmcia_poll_event;
643 skt->poll_timer.data = (unsigned long)skt;
644 skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
645 633
646 skt->dev = dev; 634 soc_common_pcmcia_config_skt(skt, &dead_socket);
647 skt->ops = ops;
648 635
649 ret = request_resource(&iomem_resource, &skt->res_skt); 636 list_del(&skt->node);
650 if (ret) 637 mutex_unlock(&soc_pcmcia_sockets_lock);
651 goto out_err_1;
652 638
653 ret = request_resource(&skt->res_skt, &skt->res_io); 639 iounmap(skt->virt_io);
654 if (ret) 640 skt->virt_io = NULL;
655 goto out_err_2; 641 release_resource(&skt->res_attr);
642 release_resource(&skt->res_mem);
643 release_resource(&skt->res_io);
644 release_resource(&skt->res_skt);
645}
646EXPORT_SYMBOL(soc_pcmcia_remove_one);
656 647
657 ret = request_resource(&skt->res_skt, &skt->res_mem); 648int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
658 if (ret) 649{
659 goto out_err_3; 650 int ret;
660 651
661 ret = request_resource(&skt->res_skt, &skt->res_attr); 652 init_timer(&skt->poll_timer);
662 if (ret) 653 skt->poll_timer.function = soc_common_pcmcia_poll_event;
663 goto out_err_4; 654 skt->poll_timer.data = (unsigned long)skt;
655 skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
664 656
665 skt->virt_io = ioremap(skt->res_io.start, 0x10000); 657 ret = request_resource(&iomem_resource, &skt->res_skt);
666 if (skt->virt_io == NULL) { 658 if (ret)
667 ret = -ENOMEM; 659 goto out_err_1;
668 goto out_err_5;
669 }
670 660
671 if (list_empty(&soc_pcmcia_sockets)) 661 ret = request_resource(&skt->res_skt, &skt->res_io);
672 soc_pcmcia_cpufreq_register(); 662 if (ret)
663 goto out_err_2;
673 664
674 list_add(&skt->node, &soc_pcmcia_sockets); 665 ret = request_resource(&skt->res_skt, &skt->res_mem);
666 if (ret)
667 goto out_err_3;
675 668
676 /* 669 ret = request_resource(&skt->res_skt, &skt->res_attr);
677 * We initialize default socket timing here, because 670 if (ret)
678 * we are not guaranteed to see a SetIOMap operation at 671 goto out_err_4;
679 * runtime.
680 */
681 ops->set_timing(skt);
682 672
683 ret = ops->hw_init(skt); 673 skt->virt_io = ioremap(skt->res_io.start, 0x10000);
684 if (ret) 674 if (skt->virt_io == NULL) {
685 goto out_err_6; 675 ret = -ENOMEM;
676 goto out_err_5;
677 }
686 678
687 skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD; 679 mutex_lock(&soc_pcmcia_sockets_lock);
688 skt->socket.resource_ops = &pccard_static_ops;
689 skt->socket.irq_mask = 0;
690 skt->socket.map_size = PAGE_SIZE;
691 skt->socket.pci_irq = skt->irq;
692 skt->socket.io_offset = (unsigned long)skt->virt_io;
693 680
694 skt->status = soc_common_pcmcia_skt_state(skt); 681 list_add(&skt->node, &soc_pcmcia_sockets);
695 682
696 ret = pcmcia_register_socket(&skt->socket); 683 /*
697 if (ret) 684 * We initialize default socket timing here, because
698 goto out_err_7; 685 * we are not guaranteed to see a SetIOMap operation at
686 * runtime.
687 */
688 skt->ops->set_timing(skt);
699 689
700 WARN_ON(skt->socket.sock != i); 690 ret = skt->ops->hw_init(skt);
691 if (ret)
692 goto out_err_6;
701 693
702 add_timer(&skt->poll_timer); 694 skt->socket.ops = &soc_common_pcmcia_operations;
695 skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
696 skt->socket.resource_ops = &pccard_static_ops;
697 skt->socket.irq_mask = 0;
698 skt->socket.map_size = PAGE_SIZE;
699 skt->socket.io_offset = (unsigned long)skt->virt_io;
703 700
704 ret = device_create_file(&skt->socket.dev, &dev_attr_status); 701 skt->status = soc_common_pcmcia_skt_state(skt);
705 if (ret)
706 goto out_err_8;
707 }
708 702
709 dev_set_drvdata(dev, sinfo); 703 ret = pcmcia_register_socket(&skt->socket);
710 ret = 0; 704 if (ret)
711 goto out; 705 goto out_err_7;
712 706
713 do { 707 add_timer(&skt->poll_timer);
714 skt = &sinfo->skt[i]; 708
709 mutex_unlock(&soc_pcmcia_sockets_lock);
710
711 ret = device_create_file(&skt->socket.dev, &dev_attr_status);
712 if (ret)
713 goto out_err_8;
714
715 return ret;
715 716
716 device_remove_file(&skt->socket.dev, &dev_attr_status);
717 out_err_8: 717 out_err_8:
718 del_timer_sync(&skt->poll_timer); 718 mutex_lock(&soc_pcmcia_sockets_lock);
719 pcmcia_unregister_socket(&skt->socket); 719 del_timer_sync(&skt->poll_timer);
720 pcmcia_unregister_socket(&skt->socket);
720 721
721 out_err_7: 722 out_err_7:
722 flush_scheduled_work(); 723 flush_scheduled_work();
723 724
724 ops->hw_shutdown(skt); 725 skt->ops->hw_shutdown(skt);
725 out_err_6: 726 out_err_6:
726 list_del(&skt->node); 727 list_del(&skt->node);
727 iounmap(skt->virt_io); 728 mutex_unlock(&soc_pcmcia_sockets_lock);
729 iounmap(skt->virt_io);
728 out_err_5: 730 out_err_5:
729 release_resource(&skt->res_attr); 731 release_resource(&skt->res_attr);
730 out_err_4: 732 out_err_4:
731 release_resource(&skt->res_mem); 733 release_resource(&skt->res_mem);
732 out_err_3: 734 out_err_3:
733 release_resource(&skt->res_io); 735 release_resource(&skt->res_io);
734 out_err_2: 736 out_err_2:
735 release_resource(&skt->res_skt); 737 release_resource(&skt->res_skt);
736 out_err_1: 738 out_err_1:
737 i--;
738 } while (i > 0);
739 739
740 kfree(sinfo);
741
742 out:
743 mutex_unlock(&soc_pcmcia_sockets_lock);
744 return ret; 740 return ret;
745} 741}
742EXPORT_SYMBOL(soc_pcmcia_add_one);
746 743
747int soc_common_drv_pcmcia_remove(struct device *dev) 744MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
748{ 745MODULE_DESCRIPTION("Linux PCMCIA Card Services: Common SoC support");
749 struct skt_dev_info *sinfo = dev_get_drvdata(dev); 746MODULE_LICENSE("Dual MPL/GPL");
750 int i;
751
752 dev_set_drvdata(dev, NULL);
753
754 mutex_lock(&soc_pcmcia_sockets_lock);
755 for (i = 0; i < sinfo->nskt; i++) {
756 struct soc_pcmcia_socket *skt = &sinfo->skt[i];
757
758 del_timer_sync(&skt->poll_timer);
759
760 pcmcia_unregister_socket(&skt->socket);
761
762 flush_scheduled_work();
763
764 skt->ops->hw_shutdown(skt);
765
766 soc_common_pcmcia_config_skt(skt, &dead_socket);
767
768 list_del(&skt->node);
769 iounmap(skt->virt_io);
770 skt->virt_io = NULL;
771 release_resource(&skt->res_attr);
772 release_resource(&skt->res_mem);
773 release_resource(&skt->res_io);
774 release_resource(&skt->res_skt);
775 }
776 if (list_empty(&soc_pcmcia_sockets))
777 soc_pcmcia_cpufreq_unregister();
778
779 mutex_unlock(&soc_pcmcia_sockets_lock);
780
781 kfree(sinfo);
782
783 return 0;
784}
785EXPORT_SYMBOL(soc_common_drv_pcmcia_remove);
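
soc_pcmcia_add_one() above uses the conventional kernel goto ladder: each out_err_N label releases exactly what was acquired before the failing step, in reverse order, so nothing is leaked and nothing is released twice. A stripped-down sketch of the same structure over three nested resources (acquire_three() is a made-up name; request_resource()/release_resource() are the kernel's):

int acquire_three(struct resource *a, struct resource *b, struct resource *c)
{
    int ret;

    ret = request_resource(&iomem_resource, a);
    if (ret)
        goto err_a;
    ret = request_resource(a, b);
    if (ret)
        goto err_b;
    ret = request_resource(a, c);
    if (ret)
        goto err_c;
    return 0;

 err_c:
    release_resource(b);    /* undo step 2 */
 err_b:
    release_resource(a);    /* undo step 1 */
 err_a:
    return ret;
}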
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 290e143839ee..e40824ce6b0b 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -30,14 +30,12 @@ struct soc_pcmcia_socket {
     /*
      * Info from low level handler
      */
-    struct device       *dev;
     unsigned int        nr;
-    unsigned int        irq;
 
     /*
      * Core PCMCIA state
      */
-    struct pcmcia_low_level *ops;
+    const struct pcmcia_low_level *ops;
 
     unsigned int        status;
     socket_state_t      cs_state;
@@ -135,10 +133,8 @@ extern void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt, struct pcmcia_
 extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_pcmcia_timing *);
 
 
-extern struct list_head soc_pcmcia_sockets;
-
-extern int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, struct skt_dev_info *sinfo);
-extern int soc_common_drv_pcmcia_remove(struct device *dev);
+void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt);
+int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 
 
 #ifdef CONFIG_PCMCIA_DEBUG
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 6918849d511e..12c49ee135e1 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -55,21 +55,6 @@
 #include <pcmcia/ss.h>
 #include "tcic.h"
 
-#ifdef CONFIG_PCMCIA_DEBUG
-static int pc_debug;
-
-module_param(pc_debug, int, 0644);
-static const char version[] =
-"tcic.c 1.111 2000/02/15 04:13:12 (David Hinds)";
-
-#define debug(lvl, fmt, arg...) do {            \
-    if (pc_debug > (lvl))                       \
-        printk(KERN_DEBUG "tcic: " fmt , ## arg);   \
-} while (0)
-#else
-#define debug(lvl, fmt, arg...) do { } while (0)
-#endif
-
 MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
 MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver");
 MODULE_LICENSE("Dual MPL/GPL");
@@ -574,7 +559,7 @@ static irqreturn_t tcic_interrupt(int irq, void *dev)
     } else
         active = 1;
 
-    debug(2, "tcic_interrupt()\n");
+    pr_debug("tcic_interrupt()\n");
 
     for (i = 0; i < sockets; i++) {
         psock = socket_table[i].psock;
@@ -611,13 +596,13 @@ static irqreturn_t tcic_interrupt(int irq, void *dev)
     }
     active = 0;
 
-    debug(2, "interrupt done\n");
+    pr_debug("interrupt done\n");
     return IRQ_HANDLED;
 } /* tcic_interrupt */
 
 static void tcic_timer(u_long data)
 {
-    debug(2, "tcic_timer()\n");
+    pr_debug("tcic_timer()\n");
     tcic_timer_pending = 0;
     tcic_interrupt(0, NULL);
 } /* tcic_timer */
@@ -644,7 +629,7 @@ static int tcic_get_status(struct pcmcia_socket *sock, u_int *value)
     reg = tcic_getb(TCIC_PWR);
     if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock)))
         *value |= SS_POWERON;
-    debug(1, "GetStatus(%d) = %#2.2x\n", psock, *value);
+    dev_dbg(&sock->dev, "GetStatus(%d) = %#2.2x\n", psock, *value);
     return 0;
 } /* tcic_get_status */
 
@@ -656,7 +641,7 @@ static int tcic_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
     u_char reg;
     u_short scf1, scf2;
 
-    debug(1, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
+    dev_dbg(&sock->dev, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
           "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags,
           state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
     tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG);
@@ -731,7 +716,7 @@ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
     u_int addr;
     u_short base, len, ioctl;
 
-    debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, "
+    dev_dbg(&sock->dev, "SetIOMap(%d, %d, %#2.2x, %d ns, "
           "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed,
           (unsigned long long)io->start, (unsigned long long)io->stop);
     if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
@@ -768,7 +753,7 @@ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
     u_short addr, ctl;
     u_long base, len, mmap;
 
-    debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, "
+    dev_dbg(&sock->dev, "SetMemMap(%d, %d, %#2.2x, %d ns, "
           "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags,
           mem->speed, (unsigned long long)mem->res->start,
           (unsigned long long)mem->res->end, mem->card_start);
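
The tcic changes drop the driver-private pc_debug/debug() machinery in favour of the generic pr_debug()/dev_dbg() helpers, which compile away unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled and can then be switched per call site at run time rather than through a module parameter. A rough illustration (the function is hypothetical; the dev_dbg() form matches the tcic_get_status() conversion above):

static int example_get_status(struct pcmcia_socket *sock, unsigned int *value)
{
    *value = 0;

    /* message is tagged with the socket's struct device */
    dev_dbg(&sock->dev, "GetStatus = %#2.2x\n", *value);

    /* plain pr_debug() where no device pointer is at hand */
    pr_debug("example: status polled\n");

    return 0;
}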
diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
index edccfa5bb400..615a45a8fe86 100644
--- a/drivers/pcmcia/topic.h
+++ b/drivers/pcmcia/topic.h
@@ -114,22 +114,17 @@ static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
         reg_zv |= TOPIC97_ZV_CONTROL_ENABLE;
         config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv);
 
-        reg = config_readb(socket, TOPIC97_MISC2);
-        reg |= TOPIC97_MISC2_ZV_ENABLE;
-        config_writeb(socket, TOPIC97_MISC2, reg);
-
-        /* not sure this is needed, doc is unclear */
-#if 0
         reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH);
         reg |= TOPIC97_AVS_AUDIO_CONTROL | TOPIC97_AVS_VIDEO_CONTROL;
         config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg);
-#endif
-    }
-    else {
+    } else {
         reg_zv &= ~TOPIC97_ZV_CONTROL_ENABLE;
         config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv);
-    }
 
+        reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH);
+        reg &= ~(TOPIC97_AVS_AUDIO_CONTROL | TOPIC97_AVS_VIDEO_CONTROL);
+        config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg);
+    }
 }
134 129
135static int topic97_override(struct yenta_socket *socket) 130static int topic97_override(struct yenta_socket *socket)
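
The topic97 change makes disable symmetric with enable: the same TOPIC97_AUDIO_VIDEO_SWITCH bits that are set when zoom video is switched on are now cleared when it is switched off, instead of being left set. The underlying read-modify-write pattern, as a sketch (update_avs_bits() is a placeholder; config_readb()/config_writeb() are the accessors used above):

static void update_avs_bits(struct yenta_socket *socket, u8 mask, bool enable)
{
    u8 reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH);

    if (enable)
        reg |= mask;
    else
        reg &= ~mask;

    config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg);
}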
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 1d8d9879d3a1..48857008758c 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -167,6 +167,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
         return -ENOMEM;
     }
 
+    isink->wm831x = wm831x;
+
     res = platform_get_resource(pdev, IORESOURCE_IO, 0);
     if (res == NULL) {
         dev_err(&pdev->dev, "No I/O resource\n");
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 33a10c47260e..4c5d5d0c4cfc 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -292,8 +292,9 @@ static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
             &pcf50633_rtc_ops, THIS_MODULE);
 
     if (IS_ERR(rtc->rtc_dev)) {
+        int ret = PTR_ERR(rtc->rtc_dev);
         kfree(rtc);
-        return PTR_ERR(rtc->rtc_dev);
+        return ret;
     }
 
     pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
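
The pcf50633 hunk fixes a small use-after-free: the error code embedded in rtc->rtc_dev was being read after kfree(rtc) had already released the structure containing it. The general rule is to capture PTR_ERR() before freeing the enclosing object; a sketch with made-up foo/wrapper names (kzalloc/IS_ERR/PTR_ERR are the usual kernel helpers):

struct foo;
struct foo *foo_register(struct device *parent);

struct wrapper {
    struct foo *foo;
};

int wrapper_probe(struct device *parent)
{
    struct wrapper *w = kzalloc(sizeof(*w), GFP_KERNEL);

    if (!w)
        return -ENOMEM;

    w->foo = foo_register(parent);
    if (IS_ERR(w->foo)) {
        int err = PTR_ERR(w->foo);  /* read before the kfree() */

        kfree(w);
        return err;
    }

    /* normally w would be stored in drvdata here */
    return 0;
}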
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 310c10795e9a..6583c1a8b070 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -195,7 +195,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
         /* year, since the rtc epoch*/
         buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
         buf[CCR_WDAY] = tm->tm_wday & 0x07;
-        buf[CCR_Y2K] = bin2bcd(tm->tm_year / 100);
+        buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
     }
 
     /* If writing alarm registers, set compare bits on registers 0-4 */
@@ -280,9 +280,9 @@ static int x1205_fix_osc(struct i2c_client *client)
     int err;
     struct rtc_time tm;
 
-    tm.tm_hour = tm.tm_min = tm.tm_sec = 0;
+    memset(&tm, 0, sizeof(tm));
 
-    err = x1205_set_datetime(client, &tm, 0, X1205_CCR_BASE, 0);
+    err = x1205_set_datetime(client, &tm, 1, X1205_CCR_BASE, 0);
     if (err < 0)
         dev_err(&client->dev, "unable to restart the oscillator\n");
 
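
Both x1205 hunks are correctness fixes: struct rtc_time counts years from 1900, so the century register has to be derived from tm_year + 1900, and x1205_fix_osc() now zeroes every field of the rtc_time with memset() before handing it to x1205_set_datetime(), instead of initialising only hour/min/sec. A one-line illustration of the century encoding (century_byte() is a made-up helper; bin2bcd() comes from linux/bcd.h):

#include <linux/bcd.h>

static u8 century_byte(int tm_year)         /* tm_year counts from 1900 */
{
    /* old: bin2bcd(109 / 100)          -> 0x01, the wrong century
     * new: bin2bcd((109 + 1900) / 100) -> 0x20 for a date in 2009 */
    return bin2bcd((tm_year + 1900) / 100);
}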
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 67cde0138061..528733b4a392 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -54,15 +54,6 @@
 #include <pcmcia/cistpl.h>
 #include <pcmcia/ds.h>
 
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-module_param(pc_debug, int, 0644);
-#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
-static char *version =
-"aha152x_cs.c 1.54 2000/06/12 21:27:25 (David Hinds)";
-#else
-#define DEBUG(n, args...)
-#endif
 
 /*====================================================================*/
 
@@ -103,7 +94,7 @@ static int aha152x_probe(struct pcmcia_device *link)
 {
     scsi_info_t *info;
 
-    DEBUG(0, "aha152x_attach()\n");
+    dev_dbg(&link->dev, "aha152x_attach()\n");
 
     /* Create new SCSI device */
     info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -115,7 +106,6 @@ static int aha152x_probe(struct pcmcia_device *link)
     link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
     link->io.IOAddrLines = 10;
     link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-    link->irq.IRQInfo1 = IRQ_LEVEL_ID;
     link->conf.Attributes = CONF_ENABLE_IRQ;
     link->conf.IntType = INT_MEMORY_AND_IO;
     link->conf.Present = PRESENT_OPTION;
@@ -127,7 +117,7 @@ static int aha152x_probe(struct pcmcia_device *link)
 
 static void aha152x_detach(struct pcmcia_device *link)
 {
-    DEBUG(0, "aha152x_detach(0x%p)\n", link);
+    dev_dbg(&link->dev, "aha152x_detach\n");
 
     aha152x_release_cs(link);
 
@@ -137,9 +127,6 @@ static void aha152x_detach(struct pcmcia_device *link)
 
 /*====================================================================*/
 
-#define CS_CHECK(fn, ret) \
-do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-
 static int aha152x_config_check(struct pcmcia_device *p_dev,
                 cistpl_cftable_entry_t *cfg,
                 cistpl_cftable_entry_t *dflt,
@@ -164,19 +151,22 @@ static int aha152x_config_cs(struct pcmcia_device *link)
 {
     scsi_info_t *info = link->priv;
     struct aha152x_setup s;
-    int last_ret, last_fn;
+    int ret;
     struct Scsi_Host *host;
 
-    DEBUG(0, "aha152x_config(0x%p)\n", link);
+    dev_dbg(&link->dev, "aha152x_config\n");
 
-    last_ret = pcmcia_loop_config(link, aha152x_config_check, NULL);
-    if (last_ret) {
-        cs_error(link, RequestIO, last_ret);
-        goto failed;
-    }
+    ret = pcmcia_loop_config(link, aha152x_config_check, NULL);
+    if (ret)
+        goto failed;
 
-    CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
-    CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
+    ret = pcmcia_request_irq(link, &link->irq);
+    if (ret)
+        goto failed;
+
+    ret = pcmcia_request_configuration(link, &link->conf);
+    if (ret)
+        goto failed;
 
     /* Set configuration options for the aha152x driver */
     memset(&s, 0, sizeof(s));
@@ -194,7 +184,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
     host = aha152x_probe_one(&s);
     if (host == NULL) {
         printk(KERN_INFO "aha152x_cs: no SCSI devices found\n");
-        goto cs_failed;
+        goto failed;
     }
 
     sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -203,8 +193,6 @@ static int aha152x_config_cs(struct pcmcia_device *link)
 
     return 0;
 
-cs_failed:
-    cs_error(link, last_fn, last_ret);
 failed:
     aha152x_release_cs(link);
     return -ENODEV;
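
aha152x_stub.c shows the conversion applied to all of the PCMCIA client drivers in this series: the CS_CHECK() macro, which hid both a goto and a cs_error() call, is replaced by plain return-value checks funnelled into a single failed: label, and the cs_error() reporting goes away with it. The resulting shape as a standalone sketch (do_config() is hypothetical; the pcmcia_* calls are the ones used above):

static int do_config(struct pcmcia_device *link)
{
    int ret;

    ret = pcmcia_request_irq(link, &link->irq);
    if (ret)
        goto failed;

    ret = pcmcia_request_configuration(link, &link->conf);
    if (ret)
        goto failed;

    return 0;

failed:
    pcmcia_disable_device(link);
    return -ENODEV;
}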
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 06254f46a0dd..914040684079 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -59,16 +59,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
59MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver"); 59MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver");
60MODULE_LICENSE("Dual MPL/GPL"); 60MODULE_LICENSE("Dual MPL/GPL");
61 61
62#ifdef PCMCIA_DEBUG
63static int pc_debug = PCMCIA_DEBUG;
64module_param(pc_debug, int, 0);
65#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
66static char *version =
67"fdomain_cs.c 1.47 2001/10/13 00:08:52 (David Hinds)";
68#else
69#define DEBUG(n, args...)
70#endif
71
72/*====================================================================*/ 62/*====================================================================*/
73 63
74typedef struct scsi_info_t { 64typedef struct scsi_info_t {
@@ -86,7 +76,7 @@ static int fdomain_probe(struct pcmcia_device *link)
86{ 76{
87 scsi_info_t *info; 77 scsi_info_t *info;
88 78
89 DEBUG(0, "fdomain_attach()\n"); 79 dev_dbg(&link->dev, "fdomain_attach()\n");
90 80
91 /* Create new SCSI device */ 81 /* Create new SCSI device */
92 info = kzalloc(sizeof(*info), GFP_KERNEL); 82 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -99,7 +89,6 @@ static int fdomain_probe(struct pcmcia_device *link)
99 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 89 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
100 link->io.IOAddrLines = 10; 90 link->io.IOAddrLines = 10;
101 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 91 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
102 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
103 link->conf.Attributes = CONF_ENABLE_IRQ; 92 link->conf.Attributes = CONF_ENABLE_IRQ;
104 link->conf.IntType = INT_MEMORY_AND_IO; 93 link->conf.IntType = INT_MEMORY_AND_IO;
105 link->conf.Present = PRESENT_OPTION; 94 link->conf.Present = PRESENT_OPTION;
@@ -111,7 +100,7 @@ static int fdomain_probe(struct pcmcia_device *link)
111 100
112static void fdomain_detach(struct pcmcia_device *link) 101static void fdomain_detach(struct pcmcia_device *link)
113{ 102{
114 DEBUG(0, "fdomain_detach(0x%p)\n", link); 103 dev_dbg(&link->dev, "fdomain_detach\n");
115 104
116 fdomain_release(link); 105 fdomain_release(link);
117 106
@@ -120,9 +109,6 @@ static void fdomain_detach(struct pcmcia_device *link)
120 109
121/*====================================================================*/ 110/*====================================================================*/
122 111
123#define CS_CHECK(fn, ret) \
124do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
125
126static int fdomain_config_check(struct pcmcia_device *p_dev, 112static int fdomain_config_check(struct pcmcia_device *p_dev,
127 cistpl_cftable_entry_t *cfg, 113 cistpl_cftable_entry_t *cfg,
128 cistpl_cftable_entry_t *dflt, 114 cistpl_cftable_entry_t *dflt,
@@ -137,20 +123,22 @@ static int fdomain_config_check(struct pcmcia_device *p_dev,
137static int fdomain_config(struct pcmcia_device *link) 123static int fdomain_config(struct pcmcia_device *link)
138{ 124{
139 scsi_info_t *info = link->priv; 125 scsi_info_t *info = link->priv;
140 int last_ret, last_fn; 126 int ret;
141 char str[22]; 127 char str[22];
142 struct Scsi_Host *host; 128 struct Scsi_Host *host;
143 129
144 DEBUG(0, "fdomain_config(0x%p)\n", link); 130 dev_dbg(&link->dev, "fdomain_config\n");
145 131
146 last_ret = pcmcia_loop_config(link, fdomain_config_check, NULL); 132 ret = pcmcia_loop_config(link, fdomain_config_check, NULL);
147 if (last_ret) { 133 if (ret)
148 cs_error(link, RequestIO, last_ret);
149 goto failed; 134 goto failed;
150 }
151 135
152 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 136 ret = pcmcia_request_irq(link, &link->irq);
153 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 137 if (ret)
138 goto failed;
139 ret = pcmcia_request_configuration(link, &link->conf);
140 if (ret)
141 goto failed;
154 142
155 /* A bad hack... */ 143 /* A bad hack... */
156 release_region(link->io.BasePort1, link->io.NumPorts1); 144 release_region(link->io.BasePort1, link->io.NumPorts1);
@@ -162,11 +150,11 @@ static int fdomain_config(struct pcmcia_device *link)
162 host = __fdomain_16x0_detect(&fdomain_driver_template); 150 host = __fdomain_16x0_detect(&fdomain_driver_template);
163 if (!host) { 151 if (!host) {
164 printk(KERN_INFO "fdomain_cs: no SCSI devices found\n"); 152 printk(KERN_INFO "fdomain_cs: no SCSI devices found\n");
165 goto cs_failed; 153 goto failed;
166 } 154 }
167 155
168 if (scsi_add_host(host, NULL)) 156 if (scsi_add_host(host, NULL))
169 goto cs_failed; 157 goto failed;
170 scsi_scan_host(host); 158 scsi_scan_host(host);
171 159
172 sprintf(info->node.dev_name, "scsi%d", host->host_no); 160 sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -175,8 +163,6 @@ static int fdomain_config(struct pcmcia_device *link)
175 163
176 return 0; 164 return 0;
177 165
178cs_failed:
179 cs_error(link, last_fn, last_ret);
180failed: 166failed:
181 fdomain_release(link); 167 fdomain_release(link);
182 return -ENODEV; 168 return -ENODEV;
@@ -188,7 +174,7 @@ static void fdomain_release(struct pcmcia_device *link)
188{ 174{
189 scsi_info_t *info = link->priv; 175 scsi_info_t *info = link->priv;
190 176
191 DEBUG(0, "fdomain_release(0x%p)\n", link); 177 dev_dbg(&link->dev, "fdomain_release\n");
192 178
193 scsi_remove_host(info->host); 179 scsi_remove_host(info->host);
194 pcmcia_disable_device(link); 180 pcmcia_disable_device(link);
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index e32c344d7ad8..c2341af587a3 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1564,12 +1564,10 @@ static int nsp_cs_probe(struct pcmcia_device *link)
1564 link->io.IOAddrLines = 10; /* not used */ 1564 link->io.IOAddrLines = 10; /* not used */
1565 1565
1566 /* Interrupt setup */ 1566 /* Interrupt setup */
1567 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 1567 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
1568 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
1569 1568
1570 /* Interrupt handler */ 1569 /* Interrupt handler */
1571 link->irq.Handler = &nspintr; 1570 link->irq.Handler = &nspintr;
1572 link->irq.Instance = info;
1573 link->irq.Attributes |= IRQF_SHARED; 1571 link->irq.Attributes |= IRQF_SHARED;
1574 1572
1575 /* General socket configuration */ 1573 /* General socket configuration */
@@ -1684,10 +1682,10 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev,
1684 if (cfg_mem->req.Size < 0x1000) 1682 if (cfg_mem->req.Size < 0x1000)
1685 cfg_mem->req.Size = 0x1000; 1683 cfg_mem->req.Size = 0x1000;
1686 cfg_mem->req.AccessSpeed = 0; 1684 cfg_mem->req.AccessSpeed = 0;
1687 if (pcmcia_request_window(&p_dev, &cfg_mem->req, &p_dev->win) != 0) 1685 if (pcmcia_request_window(p_dev, &cfg_mem->req, &p_dev->win) != 0)
1688 goto next_entry; 1686 goto next_entry;
1689 map.Page = 0; map.CardOffset = mem->win[0].card_addr; 1687 map.Page = 0; map.CardOffset = mem->win[0].card_addr;
1690 if (pcmcia_map_mem_page(p_dev->win, &map) != 0) 1688 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
1691 goto next_entry; 1689 goto next_entry;
1692 1690
1693 cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size); 1691 cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size);
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 20c3e5e6d88a..f85f094870b4 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -62,15 +62,6 @@
62 62
63static char qlogic_name[] = "qlogic_cs"; 63static char qlogic_name[] = "qlogic_cs";
64 64
65#ifdef PCMCIA_DEBUG
66static int pc_debug = PCMCIA_DEBUG;
67module_param(pc_debug, int, 0644);
68#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
69static char *version = "qlogic_cs.c 1.79-ac 2002/10/26 (David Hinds)";
70#else
71#define DEBUG(n, args...)
72#endif
73
74static struct scsi_host_template qlogicfas_driver_template = { 65static struct scsi_host_template qlogicfas_driver_template = {
75 .module = THIS_MODULE, 66 .module = THIS_MODULE,
76 .name = qlogic_name, 67 .name = qlogic_name,
@@ -159,7 +150,7 @@ static int qlogic_probe(struct pcmcia_device *link)
159{ 150{
160 scsi_info_t *info; 151 scsi_info_t *info;
161 152
162 DEBUG(0, "qlogic_attach()\n"); 153 dev_dbg(&link->dev, "qlogic_attach()\n");
163 154
164 /* Create new SCSI device */ 155 /* Create new SCSI device */
165 info = kzalloc(sizeof(*info), GFP_KERNEL); 156 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -171,7 +162,6 @@ static int qlogic_probe(struct pcmcia_device *link)
171 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 162 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
172 link->io.IOAddrLines = 10; 163 link->io.IOAddrLines = 10;
173 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 164 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
174 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
175 link->conf.Attributes = CONF_ENABLE_IRQ; 165 link->conf.Attributes = CONF_ENABLE_IRQ;
176 link->conf.IntType = INT_MEMORY_AND_IO; 166 link->conf.IntType = INT_MEMORY_AND_IO;
177 link->conf.Present = PRESENT_OPTION; 167 link->conf.Present = PRESENT_OPTION;
@@ -183,7 +173,7 @@ static int qlogic_probe(struct pcmcia_device *link)
183 173
184static void qlogic_detach(struct pcmcia_device *link) 174static void qlogic_detach(struct pcmcia_device *link)
185{ 175{
186 DEBUG(0, "qlogic_detach(0x%p)\n", link); 176 dev_dbg(&link->dev, "qlogic_detach\n");
187 177
188 qlogic_release(link); 178 qlogic_release(link);
189 kfree(link->priv); 179 kfree(link->priv);
@@ -192,9 +182,6 @@ static void qlogic_detach(struct pcmcia_device *link)
192 182
193/*====================================================================*/ 183/*====================================================================*/
194 184
195#define CS_CHECK(fn, ret) \
196do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
197
198static int qlogic_config_check(struct pcmcia_device *p_dev, 185static int qlogic_config_check(struct pcmcia_device *p_dev,
199 cistpl_cftable_entry_t *cfg, 186 cistpl_cftable_entry_t *cfg,
200 cistpl_cftable_entry_t *dflt, 187 cistpl_cftable_entry_t *dflt,
@@ -213,19 +200,22 @@ static int qlogic_config_check(struct pcmcia_device *p_dev,
213static int qlogic_config(struct pcmcia_device * link) 200static int qlogic_config(struct pcmcia_device * link)
214{ 201{
215 scsi_info_t *info = link->priv; 202 scsi_info_t *info = link->priv;
216 int last_ret, last_fn; 203 int ret;
217 struct Scsi_Host *host; 204 struct Scsi_Host *host;
218 205
219 DEBUG(0, "qlogic_config(0x%p)\n", link); 206 dev_dbg(&link->dev, "qlogic_config\n");
220 207
221 last_ret = pcmcia_loop_config(link, qlogic_config_check, NULL); 208 ret = pcmcia_loop_config(link, qlogic_config_check, NULL);
222 if (last_ret) { 209 if (ret)
223 cs_error(link, RequestIO, last_ret); 210 goto failed;
211
212 ret = pcmcia_request_irq(link, &link->irq);
213 if (ret)
224 goto failed; 214 goto failed;
225 }
226 215
227 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 216 ret = pcmcia_request_configuration(link, &link->conf);
228 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 217 if (ret)
218 goto failed;
229 219
230 if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { 220 if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) {
231 /* set ATAcmd */ 221 /* set ATAcmd */
@@ -244,7 +234,7 @@ static int qlogic_config(struct pcmcia_device * link)
244 234
245 if (!host) { 235 if (!host) {
246 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); 236 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
247 goto cs_failed; 237 goto failed;
248 } 238 }
249 239
250 sprintf(info->node.dev_name, "scsi%d", host->host_no); 240 sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -253,12 +243,9 @@ static int qlogic_config(struct pcmcia_device * link)
253 243
254 return 0; 244 return 0;
255 245
256cs_failed:
257 cs_error(link, last_fn, last_ret);
258 pcmcia_disable_device(link);
259failed: 246failed:
247 pcmcia_disable_device(link);
260 return -ENODEV; 248 return -ENODEV;
261
262} /* qlogic_config */ 249} /* qlogic_config */
263 250
264/*====================================================================*/ 251/*====================================================================*/
@@ -267,7 +254,7 @@ static void qlogic_release(struct pcmcia_device *link)
267{ 254{
268 scsi_info_t *info = link->priv; 255 scsi_info_t *info = link->priv;
269 256
270 DEBUG(0, "qlogic_release(0x%p)\n", link); 257 dev_dbg(&link->dev, "qlogic_release\n");
271 258
272 scsi_remove_host(info->host); 259 scsi_remove_host(info->host);
273 260
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index b330c11a1752..e7564d8f0cbf 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -77,17 +77,6 @@
77#include <pcmcia/ds.h> 77#include <pcmcia/ds.h>
78#include <pcmcia/ciscode.h> 78#include <pcmcia/ciscode.h>
79 79
80/* ================================================================== */
81
82#ifdef PCMCIA_DEBUG
83static int pc_debug = PCMCIA_DEBUG;
84module_param(pc_debug, int, 0);
85#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
86static char *version =
87"sym53c500_cs.c 0.9c 2004/10/27 (Bob Tracy)";
88#else
89#define DEBUG(n, args...)
90#endif
91 80
92/* ================================================================== */ 81/* ================================================================== */
93 82
@@ -525,7 +514,7 @@ SYM53C500_release(struct pcmcia_device *link)
525 struct scsi_info_t *info = link->priv; 514 struct scsi_info_t *info = link->priv;
526 struct Scsi_Host *shost = info->host; 515 struct Scsi_Host *shost = info->host;
527 516
528 DEBUG(0, "SYM53C500_release(0x%p)\n", link); 517 dev_dbg(&link->dev, "SYM53C500_release\n");
529 518
530 /* 519 /*
531 * Do this before releasing/freeing resources. 520 * Do this before releasing/freeing resources.
@@ -697,9 +686,6 @@ static struct scsi_host_template sym53c500_driver_template = {
697 .shost_attrs = SYM53C500_shost_attrs 686 .shost_attrs = SYM53C500_shost_attrs
698}; 687};
699 688
700#define CS_CHECK(fn, ret) \
701do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
702
703static int SYM53C500_config_check(struct pcmcia_device *p_dev, 689static int SYM53C500_config_check(struct pcmcia_device *p_dev,
704 cistpl_cftable_entry_t *cfg, 690 cistpl_cftable_entry_t *cfg,
705 cistpl_cftable_entry_t *dflt, 691 cistpl_cftable_entry_t *dflt,
@@ -719,24 +705,27 @@ static int
719SYM53C500_config(struct pcmcia_device *link) 705SYM53C500_config(struct pcmcia_device *link)
720{ 706{
721 struct scsi_info_t *info = link->priv; 707 struct scsi_info_t *info = link->priv;
722 int last_ret, last_fn; 708 int ret;
723 int irq_level, port_base; 709 int irq_level, port_base;
724 struct Scsi_Host *host; 710 struct Scsi_Host *host;
725 struct scsi_host_template *tpnt = &sym53c500_driver_template; 711 struct scsi_host_template *tpnt = &sym53c500_driver_template;
726 struct sym53c500_data *data; 712 struct sym53c500_data *data;
727 713
728 DEBUG(0, "SYM53C500_config(0x%p)\n", link); 714 dev_dbg(&link->dev, "SYM53C500_config\n");
729 715
730 info->manf_id = link->manf_id; 716 info->manf_id = link->manf_id;
731 717
732 last_ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL); 718 ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL);
733 if (last_ret) { 719 if (ret)
734 cs_error(link, RequestIO, last_ret); 720 goto failed;
721
722 ret = pcmcia_request_irq(link, &link->irq);
723 if (ret)
735 goto failed; 724 goto failed;
736 }
737 725
738 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 726 ret = pcmcia_request_configuration(link, &link->conf);
739 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 727 if (ret)
728 goto failed;
740 729
741 /* 730 /*
742 * That's the trouble with copying liberally from another driver. 731 * That's the trouble with copying liberally from another driver.
@@ -824,8 +813,6 @@ err_release:
824 printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); 813 printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n");
825 return -ENODEV; 814 return -ENODEV;
826 815
827cs_failed:
828 cs_error(link, last_fn, last_ret);
829failed: 816failed:
830 SYM53C500_release(link); 817 SYM53C500_release(link);
831 return -ENODEV; 818 return -ENODEV;
@@ -855,7 +842,7 @@ static int sym53c500_resume(struct pcmcia_device *link)
855static void 842static void
856SYM53C500_detach(struct pcmcia_device *link) 843SYM53C500_detach(struct pcmcia_device *link)
857{ 844{
858 DEBUG(0, "SYM53C500_detach(0x%p)\n", link); 845 dev_dbg(&link->dev, "SYM53C500_detach\n");
859 846
860 SYM53C500_release(link); 847 SYM53C500_release(link);
861 848
@@ -868,7 +855,7 @@ SYM53C500_probe(struct pcmcia_device *link)
868{ 855{
869 struct scsi_info_t *info; 856 struct scsi_info_t *info;
870 857
871 DEBUG(0, "SYM53C500_attach()\n"); 858 dev_dbg(&link->dev, "SYM53C500_attach()\n");
872 859
873 /* Create new SCSI device */ 860 /* Create new SCSI device */
874 info = kzalloc(sizeof(*info), GFP_KERNEL); 861 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -880,7 +867,6 @@ SYM53C500_probe(struct pcmcia_device *link)
880 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 867 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
881 link->io.IOAddrLines = 10; 868 link->io.IOAddrLines = 10;
882 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 869 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
883 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
884 link->conf.Attributes = CONF_ENABLE_IRQ; 870 link->conf.Attributes = CONF_ENABLE_IRQ;
885 link->conf.IntType = INT_MEMORY_AND_IO; 871 link->conf.IntType = INT_MEMORY_AND_IO;
886 872
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 7c7914f5fa02..fc413f0f8dd2 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -54,14 +54,6 @@
54 54
55#include "8250.h" 55#include "8250.h"
56 56
57#ifdef PCMCIA_DEBUG
58static int pc_debug = PCMCIA_DEBUG;
59module_param(pc_debug, int, 0644);
60#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
61static char *version = "serial_cs.c 1.134 2002/05/04 05:48:53 (David Hinds)";
62#else
63#define DEBUG(n, args...)
64#endif
65 57
66/*====================================================================*/ 58/*====================================================================*/
67 59
@@ -121,24 +113,20 @@ static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_
121static int quirk_post_ibm(struct pcmcia_device *link) 113static int quirk_post_ibm(struct pcmcia_device *link)
122{ 114{
123 conf_reg_t reg = { 0, CS_READ, 0x800, 0 }; 115 conf_reg_t reg = { 0, CS_READ, 0x800, 0 };
124 int last_ret, last_fn; 116 int ret;
117
118 ret = pcmcia_access_configuration_register(link, &reg);
119 if (ret)
120 goto failed;
125 121
126 last_ret = pcmcia_access_configuration_register(link, &reg);
127 if (last_ret) {
128 last_fn = AccessConfigurationRegister;
129 goto cs_failed;
130 }
131 reg.Action = CS_WRITE; 122 reg.Action = CS_WRITE;
132 reg.Value = reg.Value | 1; 123 reg.Value = reg.Value | 1;
133 last_ret = pcmcia_access_configuration_register(link, &reg); 124 ret = pcmcia_access_configuration_register(link, &reg);
134 if (last_ret) { 125 if (ret)
135 last_fn = AccessConfigurationRegister; 126 goto failed;
136 goto cs_failed;
137 }
138 return 0; 127 return 0;
139 128
140 cs_failed: 129 failed:
141 cs_error(link, last_fn, last_ret);
142 return -ENODEV; 130 return -ENODEV;
143} 131}
144 132
@@ -283,7 +271,7 @@ static void serial_remove(struct pcmcia_device *link)
283 struct serial_info *info = link->priv; 271 struct serial_info *info = link->priv;
284 int i; 272 int i;
285 273
286 DEBUG(0, "serial_release(0x%p)\n", link); 274 dev_dbg(&link->dev, "serial_release\n");
287 275
288 /* 276 /*
289 * Recheck to see if the device is still configured. 277 * Recheck to see if the device is still configured.
@@ -334,7 +322,7 @@ static int serial_probe(struct pcmcia_device *link)
334{ 322{
335 struct serial_info *info; 323 struct serial_info *info;
336 324
337 DEBUG(0, "serial_attach()\n"); 325 dev_dbg(&link->dev, "serial_attach()\n");
338 326
339 /* Create new serial device */ 327 /* Create new serial device */
340 info = kzalloc(sizeof (*info), GFP_KERNEL); 328 info = kzalloc(sizeof (*info), GFP_KERNEL);
@@ -346,7 +334,6 @@ static int serial_probe(struct pcmcia_device *link)
346 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 334 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
347 link->io.NumPorts1 = 8; 335 link->io.NumPorts1 = 8;
348 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 336 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
349 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
350 link->conf.Attributes = CONF_ENABLE_IRQ; 337 link->conf.Attributes = CONF_ENABLE_IRQ;
351 if (do_sound) { 338 if (do_sound) {
352 link->conf.Attributes |= CONF_ENABLE_SPKR; 339 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -370,7 +357,7 @@ static void serial_detach(struct pcmcia_device *link)
370{ 357{
371 struct serial_info *info = link->priv; 358 struct serial_info *info = link->priv;
372 359
373 DEBUG(0, "serial_detach(0x%p)\n", link); 360 dev_dbg(&link->dev, "serial_detach\n");
374 361
375 /* 362 /*
376 * Ensure any outstanding scheduled tasks are completed. 363 * Ensure any outstanding scheduled tasks are completed.
@@ -399,7 +386,7 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
399 port.irq = irq; 386 port.irq = irq;
400 port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; 387 port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
401 port.uartclk = 1843200; 388 port.uartclk = 1843200;
402 port.dev = &handle_to_dev(handle); 389 port.dev = &handle->dev;
403 if (buggy_uart) 390 if (buggy_uart)
404 port.flags |= UPF_BUGGY_UART; 391 port.flags |= UPF_BUGGY_UART;
405 392
@@ -426,21 +413,6 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
426 413
427/*====================================================================*/ 414/*====================================================================*/
428 415
429static int
430first_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse)
431{
432 int i;
433 i = pcmcia_get_first_tuple(handle, tuple);
434 if (i != 0)
435 return i;
436 i = pcmcia_get_tuple_data(handle, tuple);
437 if (i != 0)
438 return i;
439 return pcmcia_parse_tuple(tuple, parse);
440}
441
442/*====================================================================*/
443
444static int simple_config_check(struct pcmcia_device *p_dev, 416static int simple_config_check(struct pcmcia_device *p_dev,
445 cistpl_cftable_entry_t *cf, 417 cistpl_cftable_entry_t *cf,
446 cistpl_cftable_entry_t *dflt, 418 cistpl_cftable_entry_t *dflt,
@@ -522,15 +494,13 @@ static int simple_config(struct pcmcia_device *link)
522 494
523 printk(KERN_NOTICE 495 printk(KERN_NOTICE
524 "serial_cs: no usable port range found, giving up\n"); 496 "serial_cs: no usable port range found, giving up\n");
525 cs_error(link, RequestIO, i);
526 return -1; 497 return -1;
527 498
528found_port: 499found_port:
529 i = pcmcia_request_irq(link, &link->irq); 500 i = pcmcia_request_irq(link, &link->irq);
530 if (i != 0) { 501 if (i != 0)
531 cs_error(link, RequestIRQ, i);
532 link->irq.AssignedIRQ = 0; 502 link->irq.AssignedIRQ = 0;
533 } 503
534 if (info->multi && (info->manfid == MANFID_3COM)) 504 if (info->multi && (info->manfid == MANFID_3COM))
535 link->conf.ConfigIndex &= ~(0x08); 505 link->conf.ConfigIndex &= ~(0x08);
536 506
@@ -541,10 +511,8 @@ found_port:
541 info->quirk->config(link); 511 info->quirk->config(link);
542 512
543 i = pcmcia_request_configuration(link, &link->conf); 513 i = pcmcia_request_configuration(link, &link->conf);
544 if (i != 0) { 514 if (i != 0)
545 cs_error(link, RequestConfiguration, i);
546 return -1; 515 return -1;
547 }
548 return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ); 516 return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
549} 517}
550 518
@@ -613,7 +581,6 @@ static int multi_config(struct pcmcia_device *link)
613 /* FIXME: comment does not fit, error handling does not fit */ 581 /* FIXME: comment does not fit, error handling does not fit */
614 printk(KERN_NOTICE 582 printk(KERN_NOTICE
615 "serial_cs: no usable port range found, giving up\n"); 583 "serial_cs: no usable port range found, giving up\n");
616 cs_error(link, RequestIRQ, i);
617 link->irq.AssignedIRQ = 0; 584 link->irq.AssignedIRQ = 0;
618 } 585 }
619 586
@@ -624,10 +591,8 @@ static int multi_config(struct pcmcia_device *link)
624 info->quirk->config(link); 591 info->quirk->config(link);
625 592
626 i = pcmcia_request_configuration(link, &link->conf); 593 i = pcmcia_request_configuration(link, &link->conf);
627 if (i != 0) { 594 if (i != 0)
628 cs_error(link, RequestConfiguration, i);
629 return -ENODEV; 595 return -ENODEV;
630 }
631 596
632 /* The Oxford Semiconductor OXCF950 cards are in fact single-port: 597 /* The Oxford Semiconductor OXCF950 cards are in fact single-port:
633 * 8 registers are for the UART, the others are extra registers. 598 * 8 registers are for the UART, the others are extra registers.
@@ -665,6 +630,25 @@ static int multi_config(struct pcmcia_device *link)
665 return 0; 630 return 0;
666} 631}
667 632
633static int serial_check_for_multi(struct pcmcia_device *p_dev,
634 cistpl_cftable_entry_t *cf,
635 cistpl_cftable_entry_t *dflt,
636 unsigned int vcc,
637 void *priv_data)
638{
639 struct serial_info *info = p_dev->priv;
640
641 if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
642 info->multi = cf->io.win[0].len >> 3;
643
644 if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) &&
645 (cf->io.win[1].len == 8))
646 info->multi = 2;
647
648 return 0; /* break */
649}
650
651
668/*====================================================================== 652/*======================================================================
669 653
670 serial_config() is scheduled to run after a CARD_INSERTION event 654 serial_config() is scheduled to run after a CARD_INSERTION event
@@ -676,46 +660,14 @@ static int multi_config(struct pcmcia_device *link)
676static int serial_config(struct pcmcia_device * link) 660static int serial_config(struct pcmcia_device * link)
677{ 661{
678 struct serial_info *info = link->priv; 662 struct serial_info *info = link->priv;
679 struct serial_cfg_mem *cfg_mem; 663 int i;
680 tuple_t *tuple;
681 u_char *buf;
682 cisparse_t *parse;
683 cistpl_cftable_entry_t *cf;
684 int i, last_ret, last_fn;
685
686 DEBUG(0, "serial_config(0x%p)\n", link);
687
688 cfg_mem = kmalloc(sizeof(struct serial_cfg_mem), GFP_KERNEL);
689 if (!cfg_mem)
690 goto failed;
691 664
692 tuple = &cfg_mem->tuple; 665 dev_dbg(&link->dev, "serial_config\n");
693 parse = &cfg_mem->parse;
694 cf = &parse->cftable_entry;
695 buf = cfg_mem->buf;
696
697 tuple->TupleData = (cisdata_t *) buf;
698 tuple->TupleOffset = 0;
699 tuple->TupleDataMax = 255;
700 tuple->Attributes = 0;
701
702 /* Get configuration register information */
703 tuple->DesiredTuple = CISTPL_CONFIG;
704 last_ret = first_tuple(link, tuple, parse);
705 if (last_ret != 0) {
706 last_fn = ParseTuple;
707 goto cs_failed;
708 }
709 link->conf.ConfigBase = parse->config.base;
710 link->conf.Present = parse->config.rmask[0];
711 666
712 /* Is this a compliant multifunction card? */ 667 /* Is this a compliant multifunction card? */
713 tuple->DesiredTuple = CISTPL_LONGLINK_MFC; 668 info->multi = (link->socket->functions > 1);
714 tuple->Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK;
715 info->multi = (first_tuple(link, tuple, parse) == 0);
716 669
717 /* Is this a multiport card? */ 670 /* Is this a multiport card? */
718 tuple->DesiredTuple = CISTPL_MANFID;
719 info->manfid = link->manf_id; 671 info->manfid = link->manf_id;
720 info->prodid = link->card_id; 672 info->prodid = link->card_id;
721 673
@@ -730,20 +682,11 @@ static int serial_config(struct pcmcia_device * link)
730 682
731 /* Another check for dual-serial cards: look for either serial or 683 /* Another check for dual-serial cards: look for either serial or
732 multifunction cards that ask for appropriate IO port ranges */ 684 multifunction cards that ask for appropriate IO port ranges */
733 tuple->DesiredTuple = CISTPL_FUNCID;
734 if ((info->multi == 0) && 685 if ((info->multi == 0) &&
735 (link->has_func_id) && 686 (link->has_func_id) &&
736 ((link->func_id == CISTPL_FUNCID_MULTI) || 687 ((link->func_id == CISTPL_FUNCID_MULTI) ||
737 (link->func_id == CISTPL_FUNCID_SERIAL))) { 688 (link->func_id == CISTPL_FUNCID_SERIAL)))
738 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; 689 pcmcia_loop_config(link, serial_check_for_multi, info);
739 if (first_tuple(link, tuple, parse) == 0) {
740 if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
741 info->multi = cf->io.win[0].len >> 3;
742 if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) &&
743 (cf->io.win[1].len == 8))
744 info->multi = 2;
745 }
746 }
747 690
748 /* 691 /*
749 * Apply any multi-port quirk. 692 * Apply any multi-port quirk.
@@ -768,14 +711,10 @@ static int serial_config(struct pcmcia_device * link)
768 goto failed; 711 goto failed;
769 712
770 link->dev_node = &info->node[0]; 713 link->dev_node = &info->node[0];
771 kfree(cfg_mem);
772 return 0; 714 return 0;
773 715
774cs_failed:
775 cs_error(link, last_fn, last_ret);
776failed: 716failed:
777 serial_remove(link); 717 serial_remove(link);
778 kfree(cfg_mem);
779 return -ENODEV; 718 return -ENODEV;
780} 719}
781 720
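
serial_cs now detects multi-port cards by letting pcmcia_loop_config() walk the configuration table and call serial_check_for_multi() for each entry, instead of the removed first_tuple() helper. A hedged sketch of how that callback style is wired up; everything except the pcmcia_* calls and the cistpl types is illustrative:

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Invoked once per CISTPL_CFTABLE_ENTRY; cfg is the parsed entry,
 * dflt the most recent default entry.  Returning 0 ends the loop. */
static int my_count_ports(struct pcmcia_device *p_dev,
                          cistpl_cftable_entry_t *cfg,
                          cistpl_cftable_entry_t *dflt,
                          unsigned int vcc,
                          void *priv_data)
{
        unsigned int *nports = priv_data;

        /* A single I/O window whose length is a multiple of 8 usually
         * means one 8250 register block per port. */
        if (cfg->io.nwin == 1 && (cfg->io.win[0].len % 8) == 0)
                *nports = cfg->io.win[0].len >> 3;

        return 0;       /* first entry is enough, stop here */
}

static unsigned int my_probe_ports(struct pcmcia_device *link)
{
        unsigned int nports = 1;

        /* pcmcia_loop_config() fetches and parses every config-table
         * entry and hands it to the callback, replacing the old
         * pcmcia_get_first_tuple()/get_next_tuple() boilerplate. */
        pcmcia_loop_config(link, my_count_ports, &nports);
        return nports;
}
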
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 96057de133ad..19f75627c3de 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -29,6 +29,8 @@
29 29
30 30
31#define SPI_FIFO_SIZE 4 31#define SPI_FIFO_SIZE 4
32#define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */
33#define SPI_MIN_DIVIDER 1 /* Min. value for SPCR1.SER */
32 34
33#define TXx9_SPMCR 0x00 35#define TXx9_SPMCR 0x00
34#define TXx9_SPCR0 0x04 36#define TXx9_SPCR0 0x04
@@ -193,11 +195,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
193 195
194 if (prev_speed_hz != speed_hz 196 if (prev_speed_hz != speed_hz
195 || prev_bits_per_word != bits_per_word) { 197 || prev_bits_per_word != bits_per_word) {
196 u32 n = (c->baseclk + speed_hz - 1) / speed_hz; 198 int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1;
197 if (n < 1) 199 n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER);
198 n = 1;
199 else if (n > 0xff)
200 n = 0xff;
201 /* enter config mode */ 200 /* enter config mode */
202 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, 201 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
203 TXx9_SPMCR); 202 TXx9_SPMCR);
@@ -370,8 +369,8 @@ static int __init txx9spi_probe(struct platform_device *dev)
370 goto exit; 369 goto exit;
371 } 370 }
372 c->baseclk = clk_get_rate(c->clk); 371 c->baseclk = clk_get_rate(c->clk);
373 c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff; 372 c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
374 c->max_speed_hz = c->baseclk; 373 c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
375 374
376 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 375 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
377 if (!res) 376 if (!res)
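
The spi_txx9 change folds the open-coded divider clamp into DIV_ROUND_UP() plus clamp(), and derives min_speed_hz/max_speed_hz from the same SPI_MIN_DIVIDER/SPI_MAX_DIVIDER constants, on the reading that SPCR1.SER holds the divider minus one. A small stand-alone sketch of the arithmetic with made-up clock numbers (the macros below re-implement the kernel helpers for user space):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define clamp(v, lo, hi)        ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

#define SPI_MAX_DIVIDER 0xff    /* max value for SPCR1.SER */
#define SPI_MIN_DIVIDER 1       /* min value for SPCR1.SER */

int main(void)
{
        unsigned int baseclk = 20000000;        /* hypothetical 20 MHz base clock */
        unsigned int speed_hz = 1000000;        /* requested 1 MHz */

        /* SER = ceil(baseclk / speed) - 1, clamped to the field's range */
        int n = DIV_ROUND_UP(baseclk, speed_hz) - 1;
        n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER);

        printf("SER=%d, actual rate=%u Hz\n", n, baseclk / (n + 1));
        printf("min=%u Hz, max=%u Hz\n",
               DIV_ROUND_UP(baseclk, SPI_MAX_DIVIDER + 1),
               baseclk / (SPI_MIN_DIVIDER + 1));
        return 0;
}
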
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index 100e7a5c5ea1..e72f4046a5e0 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -617,136 +617,140 @@ static int ssb_pcmcia_sprom_check_crc(const u16 *sprom, size_t size)
617 } \ 617 } \
618 } while (0) 618 } while (0)
619 619
620int ssb_pcmcia_get_invariants(struct ssb_bus *bus, 620static int ssb_pcmcia_get_mac(struct pcmcia_device *p_dev,
621 struct ssb_init_invariants *iv) 621 tuple_t *tuple,
622 void *priv)
622{ 623{
623 tuple_t tuple; 624 struct ssb_sprom *sprom = priv;
624 int res; 625
625 unsigned char buf[32]; 626 if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
627 return -EINVAL;
628 if (tuple->TupleDataLen != ETH_ALEN + 2)
629 return -EINVAL;
630 if (tuple->TupleData[1] != ETH_ALEN)
631 return -EINVAL;
632 memcpy(sprom->il0mac, &tuple->TupleData[2], ETH_ALEN);
633 return 0;
634};
635
636static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev,
637 tuple_t *tuple,
638 void *priv)
639{
640 struct ssb_init_invariants *iv = priv;
626 struct ssb_sprom *sprom = &iv->sprom; 641 struct ssb_sprom *sprom = &iv->sprom;
627 struct ssb_boardinfo *bi = &iv->boardinfo; 642 struct ssb_boardinfo *bi = &iv->boardinfo;
628 const char *error_description; 643 const char *error_description;
629 644
645 GOTO_ERROR_ON(tuple->TupleDataLen < 1, "VEN tpl < 1");
646 switch (tuple->TupleData[0]) {
647 case SSB_PCMCIA_CIS_ID:
648 GOTO_ERROR_ON((tuple->TupleDataLen != 5) &&
649 (tuple->TupleDataLen != 7),
650 "id tpl size");
651 bi->vendor = tuple->TupleData[1] |
652 ((u16)tuple->TupleData[2] << 8);
653 break;
654 case SSB_PCMCIA_CIS_BOARDREV:
655 GOTO_ERROR_ON(tuple->TupleDataLen != 2,
656 "boardrev tpl size");
657 sprom->board_rev = tuple->TupleData[1];
658 break;
659 case SSB_PCMCIA_CIS_PA:
660 GOTO_ERROR_ON((tuple->TupleDataLen != 9) &&
661 (tuple->TupleDataLen != 10),
662 "pa tpl size");
663 sprom->pa0b0 = tuple->TupleData[1] |
664 ((u16)tuple->TupleData[2] << 8);
665 sprom->pa0b1 = tuple->TupleData[3] |
666 ((u16)tuple->TupleData[4] << 8);
667 sprom->pa0b2 = tuple->TupleData[5] |
668 ((u16)tuple->TupleData[6] << 8);
669 sprom->itssi_a = tuple->TupleData[7];
670 sprom->itssi_bg = tuple->TupleData[7];
671 sprom->maxpwr_a = tuple->TupleData[8];
672 sprom->maxpwr_bg = tuple->TupleData[8];
673 break;
674 case SSB_PCMCIA_CIS_OEMNAME:
675 /* We ignore this. */
676 break;
677 case SSB_PCMCIA_CIS_CCODE:
678 GOTO_ERROR_ON(tuple->TupleDataLen != 2,
679 "ccode tpl size");
680 sprom->country_code = tuple->TupleData[1];
681 break;
682 case SSB_PCMCIA_CIS_ANTENNA:
683 GOTO_ERROR_ON(tuple->TupleDataLen != 2,
684 "ant tpl size");
685 sprom->ant_available_a = tuple->TupleData[1];
686 sprom->ant_available_bg = tuple->TupleData[1];
687 break;
688 case SSB_PCMCIA_CIS_ANTGAIN:
689 GOTO_ERROR_ON(tuple->TupleDataLen != 2,
690 "antg tpl size");
691 sprom->antenna_gain.ghz24.a0 = tuple->TupleData[1];
692 sprom->antenna_gain.ghz24.a1 = tuple->TupleData[1];
693 sprom->antenna_gain.ghz24.a2 = tuple->TupleData[1];
694 sprom->antenna_gain.ghz24.a3 = tuple->TupleData[1];
695 sprom->antenna_gain.ghz5.a0 = tuple->TupleData[1];
696 sprom->antenna_gain.ghz5.a1 = tuple->TupleData[1];
697 sprom->antenna_gain.ghz5.a2 = tuple->TupleData[1];
698 sprom->antenna_gain.ghz5.a3 = tuple->TupleData[1];
699 break;
700 case SSB_PCMCIA_CIS_BFLAGS:
701 GOTO_ERROR_ON((tuple->TupleDataLen != 3) &&
702 (tuple->TupleDataLen != 5),
703 "bfl tpl size");
704 sprom->boardflags_lo = tuple->TupleData[1] |
705 ((u16)tuple->TupleData[2] << 8);
706 break;
707 case SSB_PCMCIA_CIS_LEDS:
708 GOTO_ERROR_ON(tuple->TupleDataLen != 5,
709 "leds tpl size");
710 sprom->gpio0 = tuple->TupleData[1];
711 sprom->gpio1 = tuple->TupleData[2];
712 sprom->gpio2 = tuple->TupleData[3];
713 sprom->gpio3 = tuple->TupleData[4];
714 break;
715 }
716 return -ENOSPC; /* continue with next entry */
717
718error:
719 ssb_printk(KERN_ERR PFX
720 "PCMCIA: Failed to fetch device invariants: %s\n",
721 error_description);
722 return -ENODEV;
723}
724
725
726int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
727 struct ssb_init_invariants *iv)
728{
729 struct ssb_sprom *sprom = &iv->sprom;
730 int res;
731
630 memset(sprom, 0xFF, sizeof(*sprom)); 732 memset(sprom, 0xFF, sizeof(*sprom));
631 sprom->revision = 1; 733 sprom->revision = 1;
632 sprom->boardflags_lo = 0; 734 sprom->boardflags_lo = 0;
633 sprom->boardflags_hi = 0; 735 sprom->boardflags_hi = 0;
634 736
635 /* First fetch the MAC address. */ 737 /* First fetch the MAC address. */
636 memset(&tuple, 0, sizeof(tuple)); 738 res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE,
637 tuple.DesiredTuple = CISTPL_FUNCE; 739 ssb_pcmcia_get_mac, sprom);
638 tuple.TupleData = buf; 740 if (res != 0) {
639 tuple.TupleDataMax = sizeof(buf); 741 ssb_printk(KERN_ERR PFX
640 res = pcmcia_get_first_tuple(bus->host_pcmcia, &tuple); 742 "PCMCIA: Failed to fetch MAC address\n");
641 GOTO_ERROR_ON(res != 0, "MAC first tpl"); 743 return -ENODEV;
642 res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple);
643 GOTO_ERROR_ON(res != 0, "MAC first tpl data");
644 while (1) {
645 GOTO_ERROR_ON(tuple.TupleDataLen < 1, "MAC tpl < 1");
646 if (tuple.TupleData[0] == CISTPL_FUNCE_LAN_NODE_ID)
647 break;
648 res = pcmcia_get_next_tuple(bus->host_pcmcia, &tuple);
649 GOTO_ERROR_ON(res != 0, "MAC next tpl");
650 res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple);
651 GOTO_ERROR_ON(res != 0, "MAC next tpl data");
652 } 744 }
653 GOTO_ERROR_ON(tuple.TupleDataLen != ETH_ALEN + 2, "MAC tpl size");
654 memcpy(sprom->il0mac, &tuple.TupleData[2], ETH_ALEN);
655 745
656 /* Fetch the vendor specific tuples. */ 746 /* Fetch the vendor specific tuples. */
657 memset(&tuple, 0, sizeof(tuple)); 747 res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
658 tuple.DesiredTuple = SSB_PCMCIA_CIS; 748 ssb_pcmcia_do_get_invariants, sprom);
659 tuple.TupleData = buf; 749 if ((res == 0) || (res == -ENOSPC))
660 tuple.TupleDataMax = sizeof(buf); 750 return 0;
661 res = pcmcia_get_first_tuple(bus->host_pcmcia, &tuple);
662 GOTO_ERROR_ON(res != 0, "VEN first tpl");
663 res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple);
664 GOTO_ERROR_ON(res != 0, "VEN first tpl data");
665 while (1) {
666 GOTO_ERROR_ON(tuple.TupleDataLen < 1, "VEN tpl < 1");
667 switch (tuple.TupleData[0]) {
668 case SSB_PCMCIA_CIS_ID:
669 GOTO_ERROR_ON((tuple.TupleDataLen != 5) &&
670 (tuple.TupleDataLen != 7),
671 "id tpl size");
672 bi->vendor = tuple.TupleData[1] |
673 ((u16)tuple.TupleData[2] << 8);
674 break;
675 case SSB_PCMCIA_CIS_BOARDREV:
676 GOTO_ERROR_ON(tuple.TupleDataLen != 2,
677 "boardrev tpl size");
678 sprom->board_rev = tuple.TupleData[1];
679 break;
680 case SSB_PCMCIA_CIS_PA:
681 GOTO_ERROR_ON((tuple.TupleDataLen != 9) &&
682 (tuple.TupleDataLen != 10),
683 "pa tpl size");
684 sprom->pa0b0 = tuple.TupleData[1] |
685 ((u16)tuple.TupleData[2] << 8);
686 sprom->pa0b1 = tuple.TupleData[3] |
687 ((u16)tuple.TupleData[4] << 8);
688 sprom->pa0b2 = tuple.TupleData[5] |
689 ((u16)tuple.TupleData[6] << 8);
690 sprom->itssi_a = tuple.TupleData[7];
691 sprom->itssi_bg = tuple.TupleData[7];
692 sprom->maxpwr_a = tuple.TupleData[8];
693 sprom->maxpwr_bg = tuple.TupleData[8];
694 break;
695 case SSB_PCMCIA_CIS_OEMNAME:
696 /* We ignore this. */
697 break;
698 case SSB_PCMCIA_CIS_CCODE:
699 GOTO_ERROR_ON(tuple.TupleDataLen != 2,
700 "ccode tpl size");
701 sprom->country_code = tuple.TupleData[1];
702 break;
703 case SSB_PCMCIA_CIS_ANTENNA:
704 GOTO_ERROR_ON(tuple.TupleDataLen != 2,
705 "ant tpl size");
706 sprom->ant_available_a = tuple.TupleData[1];
707 sprom->ant_available_bg = tuple.TupleData[1];
708 break;
709 case SSB_PCMCIA_CIS_ANTGAIN:
710 GOTO_ERROR_ON(tuple.TupleDataLen != 2,
711 "antg tpl size");
712 sprom->antenna_gain.ghz24.a0 = tuple.TupleData[1];
713 sprom->antenna_gain.ghz24.a1 = tuple.TupleData[1];
714 sprom->antenna_gain.ghz24.a2 = tuple.TupleData[1];
715 sprom->antenna_gain.ghz24.a3 = tuple.TupleData[1];
716 sprom->antenna_gain.ghz5.a0 = tuple.TupleData[1];
717 sprom->antenna_gain.ghz5.a1 = tuple.TupleData[1];
718 sprom->antenna_gain.ghz5.a2 = tuple.TupleData[1];
719 sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1];
720 break;
721 case SSB_PCMCIA_CIS_BFLAGS:
722 GOTO_ERROR_ON((tuple.TupleDataLen != 3) &&
723 (tuple.TupleDataLen != 5),
724 "bfl tpl size");
725 sprom->boardflags_lo = tuple.TupleData[1] |
726 ((u16)tuple.TupleData[2] << 8);
727 break;
728 case SSB_PCMCIA_CIS_LEDS:
729 GOTO_ERROR_ON(tuple.TupleDataLen != 5,
730 "leds tpl size");
731 sprom->gpio0 = tuple.TupleData[1];
732 sprom->gpio1 = tuple.TupleData[2];
733 sprom->gpio2 = tuple.TupleData[3];
734 sprom->gpio3 = tuple.TupleData[4];
735 break;
736 }
737 res = pcmcia_get_next_tuple(bus->host_pcmcia, &tuple);
738 if (res == -ENOSPC)
739 break;
740 GOTO_ERROR_ON(res != 0, "VEN next tpl");
741 res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple);
742 GOTO_ERROR_ON(res != 0, "VEN next tpl data");
743 }
744 751
745 return 0;
746error:
747 ssb_printk(KERN_ERR PFX 752 ssb_printk(KERN_ERR PFX
748 "PCMCIA: Failed to fetch device invariants: %s\n", 753 "PCMCIA: Failed to fetch device invariants\n");
749 error_description);
750 return -ENODEV; 754 return -ENODEV;
751} 755}
752 756
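
The ssb rewrite above hands the CIS walking to pcmcia_loop_tuple(): the core fetches every tuple of the requested code and passes it to a callback, which either consumes it (return 0) or returns non-zero to ask for the next tuple; that is why the caller treats -ENOSPC from the vendor-tuple loop as success. A hedged sketch of the same pattern for the MAC-address fetch; the my_* names are illustrative:

#include <linux/string.h>
#include <linux/if_ether.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Called for each CISTPL_FUNCE tuple.  Return 0 once the wanted
 * sub-tuple has been consumed; any non-zero value makes
 * pcmcia_loop_tuple() move on to the next matching tuple. */
static int my_find_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
                       void *priv)
{
        u8 *mac = priv;

        if (tuple->TupleDataLen < 1 ||
            tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
                return -EINVAL;         /* not the LAN node-ID entry */
        if (tuple->TupleDataLen != ETH_ALEN + 2)
                return -EINVAL;         /* malformed entry */

        memcpy(mac, &tuple->TupleData[2], ETH_ALEN);
        return 0;                       /* found it, stop iterating */
}

static int my_read_mac(struct pcmcia_device *p_dev, u8 *mac)
{
        /* Replaces the removed get_first_tuple/get_next_tuple/
         * get_tuple_data loop and its local 32-byte buffer. */
        return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, my_find_mac, mac);
}
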
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 80c0df8656f3..39923cb388be 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -141,37 +141,14 @@ static int das16cs_timer_insn_config(struct comedi_device *dev,
141 struct comedi_insn *insn, 141 struct comedi_insn *insn,
142 unsigned int *data); 142 unsigned int *data);
143 143
144static int get_prodid(struct comedi_device *dev, struct pcmcia_device *link)
145{
146 tuple_t tuple;
147 u_short buf[128];
148 int prodid = 0;
149
150 tuple.TupleData = (cisdata_t *) buf;
151 tuple.TupleOffset = 0;
152 tuple.TupleDataMax = 255;
153 tuple.DesiredTuple = CISTPL_MANFID;
154 tuple.Attributes = TUPLE_RETURN_COMMON;
155 if ((pcmcia_get_first_tuple(link, &tuple) == 0) &&
156 (pcmcia_get_tuple_data(link, &tuple) == 0)) {
157 prodid = le16_to_cpu(buf[1]);
158 }
159
160 return prodid;
161}
162
163static const struct das16cs_board *das16cs_probe(struct comedi_device *dev, 144static const struct das16cs_board *das16cs_probe(struct comedi_device *dev,
164 struct pcmcia_device *link) 145 struct pcmcia_device *link)
165{ 146{
166 int id;
167 int i; 147 int i;
168 148
169 id = get_prodid(dev, link);
170
171 for (i = 0; i < n_boards; i++) { 149 for (i = 0; i < n_boards; i++) {
172 if (das16cs_boards[i].device_id == id) { 150 if (das16cs_boards[i].device_id == link->card_id)
173 return das16cs_boards + i; 151 return das16cs_boards + i;
174 }
175 } 152 }
176 153
177 printk("unknown board!\n"); 154 printk("unknown board!\n");
@@ -660,27 +637,8 @@ static int das16cs_timer_insn_config(struct comedi_device *dev,
660 637
661======================================================================*/ 638======================================================================*/
662 639
663/*
664 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
665 you do not define PCMCIA_DEBUG at all, all the debug code will be
666 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
667 be present but disabled -- but it can then be enabled for specific
668 modules at load time with a 'pc_debug=#' option to insmod.
669*/
670#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) 640#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
671 641
672#ifdef PCMCIA_DEBUG
673static int pc_debug = PCMCIA_DEBUG;
674module_param(pc_debug, int, 0644);
675#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
676static char *version =
677 "cb_das16_cs.c pcmcia code (David Schleef), modified from dummy_cs.c 1.31 2001/08/24 12:13:13 (David Hinds)";
678#else
679#define DEBUG(n, args...)
680#endif
681
682/*====================================================================*/
683
684static void das16cs_pcmcia_config(struct pcmcia_device *link); 642static void das16cs_pcmcia_config(struct pcmcia_device *link);
685static void das16cs_pcmcia_release(struct pcmcia_device *link); 643static void das16cs_pcmcia_release(struct pcmcia_device *link);
686static int das16cs_pcmcia_suspend(struct pcmcia_device *p_dev); 644static int das16cs_pcmcia_suspend(struct pcmcia_device *p_dev);
@@ -733,7 +691,7 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
733{ 691{
734 struct local_info_t *local; 692 struct local_info_t *local;
735 693
736 DEBUG(0, "das16cs_pcmcia_attach()\n"); 694 dev_dbg(&link->dev, "das16cs_pcmcia_attach()\n");
737 695
738 /* Allocate space for private device-specific data */ 696 /* Allocate space for private device-specific data */
739 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 697 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -745,7 +703,6 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
745 /* Initialize the pcmcia_device structure */ 703 /* Initialize the pcmcia_device structure */
746 /* Interrupt setup */ 704 /* Interrupt setup */
747 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 705 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
748 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
749 link->irq.Handler = NULL; 706 link->irq.Handler = NULL;
750 707
751 link->conf.Attributes = 0; 708 link->conf.Attributes = 0;
@@ -760,7 +717,7 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
760 717
761static void das16cs_pcmcia_detach(struct pcmcia_device *link) 718static void das16cs_pcmcia_detach(struct pcmcia_device *link)
762{ 719{
763 DEBUG(0, "das16cs_pcmcia_detach(0x%p)\n", link); 720 dev_dbg(&link->dev, "das16cs_pcmcia_detach\n");
764 721
765 if (link->dev_node) { 722 if (link->dev_node) {
766 ((struct local_info_t *)link->priv)->stop = 1; 723 ((struct local_info_t *)link->priv)->stop = 1;
@@ -771,118 +728,55 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *link)
771 kfree(link->priv); 728 kfree(link->priv);
772} /* das16cs_pcmcia_detach */ 729} /* das16cs_pcmcia_detach */
773 730
774static void das16cs_pcmcia_config(struct pcmcia_device *link)
775{
776 struct local_info_t *dev = link->priv;
777 tuple_t tuple;
778 cisparse_t parse;
779 int last_fn, last_ret;
780 u_char buf[64];
781 cistpl_cftable_entry_t dflt = { 0 };
782 731
783 DEBUG(0, "das16cs_pcmcia_config(0x%p)\n", link); 732static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
784 733 cistpl_cftable_entry_t *cfg,
785 /* 734 cistpl_cftable_entry_t *dflt,
786 This reads the card's CONFIG tuple to find its configuration 735 unsigned int vcc,
787 registers. 736 void *priv_data)
788 */ 737{
789 tuple.DesiredTuple = CISTPL_CONFIG; 738 if (cfg->index == 0)
790 tuple.Attributes = 0; 739 return -EINVAL;
791 tuple.TupleData = buf;
792 tuple.TupleDataMax = sizeof(buf);
793 tuple.TupleOffset = 0;
794
795 last_fn = GetFirstTuple;
796 last_ret = pcmcia_get_first_tuple(link, &tuple);
797 if (last_ret != 0)
798 goto cs_failed;
799
800 last_fn = GetTupleData;
801 last_ret = pcmcia_get_tuple_data(link, &tuple);
802 if (last_ret != 0)
803 goto cs_failed;
804
805 last_fn = ParseTuple;
806 last_ret = pcmcia_parse_tuple(&tuple, &parse);
807 if (last_ret != 0)
808 goto cs_failed;
809
810 link->conf.ConfigBase = parse.config.base;
811 link->conf.Present = parse.config.rmask[0];
812 740
813 /* 741 /* Do we need to allocate an interrupt? */
814 In this loop, we scan the CIS for configuration table entries, 742 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
815 each of which describes a valid card configuration, including 743 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
816 voltage, IO window, memory window, and interrupt settings. 744
817 745 /* IO window settings */
818 We make no assumptions about the card to be configured: we use 746 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
819 just the information available in the CIS. In an ideal world, 747 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
820 this would work for any PCMCIA card, but it requires a complete 748 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
821 and accurate CIS. In practice, a driver usually "knows" most of 749 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
822 these things without consulting the CIS, and most client drivers 750 if (!(io->flags & CISTPL_IO_8BIT))
823 will only use the CIS to fill in implementation-defined details. 751 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
824 */ 752 if (!(io->flags & CISTPL_IO_16BIT))
825 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 753 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
826 last_fn = GetFirstTuple; 754 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
827 755 p_dev->io.BasePort1 = io->win[0].base;
828 last_ret = pcmcia_get_first_tuple(link, &tuple); 756 p_dev->io.NumPorts1 = io->win[0].len;
829 if (last_ret) 757 if (io->nwin > 1) {
830 goto cs_failed; 758 p_dev->io.Attributes2 = p_dev->io.Attributes1;
831 759 p_dev->io.BasePort2 = io->win[1].base;
832 while (1) { 760 p_dev->io.NumPorts2 = io->win[1].len;
833 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
834 if (pcmcia_get_tuple_data(link, &tuple))
835 goto next_entry;
836 if (pcmcia_parse_tuple(&tuple, &parse))
837 goto next_entry;
838
839 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
840 dflt = *cfg;
841 if (cfg->index == 0)
842 goto next_entry;
843 link->conf.ConfigIndex = cfg->index;
844
845 /* Does this card need audio output? */
846/* if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
847 link->conf.Attributes |= CONF_ENABLE_SPKR;
848 link->conf.Status = CCSR_AUDIO_ENA;
849 }
850*/
851 /* Do we need to allocate an interrupt? */
852 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
853 link->conf.Attributes |= CONF_ENABLE_IRQ;
854
855 /* IO window settings */
856 link->io.NumPorts1 = link->io.NumPorts2 = 0;
857 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
858 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
859 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
860 if (!(io->flags & CISTPL_IO_8BIT))
861 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
862 if (!(io->flags & CISTPL_IO_16BIT))
863 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
864 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
865 link->io.BasePort1 = io->win[0].base;
866 link->io.NumPorts1 = io->win[0].len;
867 if (io->nwin > 1) {
868 link->io.Attributes2 = link->io.Attributes1;
869 link->io.BasePort2 = io->win[1].base;
870 link->io.NumPorts2 = io->win[1].len;
871 }
872 /* This reserves IO space but doesn't actually enable it */
873 if (pcmcia_request_io(link, &link->io))
874 goto next_entry;
875 } 761 }
762 /* This reserves IO space but doesn't actually enable it */
763 return pcmcia_request_io(p_dev, &p_dev->io);
764 }
876 765
877 /* If we got this far, we're cool! */ 766 return 0;
878 break; 767}
768
769static void das16cs_pcmcia_config(struct pcmcia_device *link)
770{
771 struct local_info_t *dev = link->priv;
772 int ret;
879 773
880next_entry: 774 dev_dbg(&link->dev, "das16cs_pcmcia_config\n");
881 last_fn = GetNextTuple;
882 775
883 last_ret = pcmcia_get_next_tuple(link, &tuple); 776 ret = pcmcia_loop_config(link, das16cs_pcmcia_config_loop, NULL);
884 if (last_ret) 777 if (ret) {
885 goto cs_failed; 778 dev_warn(&link->dev, "no configuration found\n");
779 goto failed;
886 } 780 }
887 781
888 /* 782 /*
@@ -891,21 +785,18 @@ next_entry:
891 irq structure is initialized. 785 irq structure is initialized.
892 */ 786 */
893 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 787 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
894 last_fn = RequestIRQ; 788 ret = pcmcia_request_irq(link, &link->irq);
895 789 if (ret)
896 last_ret = pcmcia_request_irq(link, &link->irq); 790 goto failed;
897 if (last_ret)
898 goto cs_failed;
899 } 791 }
900 /* 792 /*
901 This actually configures the PCMCIA socket -- setting up 793 This actually configures the PCMCIA socket -- setting up
902 the I/O windows and the interrupt mapping, and putting the 794 the I/O windows and the interrupt mapping, and putting the
903 card and host interface into "Memory and IO" mode. 795 card and host interface into "Memory and IO" mode.
904 */ 796 */
905 last_fn = RequestConfiguration; 797 ret = pcmcia_request_configuration(link, &link->conf);
906 last_ret = pcmcia_request_configuration(link, &link->conf); 798 if (ret)
907 if (last_ret) 799 goto failed;
908 goto cs_failed;
909 800
910 /* 801 /*
911 At this point, the dev_node_t structure(s) need to be 802 At this point, the dev_node_t structure(s) need to be
@@ -930,14 +821,13 @@ next_entry:
930 821
931 return; 822 return;
932 823
933cs_failed: 824failed:
934 cs_error(link, last_fn, last_ret);
935 das16cs_pcmcia_release(link); 825 das16cs_pcmcia_release(link);
936} /* das16cs_pcmcia_config */ 826} /* das16cs_pcmcia_config */
937 827
938static void das16cs_pcmcia_release(struct pcmcia_device *link) 828static void das16cs_pcmcia_release(struct pcmcia_device *link)
939{ 829{
940 DEBUG(0, "das16cs_pcmcia_release(0x%p)\n", link); 830 dev_dbg(&link->dev, "das16cs_pcmcia_release\n");
941 pcmcia_disable_device(link); 831 pcmcia_disable_device(link);
942} /* das16cs_pcmcia_release */ 832} /* das16cs_pcmcia_release */
943 833
@@ -983,14 +873,13 @@ struct pcmcia_driver das16cs_driver = {
983 873
984static int __init init_das16cs_pcmcia_cs(void) 874static int __init init_das16cs_pcmcia_cs(void)
985{ 875{
986 DEBUG(0, "%s\n", version);
987 pcmcia_register_driver(&das16cs_driver); 876 pcmcia_register_driver(&das16cs_driver);
988 return 0; 877 return 0;
989} 878}
990 879
991static void __exit exit_das16cs_pcmcia_cs(void) 880static void __exit exit_das16cs_pcmcia_cs(void)
992{ 881{
993 DEBUG(0, "das16cs_pcmcia_cs: unloading\n"); 882 pr_debug("das16cs_pcmcia_cs: unloading\n");
994 pcmcia_unregister_driver(&das16cs_driver); 883 pcmcia_unregister_driver(&das16cs_driver);
995} 884}
996 885
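
cb_das16_cs above, and das08_cs, ni_daq_700 and ni_daq_dio24 below, all gain an almost identical pcmcia_loop_config() callback that accepts the first usable configuration-table entry and programs the I/O window from it. A condensed sketch of that shared shape, limited to one I/O window for brevity; the function name is illustrative and the field handling mirrors the hunks above:

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int my_first_usable_entry(struct pcmcia_device *p_dev,
                                 cistpl_cftable_entry_t *cfg,
                                 cistpl_cftable_entry_t *dflt,
                                 unsigned int vcc, void *priv_data)
{
        cistpl_io_t *io;

        if (cfg->index == 0)
                return -EINVAL;         /* not a usable entry */

        /* Request an IRQ line if this entry or the default entry
         * describes one. */
        if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
                p_dev->conf.Attributes |= CONF_ENABLE_IRQ;

        if (cfg->io.nwin == 0 && dflt->io.nwin == 0)
                return 0;               /* nothing to reserve */

        /* Prefer the entry's own I/O description, fall back to the
         * default entry, and translate it into p_dev->io. */
        io = cfg->io.nwin ? &cfg->io : &dflt->io;
        p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
        if (!(io->flags & CISTPL_IO_8BIT))
                p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
        if (!(io->flags & CISTPL_IO_16BIT))
                p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
        p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
        p_dev->io.BasePort1 = io->win[0].base;
        p_dev->io.NumPorts1 = io->win[0].len;

        /* Reserves the I/O range; the card itself is only enabled
         * later by pcmcia_request_configuration().  Returning 0 here
         * accepts this entry and stops the loop. */
        return pcmcia_request_io(p_dev, &p_dev->io);
}

The caller then reduces to ret = pcmcia_loop_config(link, my_first_usable_entry, NULL) and bails out to its failed: label when no entry was accepted, as das16cs_pcmcia_config() does above.
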
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 9cab21eaaa18..9b945e5fdd32 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -110,25 +110,6 @@ static int das08_cs_attach(struct comedi_device *dev,
110 110
111======================================================================*/ 111======================================================================*/
112 112
113/*
114 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
115 you do not define PCMCIA_DEBUG at all, all the debug code will be
116 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
117 be present but disabled -- but it can then be enabled for specific
118 modules at load time with a 'pc_debug=#' option to insmod.
119*/
120
121#ifdef PCMCIA_DEBUG
122static int pc_debug = PCMCIA_DEBUG;
123module_param(pc_debug, int, 0644);
124#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
125static const char *version =
126 "das08.c pcmcia code (Frank Hess), modified from dummy_cs.c 1.31 2001/08/24 12:13:13 (David Hinds)";
127#else
128#define DEBUG(n, args...)
129#endif
130
131/*====================================================================*/
132static void das08_pcmcia_config(struct pcmcia_device *link); 113static void das08_pcmcia_config(struct pcmcia_device *link);
133static void das08_pcmcia_release(struct pcmcia_device *link); 114static void das08_pcmcia_release(struct pcmcia_device *link);
134static int das08_pcmcia_suspend(struct pcmcia_device *p_dev); 115static int das08_pcmcia_suspend(struct pcmcia_device *p_dev);
@@ -181,7 +162,7 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
181{ 162{
182 struct local_info_t *local; 163 struct local_info_t *local;
183 164
184 DEBUG(0, "das08_pcmcia_attach()\n"); 165 dev_dbg(&link->dev, "das08_pcmcia_attach()\n");
185 166
186 /* Allocate space for private device-specific data */ 167 /* Allocate space for private device-specific data */
187 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 168 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -192,7 +173,6 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
192 173
193 /* Interrupt setup */ 174 /* Interrupt setup */
194 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 175 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
195 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
196 link->irq.Handler = NULL; 176 link->irq.Handler = NULL;
197 177
198 /* 178 /*
@@ -224,7 +204,7 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
224static void das08_pcmcia_detach(struct pcmcia_device *link) 204static void das08_pcmcia_detach(struct pcmcia_device *link)
225{ 205{
226 206
227 DEBUG(0, "das08_pcmcia_detach(0x%p)\n", link); 207 dev_dbg(&link->dev, "das08_pcmcia_detach\n");
228 208
229 if (link->dev_node) { 209 if (link->dev_node) {
230 ((struct local_info_t *)link->priv)->stop = 1; 210 ((struct local_info_t *)link->priv)->stop = 1;
@@ -237,6 +217,44 @@ static void das08_pcmcia_detach(struct pcmcia_device *link)
237 217
238} /* das08_pcmcia_detach */ 218} /* das08_pcmcia_detach */
239 219
220
221static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
222 cistpl_cftable_entry_t *cfg,
223 cistpl_cftable_entry_t *dflt,
224 unsigned int vcc,
225 void *priv_data)
226{
227 if (cfg->index == 0)
228 return -ENODEV;
229
230 /* Do we need to allocate an interrupt? */
231 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
232 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
233
234 /* IO window settings */
235 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
236 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
237 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
238 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
239 if (!(io->flags & CISTPL_IO_8BIT))
240 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
241 if (!(io->flags & CISTPL_IO_16BIT))
242 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
243 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
244 p_dev->io.BasePort1 = io->win[0].base;
245 p_dev->io.NumPorts1 = io->win[0].len;
246 if (io->nwin > 1) {
247 p_dev->io.Attributes2 = p_dev->io.Attributes1;
248 p_dev->io.BasePort2 = io->win[1].base;
249 p_dev->io.NumPorts2 = io->win[1].len;
250 }
251 /* This reserves IO space but doesn't actually enable it */
252 return pcmcia_request_io(p_dev, &p_dev->io);
253 }
254 return 0;
255}
256
257
240/*====================================================================== 258/*======================================================================
241 259
242 das08_pcmcia_config() is scheduled to run after a CARD_INSERTION event 260 das08_pcmcia_config() is scheduled to run after a CARD_INSERTION event
@@ -248,128 +266,20 @@ static void das08_pcmcia_detach(struct pcmcia_device *link)
248static void das08_pcmcia_config(struct pcmcia_device *link) 266static void das08_pcmcia_config(struct pcmcia_device *link)
249{ 267{
250 struct local_info_t *dev = link->priv; 268 struct local_info_t *dev = link->priv;
251 tuple_t tuple; 269 int ret;
252 cisparse_t parse;
253 int last_fn, last_ret;
254 u_char buf[64];
255 cistpl_cftable_entry_t dflt = { 0 };
256
257 DEBUG(0, "das08_pcmcia_config(0x%p)\n", link);
258
259 /*
260 This reads the card's CONFIG tuple to find its configuration
261 registers.
262 */
263 tuple.DesiredTuple = CISTPL_CONFIG;
264 tuple.Attributes = 0;
265 tuple.TupleData = buf;
266 tuple.TupleDataMax = sizeof(buf);
267 tuple.TupleOffset = 0;
268 last_fn = GetFirstTuple;
269
270 last_ret = pcmcia_get_first_tuple(link, &tuple);
271 if (last_ret)
272 goto cs_failed;
273
274 last_fn = GetTupleData;
275
276 last_ret = pcmcia_get_tuple_data(link, &tuple);
277 if (last_ret)
278 goto cs_failed;
279
280 last_fn = ParseTuple;
281
282 last_ret = pcmcia_parse_tuple(&tuple, &parse);
283 if (last_ret)
284 goto cs_failed;
285
286 link->conf.ConfigBase = parse.config.base;
287 link->conf.Present = parse.config.rmask[0];
288
289 /*
290 In this loop, we scan the CIS for configuration table entries,
291 each of which describes a valid card configuration, including
292 voltage, IO window, memory window, and interrupt settings.
293
294 We make no assumptions about the card to be configured: we use
295 just the information available in the CIS. In an ideal world,
296 this would work for any PCMCIA card, but it requires a complete
297 and accurate CIS. In practice, a driver usually "knows" most of
298 these things without consulting the CIS, and most client drivers
299 will only use the CIS to fill in implementation-defined details.
300 */
301 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
302 last_fn = GetFirstTuple;
303
304 last_ret = pcmcia_get_first_tuple(link, &tuple);
305 if (last_ret)
306 goto cs_failed;
307
308 while (1) {
309 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
310
311 last_ret = pcmcia_get_tuple_data(link, &tuple);
312 if (last_ret)
313 goto next_entry;
314
315 last_ret = pcmcia_parse_tuple(&tuple, &parse);
316 if (last_ret)
317 goto next_entry;
318
319 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
320 dflt = *cfg;
321 if (cfg->index == 0)
322 goto next_entry;
323 link->conf.ConfigIndex = cfg->index;
324
325 /* Does this card need audio output? */
326/* if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
327 link->conf.Attributes |= CONF_ENABLE_SPKR;
328 link->conf.Status = CCSR_AUDIO_ENA;
329 }
330*/
331 /* Do we need to allocate an interrupt? */
332 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
333 link->conf.Attributes |= CONF_ENABLE_IRQ;
334
335 /* IO window settings */
336 link->io.NumPorts1 = link->io.NumPorts2 = 0;
337 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
338 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
339 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
340 if (!(io->flags & CISTPL_IO_8BIT))
341 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
342 if (!(io->flags & CISTPL_IO_16BIT))
343 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
344 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
345 link->io.BasePort1 = io->win[0].base;
346 link->io.NumPorts1 = io->win[0].len;
347 if (io->nwin > 1) {
348 link->io.Attributes2 = link->io.Attributes1;
349 link->io.BasePort2 = io->win[1].base;
350 link->io.NumPorts2 = io->win[1].len;
351 }
352 /* This reserves IO space but doesn't actually enable it */
353 if (pcmcia_request_io(link, &link->io) != 0)
354 goto next_entry;
355 }
356
357 /* If we got this far, we're cool! */
358 break;
359 270
360next_entry: 271 dev_dbg(&link->dev, "das08_pcmcia_config\n");
361 last_fn = GetNextTuple;
362 272
363 last_ret = pcmcia_get_next_tuple(link, &tuple); 273 ret = pcmcia_loop_config(link, das08_pcmcia_config_loop, NULL);
364 if (last_ret) 274 if (ret) {
365 goto cs_failed; 275 dev_warn(&link->dev, "no configuration found\n");
276 goto failed;
366 } 277 }
367 278
368 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 279 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
369 last_fn = RequestIRQ; 280 ret = pcmcia_request_irq(link, &link->irq);
370 last_ret = pcmcia_request_irq(link, &link->irq); 281 if (ret)
371 if (last_ret) 282 goto failed;
372 goto cs_failed;
373 } 283 }
374 284
375 /* 285 /*
@@ -377,10 +287,9 @@ next_entry:
377 the I/O windows and the interrupt mapping, and putting the 287 the I/O windows and the interrupt mapping, and putting the
378 card and host interface into "Memory and IO" mode. 288 card and host interface into "Memory and IO" mode.
379 */ 289 */
380 last_fn = RequestConfiguration; 290 ret = pcmcia_request_configuration(link, &link->conf);
381 last_ret = pcmcia_request_configuration(link, &link->conf); 291 if (ret)
382 if (last_ret) 292 goto failed;
383 goto cs_failed;
384 293
385 /* 294 /*
386 At this point, the dev_node_t structure(s) need to be 295 At this point, the dev_node_t structure(s) need to be
@@ -405,8 +314,7 @@ next_entry:
405 314
406 return; 315 return;
407 316
408cs_failed: 317failed:
409 cs_error(link, last_fn, last_ret);
410 das08_pcmcia_release(link); 318 das08_pcmcia_release(link);
411 319
412} /* das08_pcmcia_config */ 320} /* das08_pcmcia_config */
@@ -421,7 +329,7 @@ cs_failed:
421 329
422static void das08_pcmcia_release(struct pcmcia_device *link) 330static void das08_pcmcia_release(struct pcmcia_device *link)
423{ 331{
424 DEBUG(0, "das08_pcmcia_release(0x%p)\n", link); 332 dev_dbg(&link->dev, "das08_pcmcia_release\n");
425 pcmcia_disable_device(link); 333 pcmcia_disable_device(link);
426} /* das08_pcmcia_release */ 334} /* das08_pcmcia_release */
427 335
@@ -477,14 +385,13 @@ struct pcmcia_driver das08_cs_driver = {
477 385
478static int __init init_das08_pcmcia_cs(void) 386static int __init init_das08_pcmcia_cs(void)
479{ 387{
480 DEBUG(0, "%s\n", version);
481 pcmcia_register_driver(&das08_cs_driver); 388 pcmcia_register_driver(&das08_cs_driver);
482 return 0; 389 return 0;
483} 390}
484 391
485static void __exit exit_das08_pcmcia_cs(void) 392static void __exit exit_das08_pcmcia_cs(void)
486{ 393{
487 DEBUG(0, "das08_pcmcia_cs: unloading\n"); 394 pr_debug("das08_pcmcia_cs: unloading\n");
488 pcmcia_unregister_driver(&das08_cs_driver); 395 pcmcia_unregister_driver(&das08_cs_driver);
489} 396}
490 397
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index ec31a3970664..ef5e1183d47d 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -436,25 +436,7 @@ static int dio700_detach(struct comedi_device *dev)
436 return 0; 436 return 0;
437}; 437};
438 438
439/* PCMCIA crap */ 439/* PCMCIA crap -- watch your words, please! */
440
441/*
442 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
443 you do not define PCMCIA_DEBUG at all, all the debug code will be
444 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
445 be present but disabled -- but it can then be enabled for specific
446 modules at load time with a 'pc_debug=#' option to insmod.
447*/
448#ifdef PCMCIA_DEBUG
449static int pc_debug = PCMCIA_DEBUG;
450module_param(pc_debug, int, 0644);
451#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
452static char *version = "ni_daq_700.c, based on dummy_cs.c";
453#else
454#define DEBUG(n, args...)
455#endif
456
457/*====================================================================*/
458 440
459static void dio700_config(struct pcmcia_device *link); 441static void dio700_config(struct pcmcia_device *link);
460static void dio700_release(struct pcmcia_device *link); 442static void dio700_release(struct pcmcia_device *link);
@@ -510,7 +492,7 @@ static int dio700_cs_attach(struct pcmcia_device *link)
510 492
511 printk(KERN_INFO "ni_daq_700: cs-attach\n"); 493 printk(KERN_INFO "ni_daq_700: cs-attach\n");
512 494
513 DEBUG(0, "dio700_cs_attach()\n"); 495 dev_dbg(&link->dev, "dio700_cs_attach()\n");
514 496
515 /* Allocate space for private device-specific data */ 497 /* Allocate space for private device-specific data */
516 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 498 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -521,7 +503,6 @@ static int dio700_cs_attach(struct pcmcia_device *link)
521 503
522 /* Interrupt setup */ 504 /* Interrupt setup */
523 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 505 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
524 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
525 link->irq.Handler = NULL; 506 link->irq.Handler = NULL;
526 507
527 /* 508 /*
@@ -555,7 +536,7 @@ static void dio700_cs_detach(struct pcmcia_device *link)
555 536
556 printk(KERN_INFO "ni_daq_700: cs-detach!\n"); 537 printk(KERN_INFO "ni_daq_700: cs-detach!\n");
557 538
558 DEBUG(0, "dio700_cs_detach(0x%p)\n", link); 539 dev_dbg(&link->dev, "dio700_cs_detach\n");
559 540
560 if (link->dev_node) { 541 if (link->dev_node) {
561 ((struct local_info_t *)link->priv)->stop = 1; 542 ((struct local_info_t *)link->priv)->stop = 1;
@@ -576,141 +557,85 @@ static void dio700_cs_detach(struct pcmcia_device *link)
576 557
577======================================================================*/ 558======================================================================*/
578 559
579static void dio700_config(struct pcmcia_device *link) 560static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
561 cistpl_cftable_entry_t *cfg,
562 cistpl_cftable_entry_t *dflt,
563 unsigned int vcc,
564 void *priv_data)
580{ 565{
581 struct local_info_t *dev = link->priv; 566 win_req_t *req = priv_data;
582 tuple_t tuple;
583 cisparse_t parse;
584 int last_ret;
585 u_char buf[64];
586 win_req_t req;
587 memreq_t map; 567 memreq_t map;
588 cistpl_cftable_entry_t dflt = { 0 };
589 568
590 printk(KERN_INFO "ni_daq_700: cs-config\n"); 569 if (cfg->index == 0)
591 570 return -ENODEV;
592 DEBUG(0, "dio700_config(0x%p)\n", link);
593 571
594 /* 572 /* Does this card need audio output? */
595 This reads the card's CONFIG tuple to find its configuration 573 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
596 registers. 574 p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
597 */ 575 p_dev->conf.Status = CCSR_AUDIO_ENA;
598 tuple.DesiredTuple = CISTPL_CONFIG;
599 tuple.Attributes = 0;
600 tuple.TupleData = buf;
601 tuple.TupleDataMax = sizeof(buf);
602 tuple.TupleOffset = 0;
603
604 last_ret = pcmcia_get_first_tuple(link, &tuple);
605 if (last_ret) {
606 cs_error(link, GetFirstTuple, last_ret);
607 goto cs_failed;
608 } 576 }
609 577
610 last_ret = pcmcia_get_tuple_data(link, &tuple); 578 /* Do we need to allocate an interrupt? */
611 if (last_ret) { 579 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
612 cs_error(link, GetTupleData, last_ret); 580 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
613 goto cs_failed; 581
582 /* IO window settings */
583 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
584 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
585 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
586 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
587 if (!(io->flags & CISTPL_IO_8BIT))
588 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
589 if (!(io->flags & CISTPL_IO_16BIT))
590 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
591 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
592 p_dev->io.BasePort1 = io->win[0].base;
593 p_dev->io.NumPorts1 = io->win[0].len;
594 if (io->nwin > 1) {
595 p_dev->io.Attributes2 = p_dev->io.Attributes1;
596 p_dev->io.BasePort2 = io->win[1].base;
597 p_dev->io.NumPorts2 = io->win[1].len;
598 }
599 /* This reserves IO space but doesn't actually enable it */
600 if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
601 return -ENODEV;
614 } 602 }
615 603
616 last_ret = pcmcia_parse_tuple(&tuple, &parse); 604 if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
617 if (last_ret) { 605 cistpl_mem_t *mem =
618 cs_error(link, ParseTuple, last_ret); 606 (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
619 goto cs_failed; 607 req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
608 req->Attributes |= WIN_ENABLE;
609 req->Base = mem->win[0].host_addr;
610 req->Size = mem->win[0].len;
611 if (req->Size < 0x1000)
612 req->Size = 0x1000;
613 req->AccessSpeed = 0;
614 if (pcmcia_request_window(p_dev, req, &p_dev->win))
615 return -ENODEV;
616 map.Page = 0;
617 map.CardOffset = mem->win[0].card_addr;
618 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
619 return -ENODEV;
620 } 620 }
621 link->conf.ConfigBase = parse.config.base; 621 /* If we got this far, we're cool! */
622 link->conf.Present = parse.config.rmask[0]; 622 return 0;
623 623}
624 /*
625 In this loop, we scan the CIS for configuration table entries,
626 each of which describes a valid card configuration, including
627 voltage, IO window, memory window, and interrupt settings.
628
629 We make no assumptions about the card to be configured: we use
630 just the information available in the CIS. In an ideal world,
631 this would work for any PCMCIA card, but it requires a complete
632 and accurate CIS. In practice, a driver usually "knows" most of
633 these things without consulting the CIS, and most client drivers
634 will only use the CIS to fill in implementation-defined details.
635 */
636 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
637 last_ret = pcmcia_get_first_tuple(link, &tuple);
638 if (last_ret != 0) {
639 cs_error(link, GetFirstTuple, last_ret);
640 goto cs_failed;
641 }
642 while (1) {
643 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
644 if (pcmcia_get_tuple_data(link, &tuple) != 0)
645 goto next_entry;
646 if (pcmcia_parse_tuple(&tuple, &parse) != 0)
647 goto next_entry;
648
649 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
650 dflt = *cfg;
651 if (cfg->index == 0)
652 goto next_entry;
653 link->conf.ConfigIndex = cfg->index;
654
655 /* Does this card need audio output? */
656 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
657 link->conf.Attributes |= CONF_ENABLE_SPKR;
658 link->conf.Status = CCSR_AUDIO_ENA;
659 }
660 624
661 /* Do we need to allocate an interrupt? */ 625static void dio700_config(struct pcmcia_device *link)
662 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) 626{
663 link->conf.Attributes |= CONF_ENABLE_IRQ; 627 struct local_info_t *dev = link->priv;
664 628 win_req_t req;
665 /* IO window settings */ 629 int ret;
666 link->io.NumPorts1 = link->io.NumPorts2 = 0;
667 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
668 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
669 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
670 if (!(io->flags & CISTPL_IO_8BIT))
671 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
672 if (!(io->flags & CISTPL_IO_16BIT))
673 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
674 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
675 link->io.BasePort1 = io->win[0].base;
676 link->io.NumPorts1 = io->win[0].len;
677 if (io->nwin > 1) {
678 link->io.Attributes2 = link->io.Attributes1;
679 link->io.BasePort2 = io->win[1].base;
680 link->io.NumPorts2 = io->win[1].len;
681 }
682 /* This reserves IO space but doesn't actually enable it */
683 if (pcmcia_request_io(link, &link->io) != 0)
684 goto next_entry;
685 }
686 630
687 if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { 631 printk(KERN_INFO "ni_daq_700: cs-config\n");
688 cistpl_mem_t *mem =
689 (cfg->mem.nwin) ? &cfg->mem : &dflt.mem;
690 req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
691 req.Attributes |= WIN_ENABLE;
692 req.Base = mem->win[0].host_addr;
693 req.Size = mem->win[0].len;
694 if (req.Size < 0x1000)
695 req.Size = 0x1000;
696 req.AccessSpeed = 0;
697 if (pcmcia_request_window(&link, &req, &link->win))
698 goto next_entry;
699 map.Page = 0;
700 map.CardOffset = mem->win[0].card_addr;
701 if (pcmcia_map_mem_page(link->win, &map))
702 goto next_entry;
703 }
704 /* If we got this far, we're cool! */
705 break;
706 632
707next_entry: 633 dev_dbg(&link->dev, "dio700_config\n");
708 634
709 last_ret = pcmcia_get_next_tuple(link, &tuple); 635 ret = pcmcia_loop_config(link, dio700_pcmcia_config_loop, &req);
710 if (last_ret) { 636 if (ret) {
711 cs_error(link, GetNextTuple, last_ret); 637 dev_warn(&link->dev, "no configuration found\n");
712 goto cs_failed; 638 goto failed;
713 }
714 } 639 }
715 640
716 /* 641 /*
@@ -719,11 +644,9 @@ next_entry:
719 irq structure is initialized. 644 irq structure is initialized.
720 */ 645 */
721 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 646 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
722 last_ret = pcmcia_request_irq(link, &link->irq); 647 ret = pcmcia_request_irq(link, &link->irq);
723 if (last_ret) { 648 if (ret)
724 cs_error(link, RequestIRQ, last_ret); 649 goto failed;
725 goto cs_failed;
726 }
727 } 650 }
728 651
729 /* 652 /*
@@ -731,11 +654,9 @@ next_entry:
731 the I/O windows and the interrupt mapping, and putting the 654 the I/O windows and the interrupt mapping, and putting the
732 card and host interface into "Memory and IO" mode. 655 card and host interface into "Memory and IO" mode.
733 */ 656 */
734 last_ret = pcmcia_request_configuration(link, &link->conf); 657 ret = pcmcia_request_configuration(link, &link->conf);
735 if (last_ret != 0) { 658 if (ret != 0)
736 cs_error(link, RequestConfiguration, last_ret); 659 goto failed;
737 goto cs_failed;
738 }
739 660
740 /* 661 /*
741 At this point, the dev_node_t structure(s) need to be 662 At this point, the dev_node_t structure(s) need to be
@@ -763,7 +684,7 @@ next_entry:
763 684
764 return; 685 return;
765 686
766cs_failed: 687failed:
767 printk(KERN_INFO "ni_daq_700 cs failed"); 688 printk(KERN_INFO "ni_daq_700 cs failed");
768 dio700_release(link); 689 dio700_release(link);
769 690
@@ -771,7 +692,7 @@ cs_failed:
771 692
772static void dio700_release(struct pcmcia_device *link) 693static void dio700_release(struct pcmcia_device *link)
773{ 694{
774 DEBUG(0, "dio700_release(0x%p)\n", link); 695 dev_dbg(&link->dev, "dio700_release\n");
775 696
776 pcmcia_disable_device(link); 697 pcmcia_disable_device(link);
777} /* dio700_release */ 698} /* dio700_release */
@@ -830,15 +751,13 @@ struct pcmcia_driver dio700_cs_driver = {
830 751
831static int __init init_dio700_cs(void) 752static int __init init_dio700_cs(void)
832{ 753{
833 printk("ni_daq_700: cs-init \n");
834 DEBUG(0, "%s\n", version);
835 pcmcia_register_driver(&dio700_cs_driver); 754 pcmcia_register_driver(&dio700_cs_driver);
836 return 0; 755 return 0;
837} 756}
838 757
839static void __exit exit_dio700_cs(void) 758static void __exit exit_dio700_cs(void)
840{ 759{
841 DEBUG(0, "ni_daq_700: unloading\n"); 760 pr_debug("ni_daq_700: unloading\n");
842 pcmcia_unregister_driver(&dio700_cs_driver); 761 pcmcia_unregister_driver(&dio700_cs_driver);
843} 762}
844 763
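As a reference for the pattern used throughout this series: the old hand-rolled CIS tuple walk moves into a callback handed to pcmcia_loop_config(), which iterates the CISTPL_CFTABLE_ENTRY tuples itself. A minimal sketch of such a callback, assuming the callback prototype visible in the hunks above; the name and body are illustrative, not taken from any one driver:

static int example_pcmcia_config_loop(struct pcmcia_device *p_dev,
				      cistpl_cftable_entry_t *cfg,
				      cistpl_cftable_entry_t *dflt,
				      unsigned int vcc,
				      void *priv_data)
{
	if (cfg->index == 0)
		return -ENODEV;		/* not a usable entry, try the next one */

	/* copy whatever this entry describes into p_dev->io / p_dev->conf */
	if (cfg->io.nwin > 0) {
		p_dev->io.BasePort1 = cfg->io.win[0].base;
		p_dev->io.NumPorts1 = cfg->io.win[0].len;
	}

	/* returning 0 ends the loop; an error moves on to the next entry */
	return pcmcia_request_io(p_dev, &p_dev->io);
}

A driver then calls pcmcia_loop_config(link, example_pcmcia_config_loop, NULL) from its config routine and treats a non-zero return as "no configuration found", exactly as the converted dio700/dio24/labpc code does.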
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 0700a8bddd1e..9017be3a92f1 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -187,25 +187,7 @@ static int dio24_detach(struct comedi_device *dev)
187 return 0; 187 return 0;
188}; 188};
189 189
190/* PCMCIA crap */ 190/* PCMCIA crap */
191
192/*
193 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
194 you do not define PCMCIA_DEBUG at all, all the debug code will be
195 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
196 be present but disabled -- but it can then be enabled for specific
197 modules at load time with a 'pc_debug=#' option to insmod.
198*/
199#ifdef PCMCIA_DEBUG
200static int pc_debug = PCMCIA_DEBUG;
201module_param(pc_debug, int, 0644);
202#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
203static char *version = "ni_daq_dio24.c, based on dummy_cs.c";
204#else
205#define DEBUG(n, args...)
206#endif
207
208/*====================================================================*/
209 191
210static void dio24_config(struct pcmcia_device *link); 192static void dio24_config(struct pcmcia_device *link);
211static void dio24_release(struct pcmcia_device *link); 193static void dio24_release(struct pcmcia_device *link);
@@ -261,7 +243,7 @@ static int dio24_cs_attach(struct pcmcia_device *link)
261 243
262 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - CS-attach!\n"); 244 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - CS-attach!\n");
263 245
264 DEBUG(0, "dio24_cs_attach()\n"); 246 dev_dbg(&link->dev, "dio24_cs_attach()\n");
265 247
266 /* Allocate space for private device-specific data */ 248 /* Allocate space for private device-specific data */
267 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 249 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -272,7 +254,6 @@ static int dio24_cs_attach(struct pcmcia_device *link)
272 254
273 /* Interrupt setup */ 255 /* Interrupt setup */
274 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 256 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
275 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
276 link->irq.Handler = NULL; 257 link->irq.Handler = NULL;
277 258
278 /* 259 /*
@@ -306,7 +287,7 @@ static void dio24_cs_detach(struct pcmcia_device *link)
306 287
307 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - cs-detach!\n"); 288 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - cs-detach!\n");
308 289
309 DEBUG(0, "dio24_cs_detach(0x%p)\n", link); 290 dev_dbg(&link->dev, "dio24_cs_detach\n");
310 291
311 if (link->dev_node) { 292 if (link->dev_node) {
312 ((struct local_info_t *)link->priv)->stop = 1; 293 ((struct local_info_t *)link->priv)->stop = 1;
@@ -327,142 +308,85 @@ static void dio24_cs_detach(struct pcmcia_device *link)
327 308
328======================================================================*/ 309======================================================================*/
329 310
330static void dio24_config(struct pcmcia_device *link) 311static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
312 cistpl_cftable_entry_t *cfg,
313 cistpl_cftable_entry_t *dflt,
314 unsigned int vcc,
315 void *priv_data)
331{ 316{
332 struct local_info_t *dev = link->priv; 317 win_req_t *req = priv_data;
333 tuple_t tuple;
334 cisparse_t parse;
335 int last_ret;
336 u_char buf[64];
337 win_req_t req;
338 memreq_t map; 318 memreq_t map;
339 cistpl_cftable_entry_t dflt = { 0 };
340 319
341 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! - config\n"); 320 if (cfg->index == 0)
342 321 return -ENODEV;
343 DEBUG(0, "dio24_config(0x%p)\n", link);
344
345 /*
346 This reads the card's CONFIG tuple to find its configuration
347 registers.
348 */
349 tuple.DesiredTuple = CISTPL_CONFIG;
350 tuple.Attributes = 0;
351 tuple.TupleData = buf;
352 tuple.TupleDataMax = sizeof(buf);
353 tuple.TupleOffset = 0;
354
355 last_ret = pcmcia_get_first_tuple(link, &tuple);
356 if (last_ret) {
357 cs_error(link, GetFirstTuple, last_ret);
358 goto cs_failed;
359 }
360 322
361 last_ret = pcmcia_get_tuple_data(link, &tuple); 323 /* Does this card need audio output? */
362 if (last_ret) { 324 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
363 cs_error(link, GetTupleData, last_ret); 325 p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
364 goto cs_failed; 326 p_dev->conf.Status = CCSR_AUDIO_ENA;
365 } 327 }
366 328
367 last_ret = pcmcia_parse_tuple(&tuple, &parse); 329 /* Do we need to allocate an interrupt? */
368 if (last_ret) { 330 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
369 cs_error(link, ParseTuple, last_ret); 331 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
370 goto cs_failed; 332
333 /* IO window settings */
334 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
335 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
336 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
337 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
338 if (!(io->flags & CISTPL_IO_8BIT))
339 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
340 if (!(io->flags & CISTPL_IO_16BIT))
341 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
342 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
343 p_dev->io.BasePort1 = io->win[0].base;
344 p_dev->io.NumPorts1 = io->win[0].len;
345 if (io->nwin > 1) {
346 p_dev->io.Attributes2 = p_dev->io.Attributes1;
347 p_dev->io.BasePort2 = io->win[1].base;
348 p_dev->io.NumPorts2 = io->win[1].len;
349 }
350 /* This reserves IO space but doesn't actually enable it */
351 if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
352 return -ENODEV;
371 } 353 }
372 link->conf.ConfigBase = parse.config.base;
373 link->conf.Present = parse.config.rmask[0];
374
375 /*
376 In this loop, we scan the CIS for configuration table entries,
377 each of which describes a valid card configuration, including
378 voltage, IO window, memory window, and interrupt settings.
379
380 We make no assumptions about the card to be configured: we use
381 just the information available in the CIS. In an ideal world,
382 this would work for any PCMCIA card, but it requires a complete
383 and accurate CIS. In practice, a driver usually "knows" most of
384 these things without consulting the CIS, and most client drivers
385 will only use the CIS to fill in implementation-defined details.
386 */
387 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
388 354
389 last_ret = pcmcia_get_first_tuple(link, &tuple); 355 if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
390 if (last_ret) { 356 cistpl_mem_t *mem =
391 cs_error(link, GetFirstTuple, last_ret); 357 (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
392 goto cs_failed; 358 req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
359 req->Attributes |= WIN_ENABLE;
360 req->Base = mem->win[0].host_addr;
361 req->Size = mem->win[0].len;
362 if (req->Size < 0x1000)
363 req->Size = 0x1000;
364 req->AccessSpeed = 0;
365 if (pcmcia_request_window(p_dev, req, &p_dev->win))
366 return -ENODEV;
367 map.Page = 0;
368 map.CardOffset = mem->win[0].card_addr;
369 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
370 return -ENODEV;
393 } 371 }
394 while (1) { 372 /* If we got this far, we're cool! */
395 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 373 return 0;
396 if (pcmcia_get_tuple_data(link, &tuple) != 0) 374}
397 goto next_entry;
398 if (pcmcia_parse_tuple(&tuple, &parse) != 0)
399 goto next_entry;
400
401 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
402 dflt = *cfg;
403 if (cfg->index == 0)
404 goto next_entry;
405 link->conf.ConfigIndex = cfg->index;
406
407 /* Does this card need audio output? */
408 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
409 link->conf.Attributes |= CONF_ENABLE_SPKR;
410 link->conf.Status = CCSR_AUDIO_ENA;
411 }
412 375
413 /* Do we need to allocate an interrupt? */ 376static void dio24_config(struct pcmcia_device *link)
414 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) 377{
415 link->conf.Attributes |= CONF_ENABLE_IRQ; 378 struct local_info_t *dev = link->priv;
416 379 int ret;
417 /* IO window settings */ 380 win_req_t req;
418 link->io.NumPorts1 = link->io.NumPorts2 = 0;
419 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
420 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
421 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
422 if (!(io->flags & CISTPL_IO_8BIT))
423 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
424 if (!(io->flags & CISTPL_IO_16BIT))
425 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
426 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
427 link->io.BasePort1 = io->win[0].base;
428 link->io.NumPorts1 = io->win[0].len;
429 if (io->nwin > 1) {
430 link->io.Attributes2 = link->io.Attributes1;
431 link->io.BasePort2 = io->win[1].base;
432 link->io.NumPorts2 = io->win[1].len;
433 }
434 /* This reserves IO space but doesn't actually enable it */
435 if (pcmcia_request_io(link, &link->io) != 0)
436 goto next_entry;
437 }
438 381
439 if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { 382 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! - config\n");
440 cistpl_mem_t *mem =
441 (cfg->mem.nwin) ? &cfg->mem : &dflt.mem;
442 req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
443 req.Attributes |= WIN_ENABLE;
444 req.Base = mem->win[0].host_addr;
445 req.Size = mem->win[0].len;
446 if (req.Size < 0x1000)
447 req.Size = 0x1000;
448 req.AccessSpeed = 0;
449 if (pcmcia_request_window(&link, &req, &link->win))
450 goto next_entry;
451 map.Page = 0;
452 map.CardOffset = mem->win[0].card_addr;
453 if (pcmcia_map_mem_page(link->win, &map))
454 goto next_entry;
455 }
456 /* If we got this far, we're cool! */
457 break;
458 383
459next_entry: 384 dev_dbg(&link->dev, "dio24_config\n");
460 385
461 last_ret = pcmcia_get_next_tuple(link, &tuple); 386 ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, &req);
462 if (last_ret) { 387 if (ret) {
463 cs_error(link, GetNextTuple, last_ret); 388 dev_warn(&link->dev, "no configuration found\n");
464 goto cs_failed; 389 goto failed;
465 }
466 } 390 }
467 391
468 /* 392 /*
@@ -471,11 +395,9 @@ next_entry:
471 irq structure is initialized. 395 irq structure is initialized.
472 */ 396 */
473 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 397 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
474 last_ret = pcmcia_request_irq(link, &link->irq); 398 ret = pcmcia_request_irq(link, &link->irq);
475 if (last_ret) { 399 if (ret)
476 cs_error(link, RequestIRQ, last_ret); 400 goto failed;
477 goto cs_failed;
478 }
479 } 401 }
480 402
481 /* 403 /*
@@ -483,11 +405,9 @@ next_entry:
483 the I/O windows and the interrupt mapping, and putting the 405 the I/O windows and the interrupt mapping, and putting the
484 card and host interface into "Memory and IO" mode. 406 card and host interface into "Memory and IO" mode.
485 */ 407 */
486 last_ret = pcmcia_request_configuration(link, &link->conf); 408 ret = pcmcia_request_configuration(link, &link->conf);
487 if (last_ret) { 409 if (ret)
488 cs_error(link, RequestConfiguration, last_ret); 410 goto failed;
489 goto cs_failed;
490 }
491 411
492 /* 412 /*
493 At this point, the dev_node_t structure(s) need to be 413 At this point, the dev_node_t structure(s) need to be
@@ -515,7 +435,7 @@ next_entry:
515 435
516 return; 436 return;
517 437
518cs_failed: 438failed:
519 printk(KERN_INFO "Fallo"); 439 printk(KERN_INFO "Fallo");
520 dio24_release(link); 440 dio24_release(link);
521 441
@@ -523,7 +443,7 @@ cs_failed:
523 443
524static void dio24_release(struct pcmcia_device *link) 444static void dio24_release(struct pcmcia_device *link)
525{ 445{
526 DEBUG(0, "dio24_release(0x%p)\n", link); 446 dev_dbg(&link->dev, "dio24_release\n");
527 447
528 pcmcia_disable_device(link); 448 pcmcia_disable_device(link);
529} /* dio24_release */ 449} /* dio24_release */
@@ -582,14 +502,12 @@ struct pcmcia_driver dio24_cs_driver = {
582static int __init init_dio24_cs(void) 502static int __init init_dio24_cs(void)
583{ 503{
584 printk("ni_daq_dio24: HOLA SOY YO!\n"); 504 printk("ni_daq_dio24: HOLA SOY YO!\n");
585 DEBUG(0, "%s\n", version);
586 pcmcia_register_driver(&dio24_cs_driver); 505 pcmcia_register_driver(&dio24_cs_driver);
587 return 0; 506 return 0;
588} 507}
589 508
590static void __exit exit_dio24_cs(void) 509static void __exit exit_dio24_cs(void)
591{ 510{
592 DEBUG(0, "ni_dio24: unloading\n");
593 pcmcia_unregister_driver(&dio24_cs_driver); 511 pcmcia_unregister_driver(&dio24_cs_driver);
594} 512}
595 513
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index a3053b8da1c6..7d514b3ee754 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -153,23 +153,6 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
153 return labpc_common_attach(dev, iobase, irq, 0); 153 return labpc_common_attach(dev, iobase, irq, 0);
154} 154}
155 155
156/*
157 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
158 you do not define PCMCIA_DEBUG at all, all the debug code will be
159 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
160 be present but disabled -- but it can then be enabled for specific
161 modules at load time with a 'pc_debug=#' option to insmod.
162*/
163#ifdef PCMCIA_DEBUG
164static int pc_debug = PCMCIA_DEBUG;
165module_param(pc_debug, int, 0644);
166#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
167static const char *version =
168 "ni_labpc.c, based on dummy_cs.c 1.31 2001/08/24 12:13:13";
169#else
170#define DEBUG(n, args...)
171#endif
172
173/*====================================================================*/ 156/*====================================================================*/
174 157
175/* 158/*
@@ -236,7 +219,7 @@ static int labpc_cs_attach(struct pcmcia_device *link)
236{ 219{
237 struct local_info_t *local; 220 struct local_info_t *local;
238 221
239 DEBUG(0, "labpc_cs_attach()\n"); 222 dev_dbg(&link->dev, "labpc_cs_attach()\n");
240 223
241 /* Allocate space for private device-specific data */ 224 /* Allocate space for private device-specific data */
242 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 225 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -247,7 +230,6 @@ static int labpc_cs_attach(struct pcmcia_device *link)
247 230
248 /* Interrupt setup */ 231 /* Interrupt setup */
249 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE; 232 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE;
250 link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_PULSE_ID;
251 link->irq.Handler = NULL; 233 link->irq.Handler = NULL;
252 234
253 /* 235 /*
@@ -278,7 +260,7 @@ static int labpc_cs_attach(struct pcmcia_device *link)
278 260
279static void labpc_cs_detach(struct pcmcia_device *link) 261static void labpc_cs_detach(struct pcmcia_device *link)
280{ 262{
281 DEBUG(0, "labpc_cs_detach(0x%p)\n", link); 263 dev_dbg(&link->dev, "labpc_cs_detach\n");
282 264
283 /* 265 /*
284 If the device is currently configured and active, we won't 266 If the device is currently configured and active, we won't
@@ -305,135 +287,84 @@ static void labpc_cs_detach(struct pcmcia_device *link)
305 287
306======================================================================*/ 288======================================================================*/
307 289
308static void labpc_config(struct pcmcia_device *link) 290static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
291 cistpl_cftable_entry_t *cfg,
292 cistpl_cftable_entry_t *dflt,
293 unsigned int vcc,
294 void *priv_data)
309{ 295{
310 struct local_info_t *dev = link->priv; 296 win_req_t *req = priv_data;
311 tuple_t tuple;
312 cisparse_t parse;
313 int last_ret;
314 u_char buf[64];
315 win_req_t req;
316 memreq_t map; 297 memreq_t map;
317 cistpl_cftable_entry_t dflt = { 0 };
318 298
319 DEBUG(0, "labpc_config(0x%p)\n", link); 299 if (cfg->index == 0)
300 return -ENODEV;
320 301
321 /* 302 /* Does this card need audio output? */
322 This reads the card's CONFIG tuple to find its configuration 303 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
323 registers. 304 p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
324 */ 305 p_dev->conf.Status = CCSR_AUDIO_ENA;
325 tuple.DesiredTuple = CISTPL_CONFIG;
326 tuple.Attributes = 0;
327 tuple.TupleData = buf;
328 tuple.TupleDataMax = sizeof(buf);
329 tuple.TupleOffset = 0;
330
331 last_ret = pcmcia_get_first_tuple(link, &tuple);
332 if (last_ret) {
333 cs_error(link, GetFirstTuple, last_ret);
334 goto cs_failed;
335 } 306 }
336 307
337 last_ret = pcmcia_get_tuple_data(link, &tuple); 308 /* Do we need to allocate an interrupt? */
338 if (last_ret) { 309 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
339 cs_error(link, GetTupleData, last_ret); 310 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
340 goto cs_failed; 311
312 /* IO window settings */
313 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
314 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
315 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
316 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
317 if (!(io->flags & CISTPL_IO_8BIT))
318 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
319 if (!(io->flags & CISTPL_IO_16BIT))
320 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
321 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
322 p_dev->io.BasePort1 = io->win[0].base;
323 p_dev->io.NumPorts1 = io->win[0].len;
324 if (io->nwin > 1) {
325 p_dev->io.Attributes2 = p_dev->io.Attributes1;
326 p_dev->io.BasePort2 = io->win[1].base;
327 p_dev->io.NumPorts2 = io->win[1].len;
328 }
329 /* This reserves IO space but doesn't actually enable it */
330 if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
331 return -ENODEV;
341 } 332 }
342 333
343 last_ret = pcmcia_parse_tuple(&tuple, &parse); 334 if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
344 if (last_ret) { 335 cistpl_mem_t *mem =
345 cs_error(link, ParseTuple, last_ret); 336 (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
346 goto cs_failed; 337 req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
338 req->Attributes |= WIN_ENABLE;
339 req->Base = mem->win[0].host_addr;
340 req->Size = mem->win[0].len;
341 if (req->Size < 0x1000)
342 req->Size = 0x1000;
343 req->AccessSpeed = 0;
344 if (pcmcia_request_window(p_dev, req, &p_dev->win))
345 return -ENODEV;
346 map.Page = 0;
347 map.CardOffset = mem->win[0].card_addr;
348 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
349 return -ENODEV;
347 } 350 }
348 link->conf.ConfigBase = parse.config.base; 351 /* If we got this far, we're cool! */
349 link->conf.Present = parse.config.rmask[0]; 352 return 0;
353}
350 354
351 /*
352 In this loop, we scan the CIS for configuration table entries,
353 each of which describes a valid card configuration, including
354 voltage, IO window, memory window, and interrupt settings.
355
356 We make no assumptions about the card to be configured: we use
357 just the information available in the CIS. In an ideal world,
358 this would work for any PCMCIA card, but it requires a complete
359 and accurate CIS. In practice, a driver usually "knows" most of
360 these things without consulting the CIS, and most client drivers
361 will only use the CIS to fill in implementation-defined details.
362 */
363 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
364 last_ret = pcmcia_get_first_tuple(link, &tuple);
365 if (last_ret) {
366 cs_error(link, GetFirstTuple, last_ret);
367 goto cs_failed;
368 }
369 while (1) {
370 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
371 if (pcmcia_get_tuple_data(link, &tuple))
372 goto next_entry;
373 if (pcmcia_parse_tuple(&tuple, &parse))
374 goto next_entry;
375
376 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
377 dflt = *cfg;
378 if (cfg->index == 0)
379 goto next_entry;
380 link->conf.ConfigIndex = cfg->index;
381
382 /* Does this card need audio output? */
383 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
384 link->conf.Attributes |= CONF_ENABLE_SPKR;
385 link->conf.Status = CCSR_AUDIO_ENA;
386 }
387 355
388 /* Do we need to allocate an interrupt? */ 356static void labpc_config(struct pcmcia_device *link)
389 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) 357{
390 link->conf.Attributes |= CONF_ENABLE_IRQ; 358 struct local_info_t *dev = link->priv;
391 359 int ret;
392 /* IO window settings */ 360 win_req_t req;
393 link->io.NumPorts1 = link->io.NumPorts2 = 0;
394 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
395 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
396 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
397 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
398 link->io.BasePort1 = io->win[0].base;
399 link->io.NumPorts1 = io->win[0].len;
400 if (io->nwin > 1) {
401 link->io.Attributes2 = link->io.Attributes1;
402 link->io.BasePort2 = io->win[1].base;
403 link->io.NumPorts2 = io->win[1].len;
404 }
405 /* This reserves IO space but doesn't actually enable it */
406 if (pcmcia_request_io(link, &link->io))
407 goto next_entry;
408 }
409 361
410 if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { 362 dev_dbg(&link->dev, "labpc_config\n");
411 cistpl_mem_t *mem =
412 (cfg->mem.nwin) ? &cfg->mem : &dflt.mem;
413 req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
414 req.Attributes |= WIN_ENABLE;
415 req.Base = mem->win[0].host_addr;
416 req.Size = mem->win[0].len;
417 if (req.Size < 0x1000)
418 req.Size = 0x1000;
419 req.AccessSpeed = 0;
420 link->win = (window_handle_t) link;
421 if (pcmcia_request_window(&link, &req, &link->win))
422 goto next_entry;
423 map.Page = 0;
424 map.CardOffset = mem->win[0].card_addr;
425 if (pcmcia_map_mem_page(link->win, &map))
426 goto next_entry;
427 }
428 /* If we got this far, we're cool! */
429 break;
430 363
431next_entry: 364 ret = pcmcia_loop_config(link, labpc_pcmcia_config_loop, &req);
432 last_ret = pcmcia_get_next_tuple(link, &tuple); 365 if (ret) {
433 if (last_ret) { 366 dev_warn(&link->dev, "no configuration found\n");
434 cs_error(link, GetNextTuple, last_ret); 367 goto failed;
435 goto cs_failed;
436 }
437 } 368 }
438 369
439 /* 370 /*
@@ -442,11 +373,9 @@ next_entry:
442 irq structure is initialized. 373 irq structure is initialized.
443 */ 374 */
444 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 375 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
445 last_ret = pcmcia_request_irq(link, &link->irq); 376 ret = pcmcia_request_irq(link, &link->irq);
446 if (last_ret) { 377 if (ret)
447 cs_error(link, RequestIRQ, last_ret); 378 goto failed;
448 goto cs_failed;
449 }
450 } 379 }
451 380
452 /* 381 /*
@@ -454,11 +383,9 @@ next_entry:
454 the I/O windows and the interrupt mapping, and putting the 383 the I/O windows and the interrupt mapping, and putting the
455 card and host interface into "Memory and IO" mode. 384 card and host interface into "Memory and IO" mode.
456 */ 385 */
457 last_ret = pcmcia_request_configuration(link, &link->conf); 386 ret = pcmcia_request_configuration(link, &link->conf);
458 if (last_ret) { 387 if (ret)
459 cs_error(link, RequestConfiguration, last_ret); 388 goto failed;
460 goto cs_failed;
461 }
462 389
463 /* 390 /*
464 At this point, the dev_node_t structure(s) need to be 391 At this point, the dev_node_t structure(s) need to be
@@ -486,14 +413,14 @@ next_entry:
486 413
487 return; 414 return;
488 415
489cs_failed: 416failed:
490 labpc_release(link); 417 labpc_release(link);
491 418
492} /* labpc_config */ 419} /* labpc_config */
493 420
494static void labpc_release(struct pcmcia_device *link) 421static void labpc_release(struct pcmcia_device *link)
495{ 422{
496 DEBUG(0, "labpc_release(0x%p)\n", link); 423 dev_dbg(&link->dev, "labpc_release\n");
497 424
498 pcmcia_disable_device(link); 425 pcmcia_disable_device(link);
499} /* labpc_release */ 426} /* labpc_release */
@@ -551,14 +478,12 @@ struct pcmcia_driver labpc_cs_driver = {
551 478
552static int __init init_labpc_cs(void) 479static int __init init_labpc_cs(void)
553{ 480{
554 DEBUG(0, "%s\n", version);
555 pcmcia_register_driver(&labpc_cs_driver); 481 pcmcia_register_driver(&labpc_cs_driver);
556 return 0; 482 return 0;
557} 483}
558 484
559static void __exit exit_labpc_cs(void) 485static void __exit exit_labpc_cs(void)
560{ 486{
561 DEBUG(0, "ni_labpc: unloading\n");
562 pcmcia_unregister_driver(&labpc_cs_driver); 487 pcmcia_unregister_driver(&labpc_cs_driver);
563} 488}
564 489
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 9aef87fc81dc..d692f4bb47ea 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -274,7 +274,6 @@ static int cs_attach(struct pcmcia_device *link)
274 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 274 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
275 link->io.NumPorts1 = 16; 275 link->io.NumPorts1 = 16;
276 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 276 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
277 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
278 link->conf.Attributes = CONF_ENABLE_IRQ; 277 link->conf.Attributes = CONF_ENABLE_IRQ;
279 link->conf.IntType = INT_MEMORY_AND_IO; 278 link->conf.IntType = INT_MEMORY_AND_IO;
280 279
@@ -312,96 +311,47 @@ static int mio_cs_resume(struct pcmcia_device *link)
312 return 0; 311 return 0;
313} 312}
314 313
315static void mio_cs_config(struct pcmcia_device *link)
316{
317 tuple_t tuple;
318 u_short buf[128];
319 cisparse_t parse;
320 int manfid = 0, prodid = 0;
321 int ret;
322
323 DPRINTK("mio_cs_config(link=%p)\n", link);
324 314
325 tuple.TupleData = (cisdata_t *) buf; 315static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev,
326 tuple.TupleOffset = 0; 316 cistpl_cftable_entry_t *cfg,
327 tuple.TupleDataMax = 255; 317 cistpl_cftable_entry_t *dflt,
328 tuple.Attributes = 0; 318 unsigned int vcc,
319 void *priv_data)
320{
321 int base, ret;
329 322
330 tuple.DesiredTuple = CISTPL_CONFIG; 323 p_dev->io.NumPorts1 = cfg->io.win[0].len;
331 ret = pcmcia_get_first_tuple(link, &tuple); 324 p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
332 ret = pcmcia_get_tuple_data(link, &tuple); 325 p_dev->io.NumPorts2 = 0;
333 ret = pcmcia_parse_tuple(&tuple, &parse);
334 link->conf.ConfigBase = parse.config.base;
335 link->conf.Present = parse.config.rmask[0];
336 326
337#if 0 327 for (base = 0x000; base < 0x400; base += 0x20) {
338 tuple.DesiredTuple = CISTPL_LONGLINK_MFC; 328 p_dev->io.BasePort1 = base;
339 tuple.Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK; 329 ret = pcmcia_request_io(p_dev, &p_dev->io);
340 info->multi(first_tuple(link, &tuple, &parse) == 0); 330 if (!ret)
341#endif 331 return 0;
342
343 tuple.DesiredTuple = CISTPL_MANFID;
344 tuple.Attributes = TUPLE_RETURN_COMMON;
345 if ((pcmcia_get_first_tuple(link, &tuple) == 0) &&
346 (pcmcia_get_tuple_data(link, &tuple) == 0)) {
347 manfid = le16_to_cpu(buf[0]);
348 prodid = le16_to_cpu(buf[1]);
349 } 332 }
350 /* printk("manfid = 0x%04x, 0x%04x\n",manfid,prodid); */ 333 return -ENODEV;
334}
351 335
352 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
353 tuple.Attributes = 0;
354 ret = pcmcia_get_first_tuple(link, &tuple);
355 ret = pcmcia_get_tuple_data(link, &tuple);
356 ret = pcmcia_parse_tuple(&tuple, &parse);
357 336
358#if 0 337static void mio_cs_config(struct pcmcia_device *link)
359 printk(" index: 0x%x\n", parse.cftable_entry.index); 338{
360 printk(" flags: 0x%x\n", parse.cftable_entry.flags); 339 int ret;
361 printk(" io flags: 0x%x\n", parse.cftable_entry.io.flags);
362 printk(" io nwin: 0x%x\n", parse.cftable_entry.io.nwin);
363 printk(" io base: 0x%x\n", parse.cftable_entry.io.win[0].base);
364 printk(" io len: 0x%x\n", parse.cftable_entry.io.win[0].len);
365 printk(" irq1: 0x%x\n", parse.cftable_entry.irq.IRQInfo1);
366 printk(" irq2: 0x%x\n", parse.cftable_entry.irq.IRQInfo2);
367 printk(" mem flags: 0x%x\n", parse.cftable_entry.mem.flags);
368 printk(" mem nwin: 0x%x\n", parse.cftable_entry.mem.nwin);
369 printk(" subtuples: 0x%x\n", parse.cftable_entry.subtuples);
370#endif
371 340
372#if 0 341 DPRINTK("mio_cs_config(link=%p)\n", link);
373 link->io.NumPorts1 = 0x20;
374 link->io.IOAddrLines = 5;
375 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
376#endif
377 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
378 link->io.IOAddrLines =
379 parse.cftable_entry.io.flags & CISTPL_IO_LINES_MASK;
380 link->io.NumPorts2 = 0;
381 342
382 { 343 ret = pcmcia_loop_config(link, mio_pcmcia_config_loop, NULL);
383 int base; 344 if (ret) {
384 for (base = 0x000; base < 0x400; base += 0x20) { 345 dev_warn(&link->dev, "no configuration found\n");
385 link->io.BasePort1 = base; 346 return;
386 ret = pcmcia_request_io(link, &link->io);
387 /* printk("RequestIO 0x%02x\n",ret); */
388 if (!ret)
389 break;
390 }
391 } 347 }
392 348
393 link->irq.IRQInfo1 = parse.cftable_entry.irq.IRQInfo1;
394 link->irq.IRQInfo2 = parse.cftable_entry.irq.IRQInfo2;
395 ret = pcmcia_request_irq(link, &link->irq); 349 ret = pcmcia_request_irq(link, &link->irq);
396 if (ret) { 350 if (ret) {
397 printk("pcmcia_request_irq() returned error: %i\n", ret); 351 printk("pcmcia_request_irq() returned error: %i\n", ret);
398 } 352 }
399 /* printk("RequestIRQ 0x%02x\n",ret); */
400
401 link->conf.ConfigIndex = 1;
402 353
403 ret = pcmcia_request_configuration(link, &link->conf); 354 ret = pcmcia_request_configuration(link, &link->conf);
404 /* printk("RequestConfiguration %d\n",ret); */
405 355
406 link->dev_node = &dev_node; 356 link->dev_node = &dev_node;
407} 357}
@@ -475,40 +425,17 @@ static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
475 return 0; 425 return 0;
476} 426}
477 427
478static int get_prodid(struct comedi_device *dev, struct pcmcia_device *link)
479{
480 tuple_t tuple;
481 u_short buf[128];
482 int prodid = 0;
483
484 tuple.TupleData = (cisdata_t *) buf;
485 tuple.TupleOffset = 0;
486 tuple.TupleDataMax = 255;
487 tuple.DesiredTuple = CISTPL_MANFID;
488 tuple.Attributes = TUPLE_RETURN_COMMON;
489 if ((pcmcia_get_first_tuple(link, &tuple) == 0) &&
490 (pcmcia_get_tuple_data(link, &tuple) == 0)) {
491 prodid = le16_to_cpu(buf[1]);
492 }
493
494 return prodid;
495}
496
497static int ni_getboardtype(struct comedi_device *dev, 428static int ni_getboardtype(struct comedi_device *dev,
498 struct pcmcia_device *link) 429 struct pcmcia_device *link)
499{ 430{
500 int id;
501 int i; 431 int i;
502 432
503 id = get_prodid(dev, link);
504
505 for (i = 0; i < n_ni_boards; i++) { 433 for (i = 0; i < n_ni_boards; i++) {
506 if (ni_boards[i].device_id == id) { 434 if (ni_boards[i].device_id == link->card_id)
507 return i; 435 return i;
508 }
509 } 436 }
510 437
511 printk("unknown board 0x%04x -- pretend it is a ", id); 438 printk("unknown board 0x%04x -- pretend it is a ", link->card_id);
512 439
513 return 0; 440 return 0;
514} 441}
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 344b82353e08..5256fd933162 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -55,23 +55,6 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
55#include <pcmcia/cisreg.h> 55#include <pcmcia/cisreg.h>
56#include <pcmcia/ds.h> 56#include <pcmcia/ds.h>
57 57
58/*
59 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
60 you do not define PCMCIA_DEBUG at all, all the debug code will be
61 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
62 be present but disabled -- but it can then be enabled for specific
63 modules at load time with a 'pc_debug=#' option to insmod.
64*/
65
66#ifdef PCMCIA_DEBUG
67static int pc_debug = PCMCIA_DEBUG;
68module_param(pc_debug, int, 0644);
69#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
70static char *version = "quatech_daqp_cs.c 1.10 2003/04/21 (Brent Baccala)";
71#else
72#define DEBUG(n, args...)
73#endif
74
75/* Maximum number of separate DAQP devices we'll allow */ 58/* Maximum number of separate DAQP devices we'll allow */
76#define MAX_DEV 4 59#define MAX_DEV 4
77 60
@@ -863,8 +846,6 @@ static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
863{ 846{
864 int ret; 847 int ret;
865 struct local_info_t *local = dev_table[it->options[0]]; 848 struct local_info_t *local = dev_table[it->options[0]];
866 tuple_t tuple;
867 int i;
868 struct comedi_subdevice *s; 849 struct comedi_subdevice *s;
869 850
870 if (it->options[0] < 0 || it->options[0] >= MAX_DEV || !local) { 851 if (it->options[0] < 0 || it->options[0] >= MAX_DEV || !local) {
@@ -883,29 +864,10 @@ static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
883 864
884 strcpy(local->board_name, "DAQP"); 865 strcpy(local->board_name, "DAQP");
885 dev->board_name = local->board_name; 866 dev->board_name = local->board_name;
886 867 if (local->link->prod_id[2]) {
887 tuple.DesiredTuple = CISTPL_VERS_1; 868 if (strncmp(local->link->prod_id[2], "DAQP", 4) == 0) {
888 if (pcmcia_get_first_tuple(local->link, &tuple) == 0) { 869 strncpy(local->board_name, local->link->prod_id[2],
889 u_char buf[128]; 870 sizeof(local->board_name));
890
891 buf[0] = buf[sizeof(buf) - 1] = 0;
892 tuple.TupleData = buf;
893 tuple.TupleDataMax = sizeof(buf);
894 tuple.TupleOffset = 2;
895 if (pcmcia_get_tuple_data(local->link, &tuple) == 0) {
896
897 for (i = 0; i < tuple.TupleDataLen - 4; i++)
898 if (buf[i] == 0)
899 break;
900 for (i++; i < tuple.TupleDataLen - 4; i++)
901 if (buf[i] == 0)
902 break;
903 i++;
904 if ((i < tuple.TupleDataLen - 4)
905 && (strncmp(buf + i, "DAQP", 4) == 0)) {
906 strncpy(local->board_name, buf + i,
907 sizeof(local->board_name));
908 }
909 } 871 }
910 } 872 }
911 873
@@ -1058,7 +1020,7 @@ static int daqp_cs_attach(struct pcmcia_device *link)
1058 struct local_info_t *local; 1020 struct local_info_t *local;
1059 int i; 1021 int i;
1060 1022
1061 DEBUG(0, "daqp_cs_attach()\n"); 1023 dev_dbg(&link->dev, "daqp_cs_attach()\n");
1062 1024
1063 for (i = 0; i < MAX_DEV; i++) 1025 for (i = 0; i < MAX_DEV; i++)
1064 if (dev_table[i] == NULL) 1026 if (dev_table[i] == NULL)
@@ -1079,10 +1041,8 @@ static int daqp_cs_attach(struct pcmcia_device *link)
1079 link->priv = local; 1041 link->priv = local;
1080 1042
1081 /* Interrupt setup */ 1043 /* Interrupt setup */
1082 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 1044 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
1083 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
1084 link->irq.Handler = daqp_interrupt; 1045 link->irq.Handler = daqp_interrupt;
1085 link->irq.Instance = local;
1086 1046
1087 /* 1047 /*
1088 General socket configuration defaults can go here. In this 1048 General socket configuration defaults can go here. In this
@@ -1112,7 +1072,7 @@ static void daqp_cs_detach(struct pcmcia_device *link)
1112{ 1072{
1113 struct local_info_t *dev = link->priv; 1073 struct local_info_t *dev = link->priv;
1114 1074
1115 DEBUG(0, "daqp_cs_detach(0x%p)\n", link); 1075 dev_dbg(&link->dev, "daqp_cs_detach\n");
1116 1076
1117 if (link->dev_node) { 1077 if (link->dev_node) {
1118 dev->stop = 1; 1078 dev->stop = 1;
@@ -1134,115 +1094,54 @@ static void daqp_cs_detach(struct pcmcia_device *link)
1134 1094
1135======================================================================*/ 1095======================================================================*/
1136 1096
1137static void daqp_cs_config(struct pcmcia_device *link)
1138{
1139 struct local_info_t *dev = link->priv;
1140 tuple_t tuple;
1141 cisparse_t parse;
1142 int last_ret;
1143 u_char buf[64];
1144
1145 DEBUG(0, "daqp_cs_config(0x%p)\n", link);
1146
1147 /*
1148 This reads the card's CONFIG tuple to find its configuration
1149 registers.
1150 */
1151 tuple.DesiredTuple = CISTPL_CONFIG;
1152 tuple.Attributes = 0;
1153 tuple.TupleData = buf;
1154 tuple.TupleDataMax = sizeof(buf);
1155 tuple.TupleOffset = 0;
1156
1157 last_ret = pcmcia_get_first_tuple(link, &tuple);
1158 if (last_ret) {
1159 cs_error(link, GetFirstTuple, last_ret);
1160 goto cs_failed;
1161 }
1162 1097
1163 last_ret = pcmcia_get_tuple_data(link, &tuple); 1098static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
1164 if (last_ret) { 1099 cistpl_cftable_entry_t *cfg,
1165 cs_error(link, GetTupleData, last_ret); 1100 cistpl_cftable_entry_t *dflt,
1166 goto cs_failed; 1101 unsigned int vcc,
1167 } 1102 void *priv_data)
1168 1103{
1169 last_ret = pcmcia_parse_tuple(&tuple, &parse); 1104 if (cfg->index == 0)
1170 if (last_ret) { 1105 return -ENODEV;
1171 cs_error(link, ParseTuple, last_ret);
1172 goto cs_failed;
1173 }
1174 link->conf.ConfigBase = parse.config.base;
1175 link->conf.Present = parse.config.rmask[0];
1176 1106
1177 /* 1107 /* Do we need to allocate an interrupt? */
1178 In this loop, we scan the CIS for configuration table entries, 1108 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
1179 each of which describes a valid card configuration, including 1109 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
1180 voltage, IO window, memory window, and interrupt settings. 1110
1181 1111 /* IO window settings */
1182 We make no assumptions about the card to be configured: we use 1112 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
1183 just the information available in the CIS. In an ideal world, 1113 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
1184 this would work for any PCMCIA card, but it requires a complete 1114 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
1185 and accurate CIS. In practice, a driver usually "knows" most of 1115 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
1186 these things without consulting the CIS, and most client drivers 1116 if (!(io->flags & CISTPL_IO_8BIT))
1187 will only use the CIS to fill in implementation-defined details. 1117 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
1188 */ 1118 if (!(io->flags & CISTPL_IO_16BIT))
1189 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 1119 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
1190 last_ret = pcmcia_get_first_tuple(link, &tuple); 1120 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
1191 if (last_ret) { 1121 p_dev->io.BasePort1 = io->win[0].base;
1192 cs_error(link, GetFirstTuple, last_ret); 1122 p_dev->io.NumPorts1 = io->win[0].len;
1193 goto cs_failed; 1123 if (io->nwin > 1) {
1124 p_dev->io.Attributes2 = p_dev->io.Attributes1;
1125 p_dev->io.BasePort2 = io->win[1].base;
1126 p_dev->io.NumPorts2 = io->win[1].len;
1127 }
1194 } 1128 }
1195 1129
1196 while (1) { 1130 /* This reserves IO space but doesn't actually enable it */
1197 cistpl_cftable_entry_t dflt = { 0 }; 1131 return pcmcia_request_io(p_dev, &p_dev->io);
1198 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 1132}
1199 if (pcmcia_get_tuple_data(link, &tuple))
1200 goto next_entry;
1201 if (pcmcia_parse_tuple(&tuple, &parse))
1202 goto next_entry;
1203
1204 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
1205 dflt = *cfg;
1206 if (cfg->index == 0)
1207 goto next_entry;
1208 link->conf.ConfigIndex = cfg->index;
1209
1210 /* Do we need to allocate an interrupt? */
1211 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
1212 link->conf.Attributes |= CONF_ENABLE_IRQ;
1213
1214 /* IO window settings */
1215 link->io.NumPorts1 = link->io.NumPorts2 = 0;
1216 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
1217 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
1218 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
1219 if (!(io->flags & CISTPL_IO_8BIT))
1220 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
1221 if (!(io->flags & CISTPL_IO_16BIT))
1222 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
1223 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
1224 link->io.BasePort1 = io->win[0].base;
1225 link->io.NumPorts1 = io->win[0].len;
1226 if (io->nwin > 1) {
1227 link->io.Attributes2 = link->io.Attributes1;
1228 link->io.BasePort2 = io->win[1].base;
1229 link->io.NumPorts2 = io->win[1].len;
1230 }
1231 }
1232 1133
1233 /* This reserves IO space but doesn't actually enable it */ 1134static void daqp_cs_config(struct pcmcia_device *link)
1234 if (pcmcia_request_io(link, &link->io)) 1135{
1235 goto next_entry; 1136 struct local_info_t *dev = link->priv;
1137 int ret;
1236 1138
1237 /* If we got this far, we're cool! */ 1139 dev_dbg(&link->dev, "daqp_cs_config\n");
1238 break;
1239 1140
1240next_entry: 1141 ret = pcmcia_loop_config(link, daqp_pcmcia_config_loop, NULL);
1241 last_ret = pcmcia_get_next_tuple(link, &tuple); 1142 if (ret) {
1242 if (last_ret) { 1143 dev_warn(&link->dev, "no configuration found\n");
1243 cs_error(link, GetNextTuple, last_ret); 1144 goto failed;
1244 goto cs_failed;
1245 }
1246 } 1145 }
1247 1146
1248 /* 1147 /*
@@ -1251,11 +1150,9 @@ next_entry:
1251 irq structure is initialized. 1150 irq structure is initialized.
1252 */ 1151 */
1253 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1152 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
1254 last_ret = pcmcia_request_irq(link, &link->irq); 1153 ret = pcmcia_request_irq(link, &link->irq);
1255 if (last_ret) { 1154 if (ret)
1256 cs_error(link, RequestIRQ, last_ret); 1155 goto failed;
1257 goto cs_failed;
1258 }
1259 } 1156 }
1260 1157
1261 /* 1158 /*
@@ -1263,11 +1160,9 @@ next_entry:
1263 the I/O windows and the interrupt mapping, and putting the 1160 the I/O windows and the interrupt mapping, and putting the
1264 card and host interface into "Memory and IO" mode. 1161 card and host interface into "Memory and IO" mode.
1265 */ 1162 */
1266 last_ret = pcmcia_request_configuration(link, &link->conf); 1163 ret = pcmcia_request_configuration(link, &link->conf);
1267 if (last_ret) { 1164 if (ret)
1268 cs_error(link, RequestConfiguration, last_ret); 1165 goto failed;
1269 goto cs_failed;
1270 }
1271 1166
1272 /* 1167 /*
1273 At this point, the dev_node_t structure(s) need to be 1168 At this point, the dev_node_t structure(s) need to be
@@ -1296,14 +1191,14 @@ next_entry:
1296 1191
1297 return; 1192 return;
1298 1193
1299cs_failed: 1194failed:
1300 daqp_cs_release(link); 1195 daqp_cs_release(link);
1301 1196
1302} /* daqp_cs_config */ 1197} /* daqp_cs_config */
1303 1198
1304static void daqp_cs_release(struct pcmcia_device *link) 1199static void daqp_cs_release(struct pcmcia_device *link)
1305{ 1200{
1306 DEBUG(0, "daqp_cs_release(0x%p)\n", link); 1201 dev_dbg(&link->dev, "daqp_cs_release\n");
1307 1202
1308 pcmcia_disable_device(link); 1203 pcmcia_disable_device(link);
1309} /* daqp_cs_release */ 1204} /* daqp_cs_release */
@@ -1363,7 +1258,6 @@ struct pcmcia_driver daqp_cs_driver = {
1363 1258
1364int __init init_module(void) 1259int __init init_module(void)
1365{ 1260{
1366 DEBUG(0, "%s\n", version);
1367 pcmcia_register_driver(&daqp_cs_driver); 1261 pcmcia_register_driver(&daqp_cs_driver);
1368 comedi_driver_register(&driver_daqp); 1262 comedi_driver_register(&driver_daqp);
1369 return 0; 1263 return 0;
@@ -1371,7 +1265,6 @@ int __init init_module(void)
1371 1265
1372void __exit cleanup_module(void) 1266void __exit cleanup_module(void)
1373{ 1267{
1374 DEBUG(0, "daqp_cs: unloading\n");
1375 comedi_driver_unregister(&driver_daqp); 1268 comedi_driver_unregister(&driver_daqp);
1376 pcmcia_unregister_driver(&daqp_cs_driver); 1269 pcmcia_unregister_driver(&daqp_cs_driver);
1377} 1270}
diff --git a/drivers/staging/netwave/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index 9498b46c99a4..e61e6b9440ab 100644
--- a/drivers/staging/netwave/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
@@ -145,23 +145,6 @@ static const unsigned int txConfEUD = 0x10; /* Enable Uni-Data packets */
145static const unsigned int txConfKey = 0x02; /* Scramble data packets */ 145static const unsigned int txConfKey = 0x02; /* Scramble data packets */
146static const unsigned int txConfLoop = 0x01; /* Loopback mode */ 146static const unsigned int txConfLoop = 0x01; /* Loopback mode */
147 147
148/*
149 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
150 you do not define PCMCIA_DEBUG at all, all the debug code will be
151 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
152 be present but disabled -- but it can then be enabled for specific
153 modules at load time with a 'pc_debug=#' option to insmod.
154*/
155
156#ifdef PCMCIA_DEBUG
157static int pc_debug = PCMCIA_DEBUG;
158module_param(pc_debug, int, 0);
159#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
160static char *version =
161"netwave_cs.c 0.3.0 Thu Jul 17 14:36:02 1997 (John Markus Bjørndalen)\n";
162#else
163#define DEBUG(n, args...)
164#endif
165 148
166/*====================================================================*/ 149/*====================================================================*/
167 150
@@ -383,7 +366,7 @@ static int netwave_probe(struct pcmcia_device *link)
383 struct net_device *dev; 366 struct net_device *dev;
384 netwave_private *priv; 367 netwave_private *priv;
385 368
386 DEBUG(0, "netwave_attach()\n"); 369 dev_dbg(&link->dev, "netwave_attach()\n");
387 370
388 /* Initialize the struct pcmcia_device structure */ 371 /* Initialize the struct pcmcia_device structure */
389 dev = alloc_etherdev(sizeof(netwave_private)); 372 dev = alloc_etherdev(sizeof(netwave_private));
@@ -401,8 +384,7 @@ static int netwave_probe(struct pcmcia_device *link)
401 link->io.IOAddrLines = 5; 384 link->io.IOAddrLines = 5;
402 385
403 /* Interrupt setup */ 386 /* Interrupt setup */
404 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 387 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
405 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
406 link->irq.Handler = &netwave_interrupt; 388 link->irq.Handler = &netwave_interrupt;
407 389
408 /* General socket configuration */ 390 /* General socket configuration */
@@ -421,8 +403,6 @@ static int netwave_probe(struct pcmcia_device *link)
421 403
422 dev->watchdog_timeo = TX_TIMEOUT; 404 dev->watchdog_timeo = TX_TIMEOUT;
423 405
424 link->irq.Instance = dev;
425
426 return netwave_pcmcia_config( link); 406 return netwave_pcmcia_config( link);
427} /* netwave_attach */ 407} /* netwave_attach */
428 408
@@ -438,7 +418,7 @@ static void netwave_detach(struct pcmcia_device *link)
438{ 418{
439 struct net_device *dev = link->priv; 419 struct net_device *dev = link->priv;
440 420
441 DEBUG(0, "netwave_detach(0x%p)\n", link); 421 dev_dbg(&link->dev, "netwave_detach\n");
442 422
443 netwave_release(link); 423 netwave_release(link);
444 424
@@ -725,18 +705,15 @@ static const struct iw_handler_def netwave_handler_def =
725 * 705 *
726 */ 706 */
727 707
728#define CS_CHECK(fn, ret) \
729do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
730
731static int netwave_pcmcia_config(struct pcmcia_device *link) { 708static int netwave_pcmcia_config(struct pcmcia_device *link) {
732 struct net_device *dev = link->priv; 709 struct net_device *dev = link->priv;
733 netwave_private *priv = netdev_priv(dev); 710 netwave_private *priv = netdev_priv(dev);
734 int i, j, last_ret, last_fn; 711 int i, j, ret;
735 win_req_t req; 712 win_req_t req;
736 memreq_t mem; 713 memreq_t mem;
737 u_char __iomem *ramBase = NULL; 714 u_char __iomem *ramBase = NULL;
738 715
739 DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link); 716 dev_dbg(&link->dev, "netwave_pcmcia_config\n");
740 717
741 /* 718 /*
742 * Try allocating IO ports. This tries a few fixed addresses. 719 * Try allocating IO ports. This tries a few fixed addresses.
@@ -749,22 +726,24 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
749 if (i == 0) 726 if (i == 0)
750 break; 727 break;
751 } 728 }
752 if (i != 0) { 729 if (i != 0)
753 cs_error(link, RequestIO, i);
754 goto failed; 730 goto failed;
755 }
756 731
757 /* 732 /*
758 * Now allocate an interrupt line. Note that this does not 733 * Now allocate an interrupt line. Note that this does not
759 * actually assign a handler to the interrupt. 734 * actually assign a handler to the interrupt.
760 */ 735 */
761 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 736 ret = pcmcia_request_irq(link, &link->irq);
737 if (ret)
738 goto failed;
762 739
763 /* 740 /*
764 * This actually configures the PCMCIA socket -- setting up 741 * This actually configures the PCMCIA socket -- setting up
765 * the I/O windows and the interrupt mapping. 742 * the I/O windows and the interrupt mapping.
766 */ 743 */
767 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 744 ret = pcmcia_request_configuration(link, &link->conf);
745 if (ret)
746 goto failed;
768 747
769 /* 748 /*
770 * Allocate a 32K memory window. Note that the struct pcmcia_device 749 * Allocate a 32K memory window. Note that the struct pcmcia_device
@@ -772,14 +751,18 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
772 * device needs several windows, you'll need to keep track of 751 * device needs several windows, you'll need to keep track of
773 * the handles in your private data structure, dev->priv. 752 * the handles in your private data structure, dev->priv.
774 */ 753 */
775 DEBUG(1, "Setting mem speed of %d\n", mem_speed); 754 dev_dbg(&link->dev, "Setting mem speed of %d\n", mem_speed);
776 755
777 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE; 756 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
778 req.Base = 0; req.Size = 0x8000; 757 req.Base = 0; req.Size = 0x8000;
779 req.AccessSpeed = mem_speed; 758 req.AccessSpeed = mem_speed;
780 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); 759 ret = pcmcia_request_window(link, &req, &link->win);
760 if (ret)
761 goto failed;
781 mem.CardOffset = 0x20000; mem.Page = 0; 762 mem.CardOffset = 0x20000; mem.Page = 0;
782 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 763 ret = pcmcia_map_mem_page(link, link->win, &mem);
764 if (ret)
765 goto failed;
783 766
784 /* Store base address of the common window frame */ 767 /* Store base address of the common window frame */
785 ramBase = ioremap(req.Base, 0x8000); 768 ramBase = ioremap(req.Base, 0x8000);
@@ -787,7 +770,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
787 770
788 dev->irq = link->irq.AssignedIRQ; 771 dev->irq = link->irq.AssignedIRQ;
789 dev->base_addr = link->io.BasePort1; 772 dev->base_addr = link->io.BasePort1;
790 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 773 SET_NETDEV_DEV(dev, &link->dev);
791 774
792 if (register_netdev(dev) != 0) { 775 if (register_netdev(dev) != 0) {
793 printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n"); 776 printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n");
@@ -818,8 +801,6 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
818 get_uint16(ramBase + NETWAVE_EREG_ARW+2)); 801 get_uint16(ramBase + NETWAVE_EREG_ARW+2));
819 return 0; 802 return 0;
820 803
821cs_failed:
822 cs_error(link, last_fn, last_ret);
823failed: 804failed:
824 netwave_release(link); 805 netwave_release(link);
825 return -ENODEV; 806 return -ENODEV;
@@ -837,7 +818,7 @@ static void netwave_release(struct pcmcia_device *link)
837 struct net_device *dev = link->priv; 818 struct net_device *dev = link->priv;
838 netwave_private *priv = netdev_priv(dev); 819 netwave_private *priv = netdev_priv(dev);
839 820
840 DEBUG(0, "netwave_release(0x%p)\n", link); 821 dev_dbg(&link->dev, "netwave_release\n");
841 822
842 pcmcia_disable_device(link); 823 pcmcia_disable_device(link);
843 if (link->win) 824 if (link->win)
@@ -892,7 +873,7 @@ static void netwave_reset(struct net_device *dev) {
892 u_char __iomem *ramBase = priv->ramBase; 873 u_char __iomem *ramBase = priv->ramBase;
893 unsigned int iobase = dev->base_addr; 874 unsigned int iobase = dev->base_addr;
894 875
895 DEBUG(0, "netwave_reset: Done with hardware reset\n"); 876 pr_debug("netwave_reset: Done with hardware reset\n");
896 877
897 priv->timeoutCounter = 0; 878 priv->timeoutCounter = 0;
898 879
@@ -988,7 +969,7 @@ static int netwave_hw_xmit(unsigned char* data, int len,
988 969
989 dev->stats.tx_bytes += len; 970 dev->stats.tx_bytes += len;
990 971
991 DEBUG(3, "Transmitting with SPCQ %x SPU %x LIF %x ISPLQ %x\n", 972 pr_debug("Transmitting with SPCQ %x SPU %x LIF %x ISPLQ %x\n",
992 readb(ramBase + NETWAVE_EREG_SPCQ), 973 readb(ramBase + NETWAVE_EREG_SPCQ),
993 readb(ramBase + NETWAVE_EREG_SPU), 974 readb(ramBase + NETWAVE_EREG_SPU),
994 readb(ramBase + NETWAVE_EREG_LIF), 975 readb(ramBase + NETWAVE_EREG_LIF),
@@ -1000,7 +981,7 @@ static int netwave_hw_xmit(unsigned char* data, int len,
1000 MaxData = get_uint16(ramBase + NETWAVE_EREG_TDP+2); 981 MaxData = get_uint16(ramBase + NETWAVE_EREG_TDP+2);
1001 DataOffset = get_uint16(ramBase + NETWAVE_EREG_TDP+4); 982 DataOffset = get_uint16(ramBase + NETWAVE_EREG_TDP+4);
1002 983
1003 DEBUG(3, "TxFreeList %x, MaxData %x, DataOffset %x\n", 984 pr_debug("TxFreeList %x, MaxData %x, DataOffset %x\n",
1004 TxFreeList, MaxData, DataOffset); 985 TxFreeList, MaxData, DataOffset);
1005 986
1006 /* Copy packet to the adapter fragment buffers */ 987 /* Copy packet to the adapter fragment buffers */
@@ -1088,7 +1069,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1088 status = inb(iobase + NETWAVE_REG_ASR); 1069 status = inb(iobase + NETWAVE_REG_ASR);
1089 1070
1090 if (!pcmcia_dev_present(link)) { 1071 if (!pcmcia_dev_present(link)) {
1091 DEBUG(1, "netwave_interrupt: Interrupt with status 0x%x " 1072 pr_debug("netwave_interrupt: Interrupt with status 0x%x "
1092 "from removed or suspended card!\n", status); 1073 "from removed or suspended card!\n", status);
1093 break; 1074 break;
1094 } 1075 }
@@ -1132,7 +1113,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1132 int txStatus; 1113 int txStatus;
1133 1114
1134 txStatus = readb(ramBase + NETWAVE_EREG_TSER); 1115 txStatus = readb(ramBase + NETWAVE_EREG_TSER);
1135 DEBUG(3, "Transmit done. TSER = %x id %x\n", 1116 pr_debug("Transmit done. TSER = %x id %x\n",
1136 txStatus, readb(ramBase + NETWAVE_EREG_TSER + 1)); 1117 txStatus, readb(ramBase + NETWAVE_EREG_TSER + 1));
1137 1118
1138 if (txStatus & 0x20) { 1119 if (txStatus & 0x20) {
@@ -1156,7 +1137,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1156 * TxGU and TxNOAP is set. (Those are the only ones 1137 * TxGU and TxNOAP is set. (Those are the only ones
1157 * to set TxErr). 1138 * to set TxErr).
1158 */ 1139 */
1159 DEBUG(3, "netwave_interrupt: TxDN with error status %x\n", 1140 pr_debug("netwave_interrupt: TxDN with error status %x\n",
1160 txStatus); 1141 txStatus);
1161 1142
1162 /* Clear out TxGU, TxNOAP, TxErr and TxTrys */ 1143 /* Clear out TxGU, TxNOAP, TxErr and TxTrys */
@@ -1164,7 +1145,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1164 writeb(0xdf & txStatus, ramBase+NETWAVE_EREG_TSER+4); 1145 writeb(0xdf & txStatus, ramBase+NETWAVE_EREG_TSER+4);
1165 ++dev->stats.tx_errors; 1146 ++dev->stats.tx_errors;
1166 } 1147 }
1167 DEBUG(3, "New status is TSER %x ASR %x\n", 1148 pr_debug("New status is TSER %x ASR %x\n",
1168 readb(ramBase + NETWAVE_EREG_TSER), 1149 readb(ramBase + NETWAVE_EREG_TSER),
1169 inb(iobase + NETWAVE_REG_ASR)); 1150 inb(iobase + NETWAVE_REG_ASR));
1170 1151
@@ -1172,7 +1153,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1172 } 1153 }
1173 /* TxBA, this would trigger on all error packets received */ 1154 /* TxBA, this would trigger on all error packets received */
1174 /* if (status & 0x01) { 1155 /* if (status & 0x01) {
1175 DEBUG(4, "Transmit buffers available, %x\n", status); 1156 pr_debug("Transmit buffers available, %x\n", status);
1176 } 1157 }
1177 */ 1158 */
1178 } 1159 }
@@ -1190,7 +1171,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1190 */ 1171 */
1191static void netwave_watchdog(struct net_device *dev) { 1172static void netwave_watchdog(struct net_device *dev) {
1192 1173
1193 DEBUG(1, "%s: netwave_watchdog: watchdog timer expired\n", dev->name); 1174 pr_debug("%s: netwave_watchdog: watchdog timer expired\n", dev->name);
1194 netwave_reset(dev); 1175 netwave_reset(dev);
1195 dev->trans_start = jiffies; 1176 dev->trans_start = jiffies;
1196 netif_wake_queue(dev); 1177 netif_wake_queue(dev);
@@ -1211,7 +1192,7 @@ static int netwave_rx(struct net_device *dev)
1211 int i; 1192 int i;
1212 u_char *ptr; 1193 u_char *ptr;
1213 1194
1214 DEBUG(3, "xinw_rx: Receiving ... \n"); 1195 pr_debug("xinw_rx: Receiving ... \n");
1215 1196
1216 /* Receive max 10 packets for now. */ 1197 /* Receive max 10 packets for now. */
1217 for (i = 0; i < 10; i++) { 1198 for (i = 0; i < 10; i++) {
@@ -1237,7 +1218,7 @@ static int netwave_rx(struct net_device *dev)
1237 1218
1238 skb = dev_alloc_skb(rcvLen+5); 1219 skb = dev_alloc_skb(rcvLen+5);
1239 if (skb == NULL) { 1220 if (skb == NULL) {
1240 DEBUG(1, "netwave_rx: Could not allocate an sk_buff of " 1221 pr_debug("netwave_rx: Could not allocate an sk_buff of "
1241 "length %d\n", rcvLen); 1222 "length %d\n", rcvLen);
1242 ++dev->stats.rx_dropped; 1223 ++dev->stats.rx_dropped;
1243 /* Tell the adapter to skip the packet */ 1224 /* Tell the adapter to skip the packet */
@@ -1279,7 +1260,7 @@ static int netwave_rx(struct net_device *dev)
1279 wait_WOC(iobase); 1260 wait_WOC(iobase);
1280 writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0); 1261 writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0);
1281 writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1); 1262 writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
1282 DEBUG(3, "Packet reception ok\n"); 1263 pr_debug("Packet reception ok\n");
1283 } 1264 }
1284 return 0; 1265 return 0;
1285} 1266}
@@ -1288,7 +1269,7 @@ static int netwave_open(struct net_device *dev) {
1288 netwave_private *priv = netdev_priv(dev); 1269 netwave_private *priv = netdev_priv(dev);
1289 struct pcmcia_device *link = priv->p_dev; 1270 struct pcmcia_device *link = priv->p_dev;
1290 1271
1291 DEBUG(1, "netwave_open: starting.\n"); 1272 dev_dbg(&link->dev, "netwave_open: starting.\n");
1292 1273
1293 if (!pcmcia_dev_present(link)) 1274 if (!pcmcia_dev_present(link))
1294 return -ENODEV; 1275 return -ENODEV;
@@ -1305,7 +1286,7 @@ static int netwave_close(struct net_device *dev) {
1305 netwave_private *priv = netdev_priv(dev); 1286 netwave_private *priv = netdev_priv(dev);
1306 struct pcmcia_device *link = priv->p_dev; 1287 struct pcmcia_device *link = priv->p_dev;
1307 1288
1308 DEBUG(1, "netwave_close: finishing.\n"); 1289 dev_dbg(&link->dev, "netwave_close: finishing.\n");
1309 1290
1310 link->open--; 1291 link->open--;
1311 netif_stop_queue(dev); 1292 netif_stop_queue(dev);
@@ -1358,11 +1339,11 @@ static void set_multicast_list(struct net_device *dev)
1358 u_char rcvMode = 0; 1339 u_char rcvMode = 0;
1359 1340
1360#ifdef PCMCIA_DEBUG 1341#ifdef PCMCIA_DEBUG
1361 if (pc_debug > 2) { 1342 {
 1362 	static int old; 1343 	static int old;
1363 if (old != dev->mc_count) { 1344 if (old != dev->mc_count) {
1364 old = dev->mc_count; 1345 old = dev->mc_count;
1365 DEBUG(0, "%s: setting Rx mode to %d addresses.\n", 1346 pr_debug("%s: setting Rx mode to %d addresses.\n",
1366 dev->name, dev->mc_count); 1347 dev->name, dev->mc_count);
1367 } 1348 }
1368 } 1349 }
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 431a20ec6db6..33918fd5b231 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -3656,10 +3656,7 @@ wv_pcmcia_reset(struct net_device * dev)
3656 3656
3657 i = pcmcia_access_configuration_register(link, &reg); 3657 i = pcmcia_access_configuration_register(link, &reg);
3658 if (i != 0) 3658 if (i != 0)
3659 {
3660 cs_error(link, AccessConfigurationRegister, i);
3661 return FALSE; 3659 return FALSE;
3662 }
3663 3660
3664#ifdef DEBUG_CONFIG_INFO 3661#ifdef DEBUG_CONFIG_INFO
3665 printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n", 3662 printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n",
@@ -3670,19 +3667,13 @@ wv_pcmcia_reset(struct net_device * dev)
3670 reg.Value = reg.Value | COR_SW_RESET; 3667 reg.Value = reg.Value | COR_SW_RESET;
3671 i = pcmcia_access_configuration_register(link, &reg); 3668 i = pcmcia_access_configuration_register(link, &reg);
3672 if (i != 0) 3669 if (i != 0)
3673 {
3674 cs_error(link, AccessConfigurationRegister, i);
3675 return FALSE; 3670 return FALSE;
3676 }
3677 3671
3678 reg.Action = CS_WRITE; 3672 reg.Action = CS_WRITE;
3679 reg.Value = COR_LEVEL_IRQ | COR_CONFIG; 3673 reg.Value = COR_LEVEL_IRQ | COR_CONFIG;
3680 i = pcmcia_access_configuration_register(link, &reg); 3674 i = pcmcia_access_configuration_register(link, &reg);
3681 if (i != 0) 3675 if (i != 0)
3682 {
3683 cs_error(link, AccessConfigurationRegister, i);
3684 return FALSE; 3676 return FALSE;
3685 }
3686 3677
3687#ifdef DEBUG_CONFIG_TRACE 3678#ifdef DEBUG_CONFIG_TRACE
3688 printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name); 3679 printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name);
@@ -3857,10 +3848,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3857 { 3848 {
3858 i = pcmcia_request_io(link, &link->io); 3849 i = pcmcia_request_io(link, &link->io);
3859 if (i != 0) 3850 if (i != 0)
3860 {
3861 cs_error(link, RequestIO, i);
3862 break; 3851 break;
3863 }
3864 3852
3865 /* 3853 /*
3866 * Now allocate an interrupt line. Note that this does not 3854 * Now allocate an interrupt line. Note that this does not
@@ -3868,10 +3856,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3868 */ 3856 */
3869 i = pcmcia_request_irq(link, &link->irq); 3857 i = pcmcia_request_irq(link, &link->irq);
3870 if (i != 0) 3858 if (i != 0)
3871 {
3872 cs_error(link, RequestIRQ, i);
3873 break; 3859 break;
3874 }
3875 3860
3876 /* 3861 /*
3877 * This actually configures the PCMCIA socket -- setting up 3862 * This actually configures the PCMCIA socket -- setting up
@@ -3880,10 +3865,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3880 link->conf.ConfigIndex = 1; 3865 link->conf.ConfigIndex = 1;
3881 i = pcmcia_request_configuration(link, &link->conf); 3866 i = pcmcia_request_configuration(link, &link->conf);
3882 if (i != 0) 3867 if (i != 0)
3883 {
3884 cs_error(link, RequestConfiguration, i);
3885 break; 3868 break;
3886 }
3887 3869
3888 /* 3870 /*
3889 * Allocate a small memory window. Note that the struct pcmcia_device 3871 * Allocate a small memory window. Note that the struct pcmcia_device
@@ -3894,24 +3876,18 @@ wv_pcmcia_config(struct pcmcia_device * link)
3894 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 3876 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
3895 req.Base = req.Size = 0; 3877 req.Base = req.Size = 0;
3896 req.AccessSpeed = mem_speed; 3878 req.AccessSpeed = mem_speed;
3897 i = pcmcia_request_window(&link, &req, &link->win); 3879 i = pcmcia_request_window(link, &req, &link->win);
3898 if (i != 0) 3880 if (i != 0)
3899 {
3900 cs_error(link, RequestWindow, i);
3901 break; 3881 break;
3902 }
3903 3882
3904 lp->mem = ioremap(req.Base, req.Size); 3883 lp->mem = ioremap(req.Base, req.Size);
3905 dev->mem_start = (u_long)lp->mem; 3884 dev->mem_start = (u_long)lp->mem;
3906 dev->mem_end = dev->mem_start + req.Size; 3885 dev->mem_end = dev->mem_start + req.Size;
3907 3886
3908 mem.CardOffset = 0; mem.Page = 0; 3887 mem.CardOffset = 0; mem.Page = 0;
3909 i = pcmcia_map_mem_page(link->win, &mem); 3888 i = pcmcia_map_mem_page(link, link->win, &mem);
3910 if (i != 0) 3889 if (i != 0)
3911 {
3912 cs_error(link, MapMemPage, i);
3913 break; 3890 break;
3914 }
3915 3891
3916 /* Feed device with this info... */ 3892 /* Feed device with this info... */
3917 dev->irq = link->irq.AssignedIRQ; 3893 dev->irq = link->irq.AssignedIRQ;
@@ -3923,7 +3899,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3923 lp->mem, dev->irq, (u_int) dev->base_addr); 3899 lp->mem, dev->irq, (u_int) dev->base_addr);
3924#endif 3900#endif
3925 3901
3926 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 3902 SET_NETDEV_DEV(dev, &link->dev);
3927 i = register_netdev(dev); 3903 i = register_netdev(dev);
3928 if(i != 0) 3904 if(i != 0)
3929 { 3905 {
@@ -4462,8 +4438,7 @@ wavelan_probe(struct pcmcia_device *p_dev)
4462 p_dev->io.IOAddrLines = 3; 4438 p_dev->io.IOAddrLines = 3;
4463 4439
4464 /* Interrupt setup */ 4440 /* Interrupt setup */
4465 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 4441 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
4466 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
4467 p_dev->irq.Handler = wavelan_interrupt; 4442 p_dev->irq.Handler = wavelan_interrupt;
4468 4443
4469 /* General socket configuration */ 4444 /* General socket configuration */
@@ -4475,7 +4450,7 @@ wavelan_probe(struct pcmcia_device *p_dev)
4475 if (!dev) 4450 if (!dev)
4476 return -ENOMEM; 4451 return -ENOMEM;
4477 4452
4478 p_dev->priv = p_dev->irq.Instance = dev; 4453 p_dev->priv = dev;
4479 4454
4480 lp = netdev_priv(dev); 4455 lp = netdev_priv(dev);
4481 4456
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index 347c3ed1d9f1..d442fd35620a 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -19,13 +19,6 @@
19 * PCMCIA service support for Quicknet cards 19 * PCMCIA service support for Quicknet cards
20 */ 20 */
21 21
22#ifdef PCMCIA_DEBUG
23static int pc_debug = PCMCIA_DEBUG;
24module_param(pc_debug, int, 0644);
25#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
26#else
27#define DEBUG(n, args...)
28#endif
29 22
30typedef struct ixj_info_t { 23typedef struct ixj_info_t {
31 int ndev; 24 int ndev;
@@ -39,7 +32,7 @@ static void ixj_cs_release(struct pcmcia_device * link);
39 32
40static int ixj_probe(struct pcmcia_device *p_dev) 33static int ixj_probe(struct pcmcia_device *p_dev)
41{ 34{
42 DEBUG(0, "ixj_attach()\n"); 35 dev_dbg(&p_dev->dev, "ixj_attach()\n");
43 /* Create new ixj device */ 36 /* Create new ixj device */
44 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 37 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
45 p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 38 p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
@@ -55,33 +48,30 @@ static int ixj_probe(struct pcmcia_device *p_dev)
55 48
56static void ixj_detach(struct pcmcia_device *link) 49static void ixj_detach(struct pcmcia_device *link)
57{ 50{
58 DEBUG(0, "ixj_detach(0x%p)\n", link); 51 dev_dbg(&link->dev, "ixj_detach\n");
59 52
60 ixj_cs_release(link); 53 ixj_cs_release(link);
61 54
62 kfree(link->priv); 55 kfree(link->priv);
63} 56}
64 57
65#define CS_CHECK(fn, ret) \
66do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
67
68static void ixj_get_serial(struct pcmcia_device * link, IXJ * j) 58static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
69{ 59{
70 char *str; 60 char *str;
71 int i, place; 61 int i, place;
72 DEBUG(0, "ixj_get_serial(0x%p)\n", link); 62 dev_dbg(&link->dev, "ixj_get_serial\n");
73 63
74 str = link->prod_id[0]; 64 str = link->prod_id[0];
75 if (!str) 65 if (!str)
76 goto cs_failed; 66 goto failed;
77 printk("%s", str); 67 printk("%s", str);
78 str = link->prod_id[1]; 68 str = link->prod_id[1];
79 if (!str) 69 if (!str)
80 goto cs_failed; 70 goto failed;
81 printk(" %s", str); 71 printk(" %s", str);
82 str = link->prod_id[2]; 72 str = link->prod_id[2];
83 if (!str) 73 if (!str)
84 goto cs_failed; 74 goto failed;
85 place = 1; 75 place = 1;
86 for (i = strlen(str) - 1; i >= 0; i--) { 76 for (i = strlen(str) - 1; i >= 0; i--) {
87 switch (str[i]) { 77 switch (str[i]) {
@@ -118,9 +108,9 @@ static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
118 } 108 }
119 str = link->prod_id[3]; 109 str = link->prod_id[3];
120 if (!str) 110 if (!str)
121 goto cs_failed; 111 goto failed;
122 printk(" version %s\n", str); 112 printk(" version %s\n", str);
123 cs_failed: 113failed:
124 return; 114 return;
125} 115}
126 116
@@ -151,13 +141,13 @@ static int ixj_config(struct pcmcia_device * link)
151 cistpl_cftable_entry_t dflt = { 0 }; 141 cistpl_cftable_entry_t dflt = { 0 };
152 142
153 info = link->priv; 143 info = link->priv;
154 DEBUG(0, "ixj_config(0x%p)\n", link); 144 dev_dbg(&link->dev, "ixj_config\n");
155 145
156 if (pcmcia_loop_config(link, ixj_config_check, &dflt)) 146 if (pcmcia_loop_config(link, ixj_config_check, &dflt))
157 goto cs_failed; 147 goto failed;
158 148
159 if (pcmcia_request_configuration(link, &link->conf)) 149 if (pcmcia_request_configuration(link, &link->conf))
160 goto cs_failed; 150 goto failed;
161 151
162 /* 152 /*
163 * Register the card with the core. 153 * Register the card with the core.
@@ -170,7 +160,7 @@ static int ixj_config(struct pcmcia_device * link)
170 ixj_get_serial(link, j); 160 ixj_get_serial(link, j);
171 return 0; 161 return 0;
172 162
173 cs_failed: 163failed:
174 ixj_cs_release(link); 164 ixj_cs_release(link);
175 return -ENODEV; 165 return -ENODEV;
176} 166}
@@ -178,7 +168,7 @@ static int ixj_config(struct pcmcia_device * link)
178static void ixj_cs_release(struct pcmcia_device *link) 168static void ixj_cs_release(struct pcmcia_device *link)
179{ 169{
180 ixj_info_t *info = link->priv; 170 ixj_info_t *info = link->priv;
181 DEBUG(0, "ixj_cs_release(0x%p)\n", link); 171 dev_dbg(&link->dev, "ixj_cs_release\n");
182 info->ndev = 0; 172 info->ndev = 0;
183 pcmcia_disable_device(link); 173 pcmcia_disable_device(link);
184} 174}
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 516848dd9b48..39d253e841f6 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -37,28 +37,8 @@ MODULE_LICENSE("GPL");
37/* MACROS */ 37/* MACROS */
38/*====================================================================*/ 38/*====================================================================*/
39 39
40#if defined(DEBUG) || defined(PCMCIA_DEBUG)
41
42static int pc_debug = 0;
43module_param(pc_debug, int, 0644);
44
45#define DBG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG "sl811_cs: " args)
46
47#else
48#define DBG(n, args...) do{}while(0)
49#endif /* no debugging */
50
51#define INFO(args...) printk(KERN_INFO "sl811_cs: " args) 40#define INFO(args...) printk(KERN_INFO "sl811_cs: " args)
52 41
53#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
54
55#define CS_CHECK(fn, ret) \
56 do { \
57 last_fn = (fn); \
58 if ((last_ret = (ret)) != 0) \
59 goto cs_failed; \
60 } while (0)
61
62/*====================================================================*/ 42/*====================================================================*/
63/* VARIABLES */ 43/* VARIABLES */
64/*====================================================================*/ 44/*====================================================================*/
@@ -76,7 +56,7 @@ static void sl811_cs_release(struct pcmcia_device * link);
76 56
77static void release_platform_dev(struct device * dev) 57static void release_platform_dev(struct device * dev)
78{ 58{
79 DBG(0, "sl811_cs platform_dev release\n"); 59 dev_dbg(dev, "sl811_cs platform_dev release\n");
80 dev->parent = NULL; 60 dev->parent = NULL;
81} 61}
82 62
@@ -140,7 +120,7 @@ static int sl811_hc_init(struct device *parent, resource_size_t base_addr,
140 120
141static void sl811_cs_detach(struct pcmcia_device *link) 121static void sl811_cs_detach(struct pcmcia_device *link)
142{ 122{
143 DBG(0, "sl811_cs_detach(0x%p)\n", link); 123 dev_dbg(&link->dev, "sl811_cs_detach\n");
144 124
145 sl811_cs_release(link); 125 sl811_cs_release(link);
146 126
@@ -150,7 +130,7 @@ static void sl811_cs_detach(struct pcmcia_device *link)
150 130
151static void sl811_cs_release(struct pcmcia_device * link) 131static void sl811_cs_release(struct pcmcia_device * link)
152{ 132{
153 DBG(0, "sl811_cs_release(0x%p)\n", link); 133 dev_dbg(&link->dev, "sl811_cs_release\n");
154 134
155 pcmcia_disable_device(link); 135 pcmcia_disable_device(link);
156 platform_device_unregister(&platform_dev); 136 platform_device_unregister(&platform_dev);
@@ -205,11 +185,11 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
205 185
206static int sl811_cs_config(struct pcmcia_device *link) 186static int sl811_cs_config(struct pcmcia_device *link)
207{ 187{
208 struct device *parent = &handle_to_dev(link); 188 struct device *parent = &link->dev;
209 local_info_t *dev = link->priv; 189 local_info_t *dev = link->priv;
210 int last_fn, last_ret; 190 int ret;
211 191
212 DBG(0, "sl811_cs_config(0x%p)\n", link); 192 dev_dbg(&link->dev, "sl811_cs_config\n");
213 193
214 if (pcmcia_loop_config(link, sl811_cs_config_check, NULL)) 194 if (pcmcia_loop_config(link, sl811_cs_config_check, NULL))
215 goto failed; 195 goto failed;
@@ -217,14 +197,16 @@ static int sl811_cs_config(struct pcmcia_device *link)
217 /* require an IRQ and two registers */ 197 /* require an IRQ and two registers */
218 if (!link->io.NumPorts1 || link->io.NumPorts1 < 2) 198 if (!link->io.NumPorts1 || link->io.NumPorts1 < 2)
219 goto failed; 199 goto failed;
220 if (link->conf.Attributes & CONF_ENABLE_IRQ) 200 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
221 CS_CHECK(RequestIRQ, 201 ret = pcmcia_request_irq(link, &link->irq);
222 pcmcia_request_irq(link, &link->irq)); 202 if (ret)
223 else 203 goto failed;
204 } else
224 goto failed; 205 goto failed;
225 206
226 CS_CHECK(RequestConfiguration, 207 ret = pcmcia_request_configuration(link, &link->conf);
227 pcmcia_request_configuration(link, &link->conf)); 208 if (ret)
209 goto failed;
228 210
229 sprintf(dev->node.dev_name, driver_name); 211 sprintf(dev->node.dev_name, driver_name);
230 dev->node.major = dev->node.minor = 0; 212 dev->node.major = dev->node.minor = 0;
@@ -241,8 +223,6 @@ static int sl811_cs_config(struct pcmcia_device *link)
241 223
242 if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ) 224 if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ)
243 < 0) { 225 < 0) {
244cs_failed:
245 cs_error(link, last_fn, last_ret);
246failed: 226failed:
247 printk(KERN_WARNING "sl811_cs_config failed\n"); 227 printk(KERN_WARNING "sl811_cs_config failed\n");
248 sl811_cs_release(link); 228 sl811_cs_release(link);
@@ -263,7 +243,6 @@ static int sl811_cs_probe(struct pcmcia_device *link)
263 243
264 /* Initialize */ 244 /* Initialize */
265 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 245 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
266 link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
267 link->irq.Handler = NULL; 246 link->irq.Handler = NULL;
268 247
269 link->conf.Attributes = 0; 248 link->conf.Attributes = 0;
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 035d56835b75..ea1fd3f47511 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -554,11 +554,11 @@ static int fb_check_var(struct fb_var_screeninfo *var,
554 var->transp.length = 0; 554 var->transp.length = 0;
555 break; 555 break;
556 case 16: /* RGB 565 */ 556 case 16: /* RGB 565 */
557 var->red.offset = 0; 557 var->red.offset = 11;
558 var->red.length = 5; 558 var->red.length = 5;
559 var->green.offset = 5; 559 var->green.offset = 5;
560 var->green.length = 6; 560 var->green.length = 6;
561 var->blue.offset = 11; 561 var->blue.offset = 0;
562 var->blue.length = 5; 562 var->blue.length = 5;
563 var->transp.offset = 0; 563 var->transp.offset = 0;
564 var->transp.length = 0; 564 var->transp.length = 0;
@@ -591,7 +591,7 @@ static int __devexit fb_remove(struct platform_device *dev)
591 unregister_framebuffer(info); 591 unregister_framebuffer(info);
592 fb_dealloc_cmap(&info->cmap); 592 fb_dealloc_cmap(&info->cmap);
593 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, 593 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
594 info->screen_base, 594 info->screen_base - PAGE_SIZE,
595 info->fix.smem_start); 595 info->fix.smem_start);
596 free_irq(par->irq, par); 596 free_irq(par->irq, par);
597 clk_disable(par->lcdc_clk); 597 clk_disable(par->lcdc_clk);
@@ -749,6 +749,7 @@ static int __init fb_probe(struct platform_device *device)
749 (PAGE_SIZE - par->palette_sz); 749 (PAGE_SIZE - par->palette_sz);
750 750
751 /* the rest of the frame buffer is pixel data */ 751 /* the rest of the frame buffer is pixel data */
752 da8xx_fb_info->screen_base = par->v_palette_base + par->palette_sz;
752 da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz; 753 da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz;
753 da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz; 754 da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz;
754 da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; 755 da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
@@ -787,6 +788,8 @@ static int __init fb_probe(struct platform_device *device)
787 da8xx_fb_info->var = da8xx_fb_var; 788 da8xx_fb_info->var = da8xx_fb_var;
788 da8xx_fb_info->fbops = &da8xx_fb_ops; 789 da8xx_fb_info->fbops = &da8xx_fb_ops;
789 da8xx_fb_info->pseudo_palette = par->pseudo_palette; 790 da8xx_fb_info->pseudo_palette = par->pseudo_palette;
791 da8xx_fb_info->fix.visual = (da8xx_fb_info->var.bits_per_pixel <= 8) ?
792 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
790 793
791 ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); 794 ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
792 if (ret) 795 if (ret)
@@ -825,7 +828,7 @@ err_free_irq:
825 828
826err_release_fb_mem: 829err_release_fb_mem:
827 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, 830 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
828 da8xx_fb_info->screen_base, 831 da8xx_fb_info->screen_base - PAGE_SIZE,
829 da8xx_fb_info->fix.smem_start); 832 da8xx_fb_info->fix.smem_start);
830 833
831err_release_fb: 834err_release_fb:
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 1a83709f9611..f67db4268374 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -1147,7 +1147,7 @@ static int __init gbefb_probe(struct platform_device *p_dev)
1147 gbefb_setup(options); 1147 gbefb_setup(options);
1148#endif 1148#endif
1149 1149
1150 if (!request_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) { 1150 if (!request_mem_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) {
1151 printk(KERN_ERR "gbefb: couldn't reserve mmio region\n"); 1151 printk(KERN_ERR "gbefb: couldn't reserve mmio region\n");
1152 ret = -EBUSY; 1152 ret = -EBUSY;
1153 goto out_release_framebuffer; 1153 goto out_release_framebuffer;
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index f6cccc9df022..bf12d06b5877 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -62,7 +62,7 @@ extern unsigned int idt_cpu_freq;
62static int timeout = WATCHDOG_TIMEOUT; 62static int timeout = WATCHDOG_TIMEOUT;
63module_param(timeout, int, 0); 63module_param(timeout, int, 0);
64MODULE_PARM_DESC(timeout, "Watchdog timeout value, in seconds (default=" 64MODULE_PARM_DESC(timeout, "Watchdog timeout value, in seconds (default="
65 WATCHDOG_TIMEOUT ")"); 65 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
66 66
67static int nowayout = WATCHDOG_NOWAYOUT; 67static int nowayout = WATCHDOG_NOWAYOUT;
68module_param(nowayout, int, 0); 68module_param(nowayout, int, 0);
@@ -276,7 +276,7 @@ static int __devinit rc32434_wdt_probe(struct platform_device *pdev)
276 return -ENODEV; 276 return -ENODEV;
277 } 277 }
278 278
279 wdt_reg = ioremap_nocache(r->start, r->end - r->start); 279 wdt_reg = ioremap_nocache(r->start, resource_size(r));
280 if (!wdt_reg) { 280 if (!wdt_reg) {
281 printk(KERN_ERR PFX "failed to remap I/O resources\n"); 281 printk(KERN_ERR PFX "failed to remap I/O resources\n");
282 return -ENXIO; 282 return -ENXIO;
diff --git a/fs/exec.c b/fs/exec.c
index ba112bd4a339..c0c636e34f60 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -46,7 +46,6 @@
46#include <linux/proc_fs.h> 46#include <linux/proc_fs.h>
47#include <linux/mount.h> 47#include <linux/mount.h>
48#include <linux/security.h> 48#include <linux/security.h>
49#include <linux/ima.h>
50#include <linux/syscalls.h> 49#include <linux/syscalls.h>
51#include <linux/tsacct_kern.h> 50#include <linux/tsacct_kern.h>
52#include <linux/cn_proc.h> 51#include <linux/cn_proc.h>
@@ -1209,9 +1208,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1209 retval = security_bprm_check(bprm); 1208 retval = security_bprm_check(bprm);
1210 if (retval) 1209 if (retval)
1211 return retval; 1210 return retval;
1212 retval = ima_bprm_check(bprm);
1213 if (retval)
1214 return retval;
1215 1211
1216 /* kernel module loader fixup */ 1212 /* kernel module loader fixup */
1217 /* so we don't try to load run modprobe in kernel space. */ 1213 /* so we don't try to load run modprobe in kernel space. */
diff --git a/fs/file_table.c b/fs/file_table.c
index 8eb44042e009..4bef4c01ec6f 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/security.h> 15#include <linux/security.h>
16#include <linux/ima.h>
17#include <linux/eventpoll.h> 16#include <linux/eventpoll.h>
18#include <linux/rcupdate.h> 17#include <linux/rcupdate.h>
19#include <linux/mount.h> 18#include <linux/mount.h>
@@ -280,7 +279,6 @@ void __fput(struct file *file)
280 if (file->f_op && file->f_op->release) 279 if (file->f_op && file->f_op->release)
281 file->f_op->release(inode, file); 280 file->f_op->release(inode, file);
282 security_file_free(file); 281 security_file_free(file);
283 ima_file_free(file);
284 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) 282 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
285 cdev_put(inode->i_cdev); 283 cdev_put(inode->i_cdev);
286 fops_put(file->f_op); 284 fops_put(file->f_op);
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 5971359d2090..4dcddf83326f 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -8,6 +8,8 @@ config GFS2_FS
8 select FS_POSIX_ACL 8 select FS_POSIX_ACL
9 select CRC32 9 select CRC32
10 select SLOW_WORK 10 select SLOW_WORK
11 select QUOTA
12 select QUOTACTL
11 help 13 help
12 A cluster filesystem. 14 A cluster filesystem.
13 15
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3fc4e3ac7d84..3eb1ea846173 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -12,6 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/completion.h> 13#include <linux/completion.h>
14#include <linux/buffer_head.h> 14#include <linux/buffer_head.h>
15#include <linux/xattr.h>
15#include <linux/posix_acl.h> 16#include <linux/posix_acl.h>
16#include <linux/posix_acl_xattr.h> 17#include <linux/posix_acl_xattr.h>
17#include <linux/gfs2_ondisk.h> 18#include <linux/gfs2_ondisk.h>
@@ -26,108 +27,44 @@
26#include "trans.h" 27#include "trans.h"
27#include "util.h" 28#include "util.h"
28 29
29#define ACL_ACCESS 1 30static const char *gfs2_acl_name(int type)
30#define ACL_DEFAULT 0
31
32int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
33 struct gfs2_ea_request *er, int *remove, mode_t *mode)
34{ 31{
35 struct posix_acl *acl; 32 switch (type) {
36 int error; 33 case ACL_TYPE_ACCESS:
37 34 return GFS2_POSIX_ACL_ACCESS;
38 error = gfs2_acl_validate_remove(ip, access); 35 case ACL_TYPE_DEFAULT:
39 if (error) 36 return GFS2_POSIX_ACL_DEFAULT;
40 return error;
41
42 if (!er->er_data)
43 return -EINVAL;
44
45 acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
46 if (IS_ERR(acl))
47 return PTR_ERR(acl);
48 if (!acl) {
49 *remove = 1;
50 return 0;
51 }
52
53 error = posix_acl_valid(acl);
54 if (error)
55 goto out;
56
57 if (access) {
58 error = posix_acl_equiv_mode(acl, mode);
59 if (!error)
60 *remove = 1;
61 else if (error > 0)
62 error = 0;
63 } 37 }
64 38 return NULL;
65out:
66 posix_acl_release(acl);
67 return error;
68}
69
70int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
71{
72 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
73 return -EOPNOTSUPP;
74 if (!is_owner_or_cap(&ip->i_inode))
75 return -EPERM;
76 if (S_ISLNK(ip->i_inode.i_mode))
77 return -EOPNOTSUPP;
78 if (!access && !S_ISDIR(ip->i_inode.i_mode))
79 return -EACCES;
80
81 return 0;
82} 39}
83 40
84static int acl_get(struct gfs2_inode *ip, const char *name, 41static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
85 struct posix_acl **acl, struct gfs2_ea_location *el,
86 char **datap, unsigned int *lenp)
87{ 42{
43 struct posix_acl *acl;
44 const char *name;
88 char *data; 45 char *data;
89 unsigned int len; 46 int len;
90 int error;
91
92 el->el_bh = NULL;
93 47
94 if (!ip->i_eattr) 48 if (!ip->i_eattr)
95 return 0; 49 return NULL;
96
97 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, el);
98 if (error)
99 return error;
100 if (!el->el_ea)
101 return 0;
102 if (!GFS2_EA_DATA_LEN(el->el_ea))
103 goto out;
104 50
105 len = GFS2_EA_DATA_LEN(el->el_ea); 51 acl = get_cached_acl(&ip->i_inode, type);
106 data = kmalloc(len, GFP_NOFS); 52 if (acl != ACL_NOT_CACHED)
107 error = -ENOMEM; 53 return acl;
108 if (!data)
109 goto out;
110 54
111 error = gfs2_ea_get_copy(ip, el, data, len); 55 name = gfs2_acl_name(type);
112 if (error < 0) 56 if (name == NULL)
113 goto out_kfree; 57 return ERR_PTR(-EINVAL);
114 error = 0;
115 58
116 if (acl) { 59 len = gfs2_xattr_acl_get(ip, name, &data);
117 *acl = posix_acl_from_xattr(data, len); 60 if (len < 0)
118 if (IS_ERR(*acl)) 61 return ERR_PTR(len);
119 error = PTR_ERR(*acl); 62 if (len == 0)
120 } 63 return NULL;
121 64
122out_kfree: 65 acl = posix_acl_from_xattr(data, len);
123 if (error || !datap) { 66 kfree(data);
124 kfree(data); 67 return acl;
125 } else {
126 *datap = data;
127 *lenp = len;
128 }
129out:
130 return error;
131} 68}
132 69
133/** 70/**
@@ -140,14 +77,12 @@ out:
140 77
141int gfs2_check_acl(struct inode *inode, int mask) 78int gfs2_check_acl(struct inode *inode, int mask)
142{ 79{
143 struct gfs2_ea_location el; 80 struct posix_acl *acl;
144 struct posix_acl *acl = NULL;
145 int error; 81 int error;
146 82
147 error = acl_get(GFS2_I(inode), GFS2_POSIX_ACL_ACCESS, &acl, &el, NULL, NULL); 83 acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
148 brelse(el.el_bh); 84 if (IS_ERR(acl))
149 if (error) 85 return PTR_ERR(acl);
150 return error;
151 86
152 if (acl) { 87 if (acl) {
153 error = posix_acl_permission(inode, acl, mask); 88 error = posix_acl_permission(inode, acl, mask);
@@ -158,57 +93,75 @@ int gfs2_check_acl(struct inode *inode, int mask)
158 return -EAGAIN; 93 return -EAGAIN;
159} 94}
160 95
161static int munge_mode(struct gfs2_inode *ip, mode_t mode) 96static int gfs2_set_mode(struct inode *inode, mode_t mode)
162{ 97{
163 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 98 int error = 0;
164 struct buffer_head *dibh;
165 int error;
166 99
167 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 100 if (mode != inode->i_mode) {
168 if (error) 101 struct iattr iattr;
169 return error;
170 102
171 error = gfs2_meta_inode_buffer(ip, &dibh); 103 iattr.ia_valid = ATTR_MODE;
172 if (!error) { 104 iattr.ia_mode = mode;
173 gfs2_assert_withdraw(sdp, 105
174 (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT)); 106 error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
175 ip->i_inode.i_mode = mode;
176 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
177 gfs2_dinode_out(ip, dibh->b_data);
178 brelse(dibh);
179 } 107 }
180 108
181 gfs2_trans_end(sdp); 109 return error;
110}
111
112static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
113{
114 int error;
115 int len;
116 char *data;
117 const char *name = gfs2_acl_name(type);
182 118
183 return 0; 119 BUG_ON(name == NULL);
120 len = posix_acl_to_xattr(acl, NULL, 0);
121 if (len == 0)
122 return 0;
123 data = kmalloc(len, GFP_NOFS);
124 if (data == NULL)
125 return -ENOMEM;
126 error = posix_acl_to_xattr(acl, data, len);
127 if (error < 0)
128 goto out;
129 error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0);
130 if (!error)
131 set_cached_acl(inode, type, acl);
132out:
133 kfree(data);
134 return error;
184} 135}
185 136
186int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) 137int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
187{ 138{
188 struct gfs2_ea_location el;
189 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 139 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
190 struct posix_acl *acl = NULL, *clone; 140 struct posix_acl *acl, *clone;
191 mode_t mode = ip->i_inode.i_mode; 141 mode_t mode = inode->i_mode;
192 char *data = NULL; 142 int error = 0;
193 unsigned int len;
194 int error;
195 143
196 if (!sdp->sd_args.ar_posix_acl) 144 if (!sdp->sd_args.ar_posix_acl)
197 return 0; 145 return 0;
198 if (S_ISLNK(ip->i_inode.i_mode)) 146 if (S_ISLNK(inode->i_mode))
199 return 0; 147 return 0;
200 148
201 error = acl_get(dip, GFS2_POSIX_ACL_DEFAULT, &acl, &el, &data, &len); 149 acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
202 brelse(el.el_bh); 150 if (IS_ERR(acl))
203 if (error) 151 return PTR_ERR(acl);
204 return error;
205 if (!acl) { 152 if (!acl) {
206 mode &= ~current_umask(); 153 mode &= ~current_umask();
207 if (mode != ip->i_inode.i_mode) 154 if (mode != inode->i_mode)
208 error = munge_mode(ip, mode); 155 error = gfs2_set_mode(inode, mode);
209 return error; 156 return error;
210 } 157 }
211 158
159 if (S_ISDIR(inode->i_mode)) {
160 error = gfs2_acl_set(inode, ACL_TYPE_DEFAULT, acl);
161 if (error)
162 goto out;
163 }
164
212 clone = posix_acl_clone(acl, GFP_NOFS); 165 clone = posix_acl_clone(acl, GFP_NOFS);
213 error = -ENOMEM; 166 error = -ENOMEM;
214 if (!clone) 167 if (!clone)
@@ -216,43 +169,32 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
216 posix_acl_release(acl); 169 posix_acl_release(acl);
217 acl = clone; 170 acl = clone;
218 171
219 if (S_ISDIR(ip->i_inode.i_mode)) {
220 error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
221 GFS2_POSIX_ACL_DEFAULT, data, len, 0);
222 if (error)
223 goto out;
224 }
225
226 error = posix_acl_create_masq(acl, &mode); 172 error = posix_acl_create_masq(acl, &mode);
227 if (error < 0) 173 if (error < 0)
228 goto out; 174 goto out;
229 if (error == 0) 175 if (error == 0)
230 goto munge; 176 goto munge;
231 177
232 posix_acl_to_xattr(acl, data, len); 178 error = gfs2_acl_set(inode, ACL_TYPE_ACCESS, acl);
233 error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
234 GFS2_POSIX_ACL_ACCESS, data, len, 0);
235 if (error) 179 if (error)
236 goto out; 180 goto out;
237munge: 181munge:
238 error = munge_mode(ip, mode); 182 error = gfs2_set_mode(inode, mode);
239out: 183out:
240 posix_acl_release(acl); 184 posix_acl_release(acl);
241 kfree(data);
242 return error; 185 return error;
243} 186}
244 187
245int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) 188int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
246{ 189{
247 struct posix_acl *acl = NULL, *clone; 190 struct posix_acl *acl, *clone;
248 struct gfs2_ea_location el;
249 char *data; 191 char *data;
250 unsigned int len; 192 unsigned int len;
251 int error; 193 int error;
252 194
253 error = acl_get(ip, GFS2_POSIX_ACL_ACCESS, &acl, &el, &data, &len); 195 acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
254 if (error) 196 if (IS_ERR(acl))
255 goto out_brelse; 197 return PTR_ERR(acl);
256 if (!acl) 198 if (!acl)
257 return gfs2_setattr_simple(ip, attr); 199 return gfs2_setattr_simple(ip, attr);
258 200
@@ -265,15 +207,134 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
265 207
266 error = posix_acl_chmod_masq(acl, attr->ia_mode); 208 error = posix_acl_chmod_masq(acl, attr->ia_mode);
267 if (!error) { 209 if (!error) {
210 len = posix_acl_to_xattr(acl, NULL, 0);
211 data = kmalloc(len, GFP_NOFS);
212 error = -ENOMEM;
213 if (data == NULL)
214 goto out;
268 posix_acl_to_xattr(acl, data, len); 215 posix_acl_to_xattr(acl, data, len);
269 error = gfs2_ea_acl_chmod(ip, &el, attr, data); 216 error = gfs2_xattr_acl_chmod(ip, attr, data);
217 kfree(data);
218 set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
270 } 219 }
271 220
272out: 221out:
273 posix_acl_release(acl); 222 posix_acl_release(acl);
274 kfree(data);
275out_brelse:
276 brelse(el.el_bh);
277 return error; 223 return error;
278} 224}
279 225
226static int gfs2_acl_type(const char *name)
227{
228 if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
229 return ACL_TYPE_ACCESS;
230 if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
231 return ACL_TYPE_DEFAULT;
232 return -EINVAL;
233}
234
235static int gfs2_xattr_system_get(struct inode *inode, const char *name,
236 void *buffer, size_t size)
237{
238 struct posix_acl *acl;
239 int type;
240 int error;
241
242 type = gfs2_acl_type(name);
243 if (type < 0)
244 return type;
245
246 acl = gfs2_acl_get(GFS2_I(inode), type);
247 if (IS_ERR(acl))
248 return PTR_ERR(acl);
249 if (acl == NULL)
250 return -ENODATA;
251
252 error = posix_acl_to_xattr(acl, buffer, size);
253 posix_acl_release(acl);
254
255 return error;
256}
257
258static int gfs2_xattr_system_set(struct inode *inode, const char *name,
259 const void *value, size_t size, int flags)
260{
261 struct gfs2_sbd *sdp = GFS2_SB(inode);
262 struct posix_acl *acl = NULL;
263 int error = 0, type;
264
265 if (!sdp->sd_args.ar_posix_acl)
266 return -EOPNOTSUPP;
267
268 type = gfs2_acl_type(name);
269 if (type < 0)
270 return type;
271 if (flags & XATTR_CREATE)
272 return -EINVAL;
273 if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
274 return value ? -EACCES : 0;
275 if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
276 return -EPERM;
277 if (S_ISLNK(inode->i_mode))
278 return -EOPNOTSUPP;
279
280 if (!value)
281 goto set_acl;
282
283 acl = posix_acl_from_xattr(value, size);
284 if (!acl) {
285 /*
286 * acl_set_file(3) may request that we set default ACLs with
287 * zero length -- defend (gracefully) against that here.
288 */
289 goto out;
290 }
291 if (IS_ERR(acl)) {
292 error = PTR_ERR(acl);
293 goto out;
294 }
295
296 error = posix_acl_valid(acl);
297 if (error)
298 goto out_release;
299
300 error = -EINVAL;
301 if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
302 goto out_release;
303
304 if (type == ACL_TYPE_ACCESS) {
305 mode_t mode = inode->i_mode;
306 error = posix_acl_equiv_mode(acl, &mode);
307
308 if (error <= 0) {
309 posix_acl_release(acl);
310 acl = NULL;
311
312 if (error < 0)
313 return error;
314 }
315
316 error = gfs2_set_mode(inode, mode);
317 if (error)
318 goto out_release;
319 }
320
321set_acl:
322 error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
323 if (!error) {
324 if (acl)
325 set_cached_acl(inode, type, acl);
326 else
327 forget_cached_acl(inode, type);
328 }
329out_release:
330 posix_acl_release(acl);
331out:
332 return error;
333}
334
335struct xattr_handler gfs2_xattr_system_handler = {
336 .prefix = XATTR_SYSTEM_PREFIX,
337 .get = gfs2_xattr_system_get,
338 .set = gfs2_xattr_system_set,
339};
340
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 6751930bfb64..9306a2e6620c 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -13,26 +13,12 @@
13#include "incore.h" 13#include "incore.h"
14 14
15#define GFS2_POSIX_ACL_ACCESS "posix_acl_access" 15#define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
16#define GFS2_POSIX_ACL_ACCESS_LEN 16
17#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" 16#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
18#define GFS2_POSIX_ACL_DEFAULT_LEN 17 17#define GFS2_ACL_MAX_ENTRIES 25
19 18
20#define GFS2_ACL_IS_ACCESS(name, len) \ 19extern int gfs2_check_acl(struct inode *inode, int mask);
21 ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \ 20extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
22 !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len))) 21extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
23 22extern struct xattr_handler gfs2_xattr_system_handler;
24#define GFS2_ACL_IS_DEFAULT(name, len) \
25 ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
26 !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
27
28struct gfs2_ea_request;
29
30int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
31 struct gfs2_ea_request *er,
32 int *remove, mode_t *mode);
33int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
34int gfs2_check_acl(struct inode *inode, int mask);
35int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
36int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
37 23
38#endif /* __ACL_DOT_H__ */ 24#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 694b5d48f036..7b8da9415267 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1); 270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); 271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
272 struct backing_dev_info *bdi = mapping->backing_dev_info;
273 int i; 272 int i;
274 int ret; 273 int ret;
275 274
@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
313 312
314 if (ret || (--(wbc->nr_to_write) <= 0)) 313 if (ret || (--(wbc->nr_to_write) <= 0))
315 ret = 1; 314 ret = 1;
316 if (wbc->nonblocking && bdi_write_congested(bdi)) {
317 wbc->encountered_congestion = 1;
318 ret = 1;
319 }
320
321 } 315 }
322 gfs2_trans_end(sdp); 316 gfs2_trans_end(sdp);
323 return ret; 317 return ret;
@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
338static int gfs2_write_cache_jdata(struct address_space *mapping, 332static int gfs2_write_cache_jdata(struct address_space *mapping,
339 struct writeback_control *wbc) 333 struct writeback_control *wbc)
340{ 334{
341 struct backing_dev_info *bdi = mapping->backing_dev_info;
342 int ret = 0; 335 int ret = 0;
343 int done = 0; 336 int done = 0;
344 struct pagevec pvec; 337 struct pagevec pvec;
@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
348 int scanned = 0; 341 int scanned = 0;
349 int range_whole = 0; 342 int range_whole = 0;
350 343
351 if (wbc->nonblocking && bdi_write_congested(bdi)) {
352 wbc->encountered_congestion = 1;
353 return 0;
354 }
355
356 pagevec_init(&pvec, 0); 344 pagevec_init(&pvec, 0);
357 if (wbc->range_cyclic) { 345 if (wbc->range_cyclic) {
358 index = mapping->writeback_index; /* Start from prev offset */ 346 index = mapping->writeback_index; /* Start from prev offset */
@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
819 mark_inode_dirty(inode); 807 mark_inode_dirty(inode);
820 } 808 }
821 809
822 if (inode == sdp->sd_rindex) 810 if (inode == sdp->sd_rindex) {
823 adjust_fs_space(inode); 811 adjust_fs_space(inode);
812 ip->i_gh.gh_flags |= GL_NOCACHE;
813 }
824 814
825 brelse(dibh); 815 brelse(dibh);
826 gfs2_trans_end(sdp); 816 gfs2_trans_end(sdp);
@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
889 mark_inode_dirty(inode); 879 mark_inode_dirty(inode);
890 } 880 }
891 881
892 if (inode == sdp->sd_rindex) 882 if (inode == sdp->sd_rindex) {
893 adjust_fs_space(inode); 883 adjust_fs_space(inode);
884 ip->i_gh.gh_flags |= GL_NOCACHE;
885 }
894 886
895 brelse(dibh); 887 brelse(dibh);
896 gfs2_trans_end(sdp); 888 gfs2_trans_end(sdp);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 297d7e5cebad..25fddc100f18 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -525,38 +525,6 @@ consist_inode:
525 return ERR_PTR(-EIO); 525 return ERR_PTR(-EIO);
526} 526}
527 527
528
529/**
530 * dirent_first - Return the first dirent
531 * @dip: the directory
532 * @bh: The buffer
533 * @dent: Pointer to list of dirents
534 *
535 * return first dirent whether bh points to leaf or stuffed dinode
536 *
537 * Returns: IS_LEAF, IS_DINODE, or -errno
538 */
539
540static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh,
541 struct gfs2_dirent **dent)
542{
543 struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;
544
545 if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
546 if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh))
547 return -EIO;
548 *dent = (struct gfs2_dirent *)(bh->b_data +
549 sizeof(struct gfs2_leaf));
550 return IS_LEAF;
551 } else {
552 if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI))
553 return -EIO;
554 *dent = (struct gfs2_dirent *)(bh->b_data +
555 sizeof(struct gfs2_dinode));
556 return IS_DINODE;
557 }
558}
559
560static int dirent_check_reclen(struct gfs2_inode *dip, 528static int dirent_check_reclen(struct gfs2_inode *dip,
561 const struct gfs2_dirent *d, const void *end_p) 529 const struct gfs2_dirent *d, const void *end_p)
562{ 530{
@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1006 divider = (start + half_len) << (32 - dip->i_depth); 974 divider = (start + half_len) << (32 - dip->i_depth);
1007 975
1008 /* Copy the entries */ 976 /* Copy the entries */
1009 dirent_first(dip, obh, &dent); 977 dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));
1010 978
1011 do { 979 do {
1012 next = dent; 980 next = dent;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8b674b1f3a55..f455a03a09e2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
241 int rv = 0; 241 int rv = 0;
242 242
243 write_lock(gl_lock_addr(gl->gl_hash)); 243 write_lock(gl_lock_addr(gl->gl_hash));
244 if (atomic_dec_and_test(&gl->gl_ref)) { 244 if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
245 hlist_del(&gl->gl_list); 245 hlist_del(&gl->gl_list);
246 write_unlock(gl_lock_addr(gl->gl_hash));
247 spin_lock(&lru_lock);
248 if (!list_empty(&gl->gl_lru)) { 246 if (!list_empty(&gl->gl_lru)) {
249 list_del_init(&gl->gl_lru); 247 list_del_init(&gl->gl_lru);
250 atomic_dec(&lru_count); 248 atomic_dec(&lru_count);
251 } 249 }
252 spin_unlock(&lru_lock); 250 spin_unlock(&lru_lock);
251 write_unlock(gl_lock_addr(gl->gl_hash));
253 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 252 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
254 glock_free(gl); 253 glock_free(gl);
255 rv = 1; 254 rv = 1;
@@ -513,7 +512,6 @@ retry:
513 GLOCK_BUG_ON(gl, 1); 512 GLOCK_BUG_ON(gl, 1);
514 } 513 }
515 spin_unlock(&gl->gl_spin); 514 spin_unlock(&gl->gl_spin);
516 gfs2_glock_put(gl);
517 return; 515 return;
518 } 516 }
519 517
@@ -524,8 +522,6 @@ retry:
524 if (glops->go_xmote_bh) { 522 if (glops->go_xmote_bh) {
525 spin_unlock(&gl->gl_spin); 523 spin_unlock(&gl->gl_spin);
526 rv = glops->go_xmote_bh(gl, gh); 524 rv = glops->go_xmote_bh(gl, gh);
527 if (rv == -EAGAIN)
528 return;
529 spin_lock(&gl->gl_spin); 525 spin_lock(&gl->gl_spin);
530 if (rv) { 526 if (rv) {
531 do_error(gl, rv); 527 do_error(gl, rv);
@@ -540,7 +536,6 @@ out:
540 clear_bit(GLF_LOCK, &gl->gl_flags); 536 clear_bit(GLF_LOCK, &gl->gl_flags);
541out_locked: 537out_locked:
542 spin_unlock(&gl->gl_spin); 538 spin_unlock(&gl->gl_spin);
543 gfs2_glock_put(gl);
544} 539}
545 540
546static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, 541static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
600 595
601 if (!(ret & LM_OUT_ASYNC)) { 596 if (!(ret & LM_OUT_ASYNC)) {
602 finish_xmote(gl, ret); 597 finish_xmote(gl, ret);
603 gfs2_glock_hold(gl);
604 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 598 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
605 gfs2_glock_put(gl); 599 gfs2_glock_put(gl);
606 } else { 600 } else {
@@ -672,12 +666,17 @@ out:
672 return; 666 return;
673 667
674out_sched: 668out_sched:
669 clear_bit(GLF_LOCK, &gl->gl_flags);
670 smp_mb__after_clear_bit();
675 gfs2_glock_hold(gl); 671 gfs2_glock_hold(gl);
676 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 672 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
677 gfs2_glock_put_nolock(gl); 673 gfs2_glock_put_nolock(gl);
674 return;
675
678out_unlock: 676out_unlock:
679 clear_bit(GLF_LOCK, &gl->gl_flags); 677 clear_bit(GLF_LOCK, &gl->gl_flags);
680 goto out; 678 smp_mb__after_clear_bit();
679 return;
681} 680}
682 681
683static void delete_work_func(struct work_struct *work) 682static void delete_work_func(struct work_struct *work)
@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
707{ 706{
708 unsigned long delay = 0; 707 unsigned long delay = 0;
709 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 708 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
709 int drop_ref = 0;
710 710
711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
712 finish_xmote(gl, gl->gl_reply); 712 finish_xmote(gl, gl->gl_reply);
713 drop_ref = 1;
714 }
713 down_read(&gfs2_umount_flush_sem); 715 down_read(&gfs2_umount_flush_sem);
714 spin_lock(&gl->gl_spin); 716 spin_lock(&gl->gl_spin);
715 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 717 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
727 if (!delay || 729 if (!delay ||
728 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 730 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
729 gfs2_glock_put(gl); 731 gfs2_glock_put(gl);
732 if (drop_ref)
733 gfs2_glock_put(gl);
730} 734}
731 735
732/** 736/**
@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1361 list_del_init(&gl->gl_lru); 1365 list_del_init(&gl->gl_lru);
1362 atomic_dec(&lru_count); 1366 atomic_dec(&lru_count);
1363 1367
1364 /* Check if glock is about to be freed */
1365 if (atomic_read(&gl->gl_ref) == 0)
1366 continue;
1367
1368 /* Test for being demotable */ 1368 /* Test for being demotable */
1369 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1369 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1370 gfs2_glock_hold(gl); 1370 gfs2_glock_hold(gl);
@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1375 handle_callback(gl, LM_ST_UNLOCKED, 0); 1375 handle_callback(gl, LM_ST_UNLOCKED, 0);
1376 nr--; 1376 nr--;
1377 } 1377 }
1378 clear_bit(GLF_LOCK, &gl->gl_flags);
1379 smp_mb__after_clear_bit();
1378 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1380 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1379 gfs2_glock_put_nolock(gl); 1381 gfs2_glock_put_nolock(gl);
1380 spin_unlock(&gl->gl_spin); 1382 spin_unlock(&gl->gl_spin);
1381 clear_bit(GLF_LOCK, &gl->gl_flags);
1382 spin_lock(&lru_lock); 1383 spin_lock(&lru_lock);
1383 continue; 1384 continue;
1384 } 1385 }
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c609894ec0d0..13f0bd228132 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
180 return gl->gl_state == LM_ST_SHARED; 180 return gl->gl_state == LM_ST_SHARED;
181} 181}
182 182
183static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
184{
185 int ret;
186 spin_lock(&gl->gl_spin);
187 ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
188 spin_unlock(&gl->gl_spin);
189 return ret;
190}
191
192int gfs2_glock_get(struct gfs2_sbd *sdp, 183int gfs2_glock_get(struct gfs2_sbd *sdp,
193 u64 number, const struct gfs2_glock_operations *glops, 184 u64 number, const struct gfs2_glock_operations *glops,
194 int create, struct gfs2_glock **glp); 185 int create, struct gfs2_glock **glp);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 6985eef06c39..78554acc0605 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -13,6 +13,7 @@
13#include <linux/buffer_head.h> 13#include <linux/buffer_head.h>
14#include <linux/gfs2_ondisk.h> 14#include <linux/gfs2_ondisk.h>
15#include <linux/bio.h> 15#include <linux/bio.h>
16#include <linux/posix_acl.h>
16 17
17#include "gfs2.h" 18#include "gfs2.h"
18#include "incore.h" 19#include "incore.h"
@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
184 if (flags & DIO_METADATA) { 185 if (flags & DIO_METADATA) {
185 struct address_space *mapping = gl->gl_aspace->i_mapping; 186 struct address_space *mapping = gl->gl_aspace->i_mapping;
186 truncate_inode_pages(mapping, 0); 187 truncate_inode_pages(mapping, 0);
187 if (ip) 188 if (ip) {
188 set_bit(GIF_INVALID, &ip->i_flags); 189 set_bit(GIF_INVALID, &ip->i_flags);
190 forget_all_cached_acls(&ip->i_inode);
191 }
189 } 192 }
190 193
191 if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) 194 if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6edb423f90b3..4792200978c8 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -429,7 +429,11 @@ struct gfs2_args {
429 unsigned int ar_meta:1; /* mount metafs */ 429 unsigned int ar_meta:1; /* mount metafs */
430 unsigned int ar_discard:1; /* discard requests */ 430 unsigned int ar_discard:1; /* discard requests */
431 unsigned int ar_errors:2; /* errors=withdraw | panic */ 431 unsigned int ar_errors:2; /* errors=withdraw | panic */
432 unsigned int ar_nobarrier:1; /* do not send barriers */
432 int ar_commit; /* Commit interval */ 433 int ar_commit; /* Commit interval */
434 int ar_statfs_quantum; /* The fast statfs interval */
435 int ar_quota_quantum; /* The quota interval */
436 int ar_statfs_percent; /* The % change to force sync */
433}; 437};
434 438
435struct gfs2_tune { 439struct gfs2_tune {
@@ -558,6 +562,7 @@ struct gfs2_sbd {
558 spinlock_t sd_statfs_spin; 562 spinlock_t sd_statfs_spin;
559 struct gfs2_statfs_change_host sd_statfs_master; 563 struct gfs2_statfs_change_host sd_statfs_master;
560 struct gfs2_statfs_change_host sd_statfs_local; 564 struct gfs2_statfs_change_host sd_statfs_local;
565 int sd_statfs_force_sync;
561 566
562 /* Resource group stuff */ 567 /* Resource group stuff */
563 568
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index fb15d3b1f409..26ba2a4c4a2d 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
871 if (error) 871 if (error)
872 goto fail_gunlock2; 872 goto fail_gunlock2;
873 873
874 error = gfs2_acl_create(dip, GFS2_I(inode)); 874 error = gfs2_acl_create(dip, inode);
875 if (error) 875 if (error)
876 goto fail_gunlock2; 876 goto fail_gunlock2;
877 877
@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
947 947
948 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 948 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
949 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); 949 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
950 str->di_header.__pad0 = 0;
951 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); 950 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
952 str->di_header.__pad1 = 0;
953 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); 951 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
954 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); 952 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
955 str->di_mode = cpu_to_be32(ip->i_inode.i_mode); 953 str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 13c6237c5f67..4511b08fc451 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
596 memset(lh, 0, sizeof(struct gfs2_log_header)); 596 memset(lh, 0, sizeof(struct gfs2_log_header));
597 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 597 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
598 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); 598 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
599 lh->lh_header.__pad0 = cpu_to_be64(0);
599 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); 600 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
601 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
600 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); 602 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
601 lh->lh_flags = cpu_to_be32(flags); 603 lh->lh_flags = cpu_to_be32(flags);
602 lh->lh_tail = cpu_to_be32(tail); 604 lh->lh_tail = cpu_to_be32(tail);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 9969ff062c5b..de97632ba32f 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
132static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) 132static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
133{ 133{
134 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); 134 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
135 struct gfs2_meta_header *mh;
135 struct gfs2_trans *tr; 136 struct gfs2_trans *tr;
136 137
137 lock_buffer(bd->bd_bh); 138 lock_buffer(bd->bd_bh);
@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
148 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); 149 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
149 gfs2_meta_check(sdp, bd->bd_bh); 150 gfs2_meta_check(sdp, bd->bd_bh);
150 gfs2_pin(sdp, bd->bd_bh); 151 gfs2_pin(sdp, bd->bd_bh);
152 mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
153 mh->__pad0 = cpu_to_be64(0);
154 mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
151 sdp->sd_log_num_buf++; 155 sdp->sd_log_num_buf++;
152 list_add(&le->le_list, &sdp->sd_log_le_buf); 156 list_add(&le->le_list, &sdp->sd_log_le_buf);
153 tr->tr_num_buf_new++; 157 tr->tr_num_buf_new++;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 52fb6c048981..edfee24f3636 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -18,6 +18,7 @@
18#include <linux/mount.h> 18#include <linux/mount.h>
19#include <linux/gfs2_ondisk.h> 19#include <linux/gfs2_ondisk.h>
20#include <linux/slow-work.h> 20#include <linux/slow-work.h>
21#include <linux/quotaops.h>
21 22
22#include "gfs2.h" 23#include "gfs2.h"
23#include "incore.h" 24#include "incore.h"
@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
62 gt->gt_quota_warn_period = 10; 63 gt->gt_quota_warn_period = 10;
63 gt->gt_quota_scale_num = 1; 64 gt->gt_quota_scale_num = 1;
64 gt->gt_quota_scale_den = 1; 65 gt->gt_quota_scale_den = 1;
65 gt->gt_quota_quantum = 60;
66 gt->gt_new_files_jdata = 0; 66 gt->gt_new_files_jdata = 0;
67 gt->gt_max_readahead = 1 << 18; 67 gt->gt_max_readahead = 1 << 18;
68 gt->gt_stall_secs = 600; 68 gt->gt_stall_secs = 600;
69 gt->gt_complain_secs = 10; 69 gt->gt_complain_secs = 10;
70 gt->gt_statfs_quantum = 30;
71 gt->gt_statfs_slow = 0;
72} 70}
73 71
74static struct gfs2_sbd *init_sbd(struct super_block *sb) 72static struct gfs2_sbd *init_sbd(struct super_block *sb)
@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
1114 * Returns: errno 1112 * Returns: errno
1115 */ 1113 */
1116 1114
1117static int fill_super(struct super_block *sb, void *data, int silent) 1115static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
1118{ 1116{
1119 struct gfs2_sbd *sdp; 1117 struct gfs2_sbd *sdp;
1120 struct gfs2_holder mount_gh; 1118 struct gfs2_holder mount_gh;
@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1125 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); 1123 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
1126 return -ENOMEM; 1124 return -ENOMEM;
1127 } 1125 }
1128 1126 sdp->sd_args = *args;
1129 sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT;
1130 sdp->sd_args.ar_data = GFS2_DATA_DEFAULT;
1131 sdp->sd_args.ar_commit = 60;
1132 sdp->sd_args.ar_errors = GFS2_ERRORS_DEFAULT;
1133
1134 error = gfs2_mount_args(sdp, &sdp->sd_args, data);
1135 if (error) {
1136 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
1137 goto fail;
1138 }
1139 1127
1140 if (sdp->sd_args.ar_spectator) { 1128 if (sdp->sd_args.ar_spectator) {
1141 sb->s_flags |= MS_RDONLY; 1129 sb->s_flags |= MS_RDONLY;
@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1143 } 1131 }
1144 if (sdp->sd_args.ar_posix_acl) 1132 if (sdp->sd_args.ar_posix_acl)
1145 sb->s_flags |= MS_POSIXACL; 1133 sb->s_flags |= MS_POSIXACL;
1134 if (sdp->sd_args.ar_nobarrier)
1135 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1146 1136
1147 sb->s_magic = GFS2_MAGIC; 1137 sb->s_magic = GFS2_MAGIC;
1148 sb->s_op = &gfs2_super_ops; 1138 sb->s_op = &gfs2_super_ops;
1149 sb->s_export_op = &gfs2_export_ops; 1139 sb->s_export_op = &gfs2_export_ops;
1150 sb->s_xattr = gfs2_xattr_handlers; 1140 sb->s_xattr = gfs2_xattr_handlers;
1141 sb->s_qcop = &gfs2_quotactl_ops;
1142 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
1151 sb->s_time_gran = 1; 1143 sb->s_time_gran = 1;
1152 sb->s_maxbytes = MAX_LFS_FILESIZE; 1144 sb->s_maxbytes = MAX_LFS_FILESIZE;
1153 1145
@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1160 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; 1152 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
1161 1153
1162 sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit; 1154 sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
1155 sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
1156 if (sdp->sd_args.ar_statfs_quantum) {
1157 sdp->sd_tune.gt_statfs_slow = 0;
1158 sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
1159 }
1160 else {
1161 sdp->sd_tune.gt_statfs_slow = 1;
1162 sdp->sd_tune.gt_statfs_quantum = 30;
1163 }
1163 1164
1164 error = init_names(sdp, silent); 1165 error = init_names(sdp, silent);
1165 if (error) 1166 if (error)
@@ -1243,18 +1244,127 @@ fail:
1243 return error; 1244 return error;
1244} 1245}
1245 1246
1246static int gfs2_get_sb(struct file_system_type *fs_type, int flags, 1247static int set_gfs2_super(struct super_block *s, void *data)
1247 const char *dev_name, void *data, struct vfsmount *mnt)
1248{ 1248{
1249 return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt); 1249 s->s_bdev = data;
1250 s->s_dev = s->s_bdev->bd_dev;
1251
1252 /*
1253 * We set the bdi here to the queue backing, file systems can
1254 * overwrite this in ->fill_super()
1255 */
1256 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
1257 return 0;
1250} 1258}
1251 1259
1252static int test_meta_super(struct super_block *s, void *ptr) 1260static int test_gfs2_super(struct super_block *s, void *ptr)
1253{ 1261{
1254 struct block_device *bdev = ptr; 1262 struct block_device *bdev = ptr;
1255 return (bdev == s->s_bdev); 1263 return (bdev == s->s_bdev);
1256} 1264}
1257 1265
1266/**
1267 * gfs2_get_sb - Get the GFS2 superblock
1268 * @fs_type: The GFS2 filesystem type
1269 * @flags: Mount flags
1270 * @dev_name: The name of the device
1271 * @data: The mount arguments
1272 * @mnt: The vfsmnt for this mount
1273 *
1274 * Q. Why not use get_sb_bdev() ?
1275 * A. We need to select one of two root directories to mount, independent
1276 * of whether this is the initial, or subsequent, mount of this sb
1277 *
1278 * Returns: 0 or -ve on error
1279 */
1280
1281static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
1282 const char *dev_name, void *data, struct vfsmount *mnt)
1283{
1284 struct block_device *bdev;
1285 struct super_block *s;
1286 fmode_t mode = FMODE_READ;
1287 int error;
1288 struct gfs2_args args;
1289 struct gfs2_sbd *sdp;
1290
1291 if (!(flags & MS_RDONLY))
1292 mode |= FMODE_WRITE;
1293
1294 bdev = open_bdev_exclusive(dev_name, mode, fs_type);
1295 if (IS_ERR(bdev))
1296 return PTR_ERR(bdev);
1297
1298 /*
1299 * once the super is inserted into the list by sget, s_umount
1300 * will protect the lockfs code from trying to start a snapshot
1301 * while we are mounting
1302 */
1303 mutex_lock(&bdev->bd_fsfreeze_mutex);
1304 if (bdev->bd_fsfreeze_count > 0) {
1305 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1306 error = -EBUSY;
1307 goto error_bdev;
1308 }
1309 s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
1310 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1311 error = PTR_ERR(s);
1312 if (IS_ERR(s))
1313 goto error_bdev;
1314
1315 memset(&args, 0, sizeof(args));
1316 args.ar_quota = GFS2_QUOTA_DEFAULT;
1317 args.ar_data = GFS2_DATA_DEFAULT;
1318 args.ar_commit = 60;
1319 args.ar_statfs_quantum = 30;
1320 args.ar_quota_quantum = 60;
1321 args.ar_errors = GFS2_ERRORS_DEFAULT;
1322
1323 error = gfs2_mount_args(&args, data);
1324 if (error) {
1325 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
1326 if (s->s_root)
1327 goto error_super;
1328 deactivate_locked_super(s);
1329 return error;
1330 }
1331
1332 if (s->s_root) {
1333 error = -EBUSY;
1334 if ((flags ^ s->s_flags) & MS_RDONLY)
1335 goto error_super;
1336 close_bdev_exclusive(bdev, mode);
1337 } else {
1338 char b[BDEVNAME_SIZE];
1339
1340 s->s_flags = flags;
1341 s->s_mode = mode;
1342 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
1343 sb_set_blocksize(s, block_size(bdev));
1344 error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
1345 if (error) {
1346 deactivate_locked_super(s);
1347 return error;
1348 }
1349 s->s_flags |= MS_ACTIVE;
1350 bdev->bd_super = s;
1351 }
1352
1353 sdp = s->s_fs_info;
1354 mnt->mnt_sb = s;
1355 if (args.ar_meta)
1356 mnt->mnt_root = dget(sdp->sd_master_dir);
1357 else
1358 mnt->mnt_root = dget(sdp->sd_root_dir);
1359 return 0;
1360
1361error_super:
1362 deactivate_locked_super(s);
1363error_bdev:
1364 close_bdev_exclusive(bdev, mode);
1365 return error;
1366}
1367
1258static int set_meta_super(struct super_block *s, void *ptr) 1368static int set_meta_super(struct super_block *s, void *ptr)
1259{ 1369{
1260 return -EINVAL; 1370 return -EINVAL;
@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
1274 dev_name, error); 1384 dev_name, error);
1275 return error; 1385 return error;
1276 } 1386 }
1277 s = sget(&gfs2_fs_type, test_meta_super, set_meta_super, 1387 s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
1278 path.dentry->d_inode->i_sb->s_bdev); 1388 path.dentry->d_inode->i_sb->s_bdev);
1279 path_put(&path); 1389 path_put(&path);
1280 if (IS_ERR(s)) { 1390 if (IS_ERR(s)) {
1281 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); 1391 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
1282 return PTR_ERR(s); 1392 return PTR_ERR(s);
1283 } 1393 }
1394 if ((flags ^ s->s_flags) & MS_RDONLY) {
1395 deactivate_locked_super(s);
1396 return -EBUSY;
1397 }
1284 sdp = s->s_fs_info; 1398 sdp = s->s_fs_info;
1285 mnt->mnt_sb = s; 1399 mnt->mnt_sb = s;
1286 mnt->mnt_root = dget(sdp->sd_master_dir); 1400 mnt->mnt_root = dget(sdp->sd_master_dir);
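
The rewritten gfs2_get_sb() above stops using get_sb_bdev() so that it can share one superblock between an ordinary mount and a gfs2meta mount of the same device, and so it can pick the root dentry (sd_root_dir or sd_master_dir) per mount. The sharing comes from sget() with a test/set callback pair keyed on the block device. The sketch below is a plain userspace model of that find-or-create idiom; every name in it is illustrative and the list is not locked, unlike the real sget().

#include <stdlib.h>
#include <sys/types.h>

/* Userspace model of the sget() find-or-create idiom: reuse an existing
 * "super" whose key matches (test callback), otherwise allocate a new one
 * and initialise its key (set callback). No locking, purely illustrative. */
struct fake_super {
	dev_t key;			/* stands in for s_bdev */
	int populated;			/* stands in for s_root != NULL */
	struct fake_super *next;
};

static struct fake_super *super_list;

static int test_super(struct fake_super *s, void *data)
{
	return s->key == *(dev_t *)data;
}

static int set_super(struct fake_super *s, void *data)
{
	s->key = *(dev_t *)data;
	return 0;
}

static struct fake_super *sget_like(void *data,
				    int (*test)(struct fake_super *, void *),
				    int (*set)(struct fake_super *, void *))
{
	struct fake_super *s;

	for (s = super_list; s; s = s->next)
		if (test(s, data))
			return s;	/* subsequent mount: share the super */

	s = calloc(1, sizeof(*s));
	if (!s || set(s, data)) {
		free(s);
		return NULL;
	}
	s->next = super_list;		/* first mount: insert the new super */
	super_list = s;
	return s;
}

The caller then inspects populated (s_root in the diff) to decide between running fill_super() and merely choosing a root dentry, which is exactly the branch on s->s_root in the hunk above.
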
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 2e9b9326bfc9..e3bf6eab8750 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -15,7 +15,7 @@
15 * fuzziness in the current usage value of IDs that are being used on different 15 * fuzziness in the current usage value of IDs that are being used on different
16 * nodes in the cluster simultaneously. So, it is possible for a user on 16 * nodes in the cluster simultaneously. So, it is possible for a user on
 17 * multiple nodes to overrun their quota, but that overrun is controllable. 17 * multiple nodes to overrun their quota, but that overrun is controllable.
18 * Since quota tags are part of transactions, there is no need to a quota check 18 * Since quota tags are part of transactions, there is no need for a quota check
19 * program to be run on node crashes or anything like that. 19 * program to be run on node crashes or anything like that.
20 * 20 *
 21 * There are a couple of knobs that let the administrator manage the quota 21 * There are a couple of knobs that let the administrator manage the quota
@@ -47,6 +47,8 @@
47#include <linux/gfs2_ondisk.h> 47#include <linux/gfs2_ondisk.h>
48#include <linux/kthread.h> 48#include <linux/kthread.h>
49#include <linux/freezer.h> 49#include <linux/freezer.h>
50#include <linux/quota.h>
51#include <linux/dqblk_xfs.h>
50 52
51#include "gfs2.h" 53#include "gfs2.h"
52#include "incore.h" 54#include "incore.h"
@@ -65,13 +67,6 @@
65#define QUOTA_USER 1 67#define QUOTA_USER 1
66#define QUOTA_GROUP 0 68#define QUOTA_GROUP 0
67 69
68struct gfs2_quota_host {
69 u64 qu_limit;
70 u64 qu_warn;
71 s64 qu_value;
72 u32 qu_ll_next;
73};
74
75struct gfs2_quota_change_host { 70struct gfs2_quota_change_host {
76 u64 qc_change; 71 u64 qc_change;
77 u32 qc_flags; /* GFS2_QCF_... */ 72 u32 qc_flags; /* GFS2_QCF_... */
@@ -164,7 +159,7 @@ fail:
164 return error; 159 return error;
165} 160}
166 161
167static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, 162static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
168 struct gfs2_quota_data **qdp) 163 struct gfs2_quota_data **qdp)
169{ 164{
170 struct gfs2_quota_data *qd = NULL, *new_qd = NULL; 165 struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
@@ -202,7 +197,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
202 197
203 spin_unlock(&qd_lru_lock); 198 spin_unlock(&qd_lru_lock);
204 199
205 if (qd || !create) { 200 if (qd) {
206 if (new_qd) { 201 if (new_qd) {
207 gfs2_glock_put(new_qd->qd_gl); 202 gfs2_glock_put(new_qd->qd_gl);
208 kmem_cache_free(gfs2_quotad_cachep, new_qd); 203 kmem_cache_free(gfs2_quotad_cachep, new_qd);
@@ -461,12 +456,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
461 qd_put(qd); 456 qd_put(qd);
462} 457}
463 458
464static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create, 459static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
465 struct gfs2_quota_data **qdp) 460 struct gfs2_quota_data **qdp)
466{ 461{
467 int error; 462 int error;
468 463
469 error = qd_get(sdp, user, id, create, qdp); 464 error = qd_get(sdp, user, id, qdp);
470 if (error) 465 if (error)
471 return error; 466 return error;
472 467
@@ -508,20 +503,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
508 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 503 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
509 return 0; 504 return 0;
510 505
511 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd); 506 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
512 if (error) 507 if (error)
513 goto out; 508 goto out;
514 al->al_qd_num++; 509 al->al_qd_num++;
515 qd++; 510 qd++;
516 511
517 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd); 512 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
518 if (error) 513 if (error)
519 goto out; 514 goto out;
520 al->al_qd_num++; 515 al->al_qd_num++;
521 qd++; 516 qd++;
522 517
523 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) { 518 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
524 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd); 519 error = qdsb_get(sdp, QUOTA_USER, uid, qd);
525 if (error) 520 if (error)
526 goto out; 521 goto out;
527 al->al_qd_num++; 522 al->al_qd_num++;
@@ -529,7 +524,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
529 } 524 }
530 525
531 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) { 526 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
532 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd); 527 error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
533 if (error) 528 if (error)
534 goto out; 529 goto out;
535 al->al_qd_num++; 530 al->al_qd_num++;
@@ -617,48 +612,36 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
617 mutex_unlock(&sdp->sd_quota_mutex); 612 mutex_unlock(&sdp->sd_quota_mutex);
618} 613}
619 614
620static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
621{
622 const struct gfs2_quota *str = buf;
623
624 qu->qu_limit = be64_to_cpu(str->qu_limit);
625 qu->qu_warn = be64_to_cpu(str->qu_warn);
626 qu->qu_value = be64_to_cpu(str->qu_value);
627 qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
628}
629
630static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
631{
632 struct gfs2_quota *str = buf;
633
634 str->qu_limit = cpu_to_be64(qu->qu_limit);
635 str->qu_warn = cpu_to_be64(qu->qu_warn);
636 str->qu_value = cpu_to_be64(qu->qu_value);
637 str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
638 memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
639}
640
641/** 615/**
642 * gfs2_adjust_quota 616 * gfs2_adjust_quota - adjust record of current block usage
617 * @ip: The quota inode
618 * @loc: Offset of the entry in the quota file
619 * @change: The amount of usage change to record
620 * @qd: The quota data
621 * @fdq: The updated limits to record
643 * 622 *
644 * This function was mostly borrowed from gfs2_block_truncate_page which was 623 * This function was mostly borrowed from gfs2_block_truncate_page which was
645 * in turn mostly borrowed from ext3 624 * in turn mostly borrowed from ext3
625 *
626 * Returns: 0 or -ve on error
646 */ 627 */
628
647static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, 629static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
648 s64 change, struct gfs2_quota_data *qd) 630 s64 change, struct gfs2_quota_data *qd,
631 struct fs_disk_quota *fdq)
649{ 632{
650 struct inode *inode = &ip->i_inode; 633 struct inode *inode = &ip->i_inode;
651 struct address_space *mapping = inode->i_mapping; 634 struct address_space *mapping = inode->i_mapping;
652 unsigned long index = loc >> PAGE_CACHE_SHIFT; 635 unsigned long index = loc >> PAGE_CACHE_SHIFT;
653 unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 636 unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
654 unsigned blocksize, iblock, pos; 637 unsigned blocksize, iblock, pos;
655 struct buffer_head *bh; 638 struct buffer_head *bh, *dibh;
656 struct page *page; 639 struct page *page;
657 void *kaddr; 640 void *kaddr;
658 char *ptr; 641 struct gfs2_quota *qp;
659 struct gfs2_quota_host qp;
660 s64 value; 642 s64 value;
661 int err = -EIO; 643 int err = -EIO;
644 u64 size;
662 645
663 if (gfs2_is_stuffed(ip)) 646 if (gfs2_is_stuffed(ip))
664 gfs2_unstuff_dinode(ip, NULL); 647 gfs2_unstuff_dinode(ip, NULL);
@@ -700,18 +683,38 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
700 gfs2_trans_add_bh(ip->i_gl, bh, 0); 683 gfs2_trans_add_bh(ip->i_gl, bh, 0);
701 684
702 kaddr = kmap_atomic(page, KM_USER0); 685 kaddr = kmap_atomic(page, KM_USER0);
703 ptr = kaddr + offset; 686 qp = kaddr + offset;
704 gfs2_quota_in(&qp, ptr); 687 value = (s64)be64_to_cpu(qp->qu_value) + change;
705 qp.qu_value += change; 688 qp->qu_value = cpu_to_be64(value);
706 value = qp.qu_value; 689 qd->qd_qb.qb_value = qp->qu_value;
707 gfs2_quota_out(&qp, ptr); 690 if (fdq) {
691 if (fdq->d_fieldmask & FS_DQ_BSOFT) {
692 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
693 qd->qd_qb.qb_warn = qp->qu_warn;
694 }
695 if (fdq->d_fieldmask & FS_DQ_BHARD) {
696 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
697 qd->qd_qb.qb_limit = qp->qu_limit;
698 }
699 }
708 flush_dcache_page(page); 700 flush_dcache_page(page);
709 kunmap_atomic(kaddr, KM_USER0); 701 kunmap_atomic(kaddr, KM_USER0);
710 err = 0; 702
711 qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC); 703 err = gfs2_meta_inode_buffer(ip, &dibh);
712 qd->qd_qb.qb_value = cpu_to_be64(value); 704 if (err)
713 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC); 705 goto unlock;
714 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value); 706
707 size = loc + sizeof(struct gfs2_quota);
708 if (size > inode->i_size) {
709 ip->i_disksize = size;
710 i_size_write(inode, size);
711 }
712 inode->i_mtime = inode->i_atime = CURRENT_TIME;
713 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
714 gfs2_dinode_out(ip, dibh->b_data);
715 brelse(dibh);
716 mark_inode_dirty(inode);
717
715unlock: 718unlock:
716 unlock_page(page); 719 unlock_page(page);
717 page_cache_release(page); 720 page_cache_release(page);
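
gfs2_adjust_quota() now edits the quota record in place in its on-disk byte order instead of round-tripping through the removed gfs2_quota_host, and when a setquota request (fdq) is passed in, it only rewrites the fields named in the field mask. A compact, self-contained restatement of that read-modify-write pattern follows (hypothetical names, standard C endian helpers); it is a sketch of the idea, not the kernel code.

#include <stdint.h>
#include <endian.h>

#define DQ_BSOFT 0x1u		/* stand-ins for FS_DQ_BSOFT / FS_DQ_BHARD */
#define DQ_BHARD 0x2u

struct ondisk_quota {		/* big-endian on disk, as in the hunk above */
	uint64_t qu_limit;
	uint64_t qu_warn;
	uint64_t qu_value;
};

struct limit_update {
	unsigned int fieldmask;
	uint64_t softlimit;
	uint64_t hardlimit;
};

/* Apply a usage delta and, optionally, new limits directly to the
 * big-endian record: decode, adjust, re-encode, nothing else touched. */
static void adjust_quota(struct ondisk_quota *qp, int64_t change,
			 const struct limit_update *upd)
{
	int64_t value = (int64_t)be64toh(qp->qu_value) + change;

	qp->qu_value = htobe64((uint64_t)value);
	if (!upd)
		return;
	if (upd->fieldmask & DQ_BSOFT)
		qp->qu_warn = htobe64(upd->softlimit);
	if (upd->fieldmask & DQ_BHARD)
		qp->qu_limit = htobe64(upd->hardlimit);
}
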
@@ -739,9 +742,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
739 return -ENOMEM; 742 return -ENOMEM;
740 743
741 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); 744 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
745 mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
742 for (qx = 0; qx < num_qd; qx++) { 746 for (qx = 0; qx < num_qd; qx++) {
743 error = gfs2_glock_nq_init(qda[qx]->qd_gl, 747 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
744 LM_ST_EXCLUSIVE,
745 GL_NOCACHE, &ghs[qx]); 748 GL_NOCACHE, &ghs[qx]);
746 if (error) 749 if (error)
747 goto out; 750 goto out;
@@ -795,9 +798,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
795 for (x = 0; x < num_qd; x++) { 798 for (x = 0; x < num_qd; x++) {
796 qd = qda[x]; 799 qd = qda[x];
797 offset = qd2offset(qd); 800 offset = qd2offset(qd);
798 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, 801 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
799 (struct gfs2_quota_data *)
800 qd);
801 if (error) 802 if (error)
802 goto out_end_trans; 803 goto out_end_trans;
803 804
@@ -817,21 +818,44 @@ out_gunlock:
817out: 818out:
818 while (qx--) 819 while (qx--)
819 gfs2_glock_dq_uninit(&ghs[qx]); 820 gfs2_glock_dq_uninit(&ghs[qx]);
821 mutex_unlock(&ip->i_inode.i_mutex);
820 kfree(ghs); 822 kfree(ghs);
821 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); 823 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
822 return error; 824 return error;
823} 825}
824 826
827static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
828{
829 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
830 struct gfs2_quota q;
831 struct gfs2_quota_lvb *qlvb;
832 loff_t pos;
833 int error;
834
835 memset(&q, 0, sizeof(struct gfs2_quota));
836 pos = qd2offset(qd);
837 error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
838 if (error < 0)
839 return error;
840
841 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
842 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
843 qlvb->__pad = 0;
844 qlvb->qb_limit = q.qu_limit;
845 qlvb->qb_warn = q.qu_warn;
846 qlvb->qb_value = q.qu_value;
847 qd->qd_qb = *qlvb;
848
849 return 0;
850}
851
825static int do_glock(struct gfs2_quota_data *qd, int force_refresh, 852static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
826 struct gfs2_holder *q_gh) 853 struct gfs2_holder *q_gh)
827{ 854{
828 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 855 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
829 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 856 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
830 struct gfs2_holder i_gh; 857 struct gfs2_holder i_gh;
831 struct gfs2_quota_host q;
832 char buf[sizeof(struct gfs2_quota)];
833 int error; 858 int error;
834 struct gfs2_quota_lvb *qlvb;
835 859
836restart: 860restart:
837 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); 861 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
@@ -841,11 +865,9 @@ restart:
841 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 865 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
842 866
843 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { 867 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
844 loff_t pos;
845 gfs2_glock_dq_uninit(q_gh); 868 gfs2_glock_dq_uninit(q_gh);
846 error = gfs2_glock_nq_init(qd->qd_gl, 869 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
847 LM_ST_EXCLUSIVE, GL_NOCACHE, 870 GL_NOCACHE, q_gh);
848 q_gh);
849 if (error) 871 if (error)
850 return error; 872 return error;
851 873
@@ -853,29 +875,14 @@ restart:
853 if (error) 875 if (error)
854 goto fail; 876 goto fail;
855 877
856 memset(buf, 0, sizeof(struct gfs2_quota)); 878 error = update_qd(sdp, qd);
857 pos = qd2offset(qd); 879 if (error)
858 error = gfs2_internal_read(ip, NULL, buf, &pos,
859 sizeof(struct gfs2_quota));
860 if (error < 0)
861 goto fail_gunlock; 880 goto fail_gunlock;
862 881
863 gfs2_glock_dq_uninit(&i_gh); 882 gfs2_glock_dq_uninit(&i_gh);
864 883 gfs2_glock_dq_uninit(q_gh);
865 gfs2_quota_in(&q, buf); 884 force_refresh = 0;
866 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 885 goto restart;
867 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
868 qlvb->__pad = 0;
869 qlvb->qb_limit = cpu_to_be64(q.qu_limit);
870 qlvb->qb_warn = cpu_to_be64(q.qu_warn);
871 qlvb->qb_value = cpu_to_be64(q.qu_value);
872 qd->qd_qb = *qlvb;
873
874 if (gfs2_glock_is_blocking(qd->qd_gl)) {
875 gfs2_glock_dq_uninit(q_gh);
876 force_refresh = 0;
877 goto restart;
878 }
879 } 886 }
880 887
881 return 0; 888 return 0;
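
The do_glock() rewrite above delegates the on-disk read to the new update_qd(), which also fills the glock's lock value block so the limits travel with the lock, and then simply drops the exclusive lock and retries from the top rather than special-casing gfs2_glock_is_blocking(). The generic shape of that refresh-then-retry loop, modelled here with a POSIX rwlock, is only an analogy to the glock code; all names are illustrative.

#include <pthread.h>
#include <stdbool.h>

/* Refresh-then-retry: readers normally hold only the cheap shared lock.
 * If the cached copy is stale, drop it, take the exclusive lock once to
 * refresh, drop that too, and restart so the read still happens under
 * the shared lock. */
struct cached_record {
	pthread_rwlock_t lock;
	bool valid;		/* stands in for the qb_magic check */
	long value;		/* stands in for limit/warn/value */
};

static long refresh_from_store(void)
{
	return 0;		/* stub standing in for the on-disk read */
}

static long read_record(struct cached_record *r)
{
	long v;

restart:
	pthread_rwlock_rdlock(&r->lock);
	if (!r->valid) {
		pthread_rwlock_unlock(&r->lock);
		pthread_rwlock_wrlock(&r->lock);
		if (!r->valid) {	/* someone may have refreshed already */
			r->value = refresh_from_store();
			r->valid = true;
		}
		pthread_rwlock_unlock(&r->lock);
		goto restart;
	}
	v = r->value;
	pthread_rwlock_unlock(&r->lock);
	return v;
}
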
@@ -995,7 +1002,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
995{ 1002{
996 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1003 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
997 1004
998 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n", 1005 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
999 sdp->sd_fsname, type, 1006 sdp->sd_fsname, type,
1000 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group", 1007 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
1001 qd->qd_id); 1008 qd->qd_id);
@@ -1032,6 +1039,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1032 1039
1033 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { 1040 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1034 print_message(qd, "exceeded"); 1041 print_message(qd, "exceeded");
1042 quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1043 USRQUOTA : GRPQUOTA, qd->qd_id,
1044 sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1045
1035 error = -EDQUOT; 1046 error = -EDQUOT;
1036 break; 1047 break;
1037 } else if (be64_to_cpu(qd->qd_qb.qb_warn) && 1048 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
@@ -1039,6 +1050,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1039 time_after_eq(jiffies, qd->qd_last_warn + 1050 time_after_eq(jiffies, qd->qd_last_warn +
1040 gfs2_tune_get(sdp, 1051 gfs2_tune_get(sdp,
1041 gt_quota_warn_period) * HZ)) { 1052 gt_quota_warn_period) * HZ)) {
1053 quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1054 USRQUOTA : GRPQUOTA, qd->qd_id,
1055 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1042 error = print_message(qd, "warning"); 1056 error = print_message(qd, "warning");
1043 qd->qd_last_warn = jiffies; 1057 qd->qd_last_warn = jiffies;
1044 } 1058 }
@@ -1069,8 +1083,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1069 } 1083 }
1070} 1084}
1071 1085
1072int gfs2_quota_sync(struct gfs2_sbd *sdp) 1086int gfs2_quota_sync(struct super_block *sb, int type)
1073{ 1087{
1088 struct gfs2_sbd *sdp = sb->s_fs_info;
1074 struct gfs2_quota_data **qda; 1089 struct gfs2_quota_data **qda;
1075 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync); 1090 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1076 unsigned int num_qd; 1091 unsigned int num_qd;
@@ -1118,7 +1133,7 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1118 struct gfs2_holder q_gh; 1133 struct gfs2_holder q_gh;
1119 int error; 1134 int error;
1120 1135
1121 error = qd_get(sdp, user, id, CREATE, &qd); 1136 error = qd_get(sdp, user, id, &qd);
1122 if (error) 1137 if (error)
1123 return error; 1138 return error;
1124 1139
@@ -1127,7 +1142,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1127 gfs2_glock_dq_uninit(&q_gh); 1142 gfs2_glock_dq_uninit(&q_gh);
1128 1143
1129 qd_put(qd); 1144 qd_put(qd);
1130
1131 return error; 1145 return error;
1132} 1146}
1133 1147
@@ -1298,12 +1312,12 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1298} 1312}
1299 1313
1300static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, 1314static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1301 int (*fxn)(struct gfs2_sbd *sdp), 1315 int (*fxn)(struct super_block *sb, int type),
1302 unsigned long t, unsigned long *timeo, 1316 unsigned long t, unsigned long *timeo,
1303 unsigned int *new_timeo) 1317 unsigned int *new_timeo)
1304{ 1318{
1305 if (t >= *timeo) { 1319 if (t >= *timeo) {
1306 int error = fxn(sdp); 1320 int error = fxn(sdp->sd_vfs, 0);
1307 quotad_error(sdp, msg, error); 1321 quotad_error(sdp, msg, error);
1308 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; 1322 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1309 } else { 1323 } else {
@@ -1330,6 +1344,14 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1330 } 1344 }
1331} 1345}
1332 1346
1347void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1348 if (!sdp->sd_statfs_force_sync) {
1349 sdp->sd_statfs_force_sync = 1;
1350 wake_up(&sdp->sd_quota_wait);
1351 }
1352}
1353
1354
1333/** 1355/**
1334 * gfs2_quotad - Write cached quota changes into the quota file 1356 * gfs2_quotad - Write cached quota changes into the quota file
1335 * @sdp: Pointer to GFS2 superblock 1357 * @sdp: Pointer to GFS2 superblock
@@ -1349,8 +1371,15 @@ int gfs2_quotad(void *data)
1349 while (!kthread_should_stop()) { 1371 while (!kthread_should_stop()) {
1350 1372
1351 /* Update the master statfs file */ 1373 /* Update the master statfs file */
1352 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, 1374 if (sdp->sd_statfs_force_sync) {
1353 &statfs_timeo, &tune->gt_statfs_quantum); 1375 int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1376 quotad_error(sdp, "statfs", error);
1377 statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1378 }
1379 else
1380 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1381 &statfs_timeo,
1382 &tune->gt_statfs_quantum);
1354 1383
1355 /* Update quota file */ 1384 /* Update quota file */
1356 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, 1385 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
@@ -1367,7 +1396,7 @@ int gfs2_quotad(void *data)
1367 spin_lock(&sdp->sd_trunc_lock); 1396 spin_lock(&sdp->sd_trunc_lock);
1368 empty = list_empty(&sdp->sd_trunc_list); 1397 empty = list_empty(&sdp->sd_trunc_list);
1369 spin_unlock(&sdp->sd_trunc_lock); 1398 spin_unlock(&sdp->sd_trunc_lock);
1370 if (empty) 1399 if (empty && !sdp->sd_statfs_force_sync)
1371 t -= schedule_timeout(t); 1400 t -= schedule_timeout(t);
1372 else 1401 else
1373 t = 0; 1402 t = 0;
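
The two quotad hunks above add a fast path for statfs: when sd_statfs_force_sync is set (by the sysfs knob or by the statfs_percent threshold in super.c), the daemon syncs immediately instead of waiting out gt_statfs_quantum, and the sleep at the bottom of the loop is skipped while the request is pending. A small pthread model of that flag-plus-wakeup handshake follows; the names are illustrative and the kernel uses a wait queue rather than a condition variable.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Producer side: set the flag once and wake the daemon (compare
 * gfs2_wake_up_statfs()). Daemon side: service the flag immediately,
 * otherwise sleep until the end of the normal quantum. */
struct statfs_daemon {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	bool force_sync;
};

static void wake_up_statfs(struct statfs_daemon *d)
{
	pthread_mutex_lock(&d->lock);
	if (!d->force_sync) {
		d->force_sync = true;
		pthread_cond_signal(&d->wake);
	}
	pthread_mutex_unlock(&d->lock);
}

static bool wait_for_work(struct statfs_daemon *d,
			  const struct timespec *quantum_end)
{
	bool forced;

	pthread_mutex_lock(&d->lock);
	while (!d->force_sync &&
	       pthread_cond_timedwait(&d->wake, &d->lock, quantum_end) == 0)
		;			/* woken spuriously: re-check the flag */
	forced = d->force_sync;
	d->force_sync = false;		/* consume the request; caller syncs */
	pthread_mutex_unlock(&d->lock);
	return forced;			/* true: sync now; false: quantum ran out */
}
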
@@ -1377,3 +1406,181 @@ int gfs2_quotad(void *data)
1377 return 0; 1406 return 0;
1378} 1407}
1379 1408
1409static int gfs2_quota_get_xstate(struct super_block *sb,
1410 struct fs_quota_stat *fqs)
1411{
1412 struct gfs2_sbd *sdp = sb->s_fs_info;
1413
1414 memset(fqs, 0, sizeof(struct fs_quota_stat));
1415 fqs->qs_version = FS_QSTAT_VERSION;
1416 if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
1417 fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
1418 else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
1419 fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
1420 if (sdp->sd_quota_inode) {
1421 fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1422 fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1423 }
1424 fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
 1425 fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
1426 fqs->qs_incoredqs = atomic_read(&qd_lru_count);
1427 return 0;
1428}
1429
1430static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
1431 struct fs_disk_quota *fdq)
1432{
1433 struct gfs2_sbd *sdp = sb->s_fs_info;
1434 struct gfs2_quota_lvb *qlvb;
1435 struct gfs2_quota_data *qd;
1436 struct gfs2_holder q_gh;
1437 int error;
1438
1439 memset(fdq, 0, sizeof(struct fs_disk_quota));
1440
1441 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1442 return -ESRCH; /* Crazy XFS error code */
1443
1444 if (type == USRQUOTA)
1445 type = QUOTA_USER;
1446 else if (type == GRPQUOTA)
1447 type = QUOTA_GROUP;
1448 else
1449 return -EINVAL;
1450
1451 error = qd_get(sdp, type, id, &qd);
1452 if (error)
1453 return error;
1454 error = do_glock(qd, FORCE, &q_gh);
1455 if (error)
1456 goto out;
1457
1458 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
1459 fdq->d_version = FS_DQUOT_VERSION;
1460 fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
1461 fdq->d_id = id;
1462 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
1463 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
1464 fdq->d_bcount = be64_to_cpu(qlvb->qb_value);
1465
1466 gfs2_glock_dq_uninit(&q_gh);
1467out:
1468 qd_put(qd);
1469 return error;
1470}
1471
1472/* GFS2 only supports a subset of the XFS fields */
1473#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
1474
1475static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
1476 struct fs_disk_quota *fdq)
1477{
1478 struct gfs2_sbd *sdp = sb->s_fs_info;
1479 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1480 struct gfs2_quota_data *qd;
1481 struct gfs2_holder q_gh, i_gh;
1482 unsigned int data_blocks, ind_blocks;
1483 unsigned int blocks = 0;
1484 int alloc_required;
1485 struct gfs2_alloc *al;
1486 loff_t offset;
1487 int error;
1488
1489 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1490 return -ESRCH; /* Crazy XFS error code */
1491
1492 switch(type) {
1493 case USRQUOTA:
1494 type = QUOTA_USER;
1495 if (fdq->d_flags != XFS_USER_QUOTA)
1496 return -EINVAL;
1497 break;
1498 case GRPQUOTA:
1499 type = QUOTA_GROUP;
1500 if (fdq->d_flags != XFS_GROUP_QUOTA)
1501 return -EINVAL;
1502 break;
1503 default:
1504 return -EINVAL;
1505 }
1506
1507 if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1508 return -EINVAL;
1509 if (fdq->d_id != id)
1510 return -EINVAL;
1511
1512 error = qd_get(sdp, type, id, &qd);
1513 if (error)
1514 return error;
1515
1516 mutex_lock(&ip->i_inode.i_mutex);
1517 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1518 if (error)
1519 goto out_put;
1520 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1521 if (error)
1522 goto out_q;
1523
1524 /* Check for existing entry, if none then alloc new blocks */
1525 error = update_qd(sdp, qd);
1526 if (error)
1527 goto out_i;
1528
1529 /* If nothing has changed, this is a no-op */
1530 if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1531 (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
1532 fdq->d_fieldmask ^= FS_DQ_BSOFT;
1533 if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1534 (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
1535 fdq->d_fieldmask ^= FS_DQ_BHARD;
1536 if (fdq->d_fieldmask == 0)
1537 goto out_i;
1538
1539 offset = qd2offset(qd);
1540 error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
1541 &alloc_required);
1542 if (error)
1543 goto out_i;
1544 if (alloc_required) {
1545 al = gfs2_alloc_get(ip);
1546 if (al == NULL)
1547 goto out_i;
1548 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1549 &data_blocks, &ind_blocks);
1550 blocks = al->al_requested = 1 + data_blocks + ind_blocks;
1551 error = gfs2_inplace_reserve(ip);
1552 if (error)
1553 goto out_alloc;
1554 }
1555
1556 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
1557 if (error)
1558 goto out_release;
1559
1560 /* Apply changes */
1561 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1562
1563 gfs2_trans_end(sdp);
1564out_release:
1565 if (alloc_required) {
1566 gfs2_inplace_release(ip);
1567out_alloc:
1568 gfs2_alloc_put(ip);
1569 }
1570out_i:
1571 gfs2_glock_dq_uninit(&i_gh);
1572out_q:
1573 gfs2_glock_dq_uninit(&q_gh);
1574out_put:
1575 mutex_unlock(&ip->i_inode.i_mutex);
1576 qd_put(qd);
1577 return error;
1578}
1579
1580const struct quotactl_ops gfs2_quotactl_ops = {
1581 .quota_sync = gfs2_quota_sync,
1582 .get_xstate = gfs2_quota_get_xstate,
1583 .get_xquota = gfs2_xquota_get,
1584 .set_xquota = gfs2_xquota_set,
1585};
1586
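
With sb->s_qcop wired up in ops_fstype.c and gfs2_quotactl_ops defined here, quota limits on GFS2 become reachable through the generic XFS-style quotactl commands. The userspace sketch below shows what a query might look like, assuming the usual Linux quota headers; the device path and uid are placeholders, and the call only succeeds on a filesystem mounted with quota accounting or enforcement enabled (otherwise the code above returns -ESRCH).

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>	/* struct fs_disk_quota, Q_XGETQUOTA */

int main(void)
{
	const char *dev = "/dev/your-gfs2-device";	/* placeholder */
	int uid = 1000;					/* placeholder */
	struct fs_disk_quota fdq;

	memset(&fdq, 0, sizeof(fdq));
	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), dev, uid, (caddr_t)&fdq) < 0) {
		perror("quotactl(Q_XGETQUOTA)");
		return 1;
	}
	printf("uid %d: in use %llu, soft limit %llu, hard limit %llu\n",
	       uid,
	       (unsigned long long)fdq.d_bcount,
	       (unsigned long long)fdq.d_blk_softlimit,
	       (unsigned long long)fdq.d_blk_hardlimit);
	return 0;
}

Setting limits goes the same way via Q_XSETQLIM with d_fieldmask restricted to FS_DQ_BSOFT and FS_DQ_BHARD, which is the subset gfs2_xquota_set() above accepts.
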
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 0fa5fa63d0e8..e271fa07ad02 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
25extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 25extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
26 u32 uid, u32 gid); 26 u32 uid, u32 gid);
27 27
28extern int gfs2_quota_sync(struct gfs2_sbd *sdp); 28extern int gfs2_quota_sync(struct super_block *sb, int type);
29extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); 29extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
30 30
31extern int gfs2_quota_init(struct gfs2_sbd *sdp); 31extern int gfs2_quota_init(struct gfs2_sbd *sdp);
32extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); 32extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
33extern int gfs2_quotad(void *data); 33extern int gfs2_quotad(void *data);
34 34
35extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
36
35static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) 37static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
36{ 38{
37 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 39 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
50} 52}
51 53
52extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); 54extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
55extern const struct quotactl_ops gfs2_quotactl_ops;
53 56
54#endif /* __QUOTA_DOT_H__ */ 57#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 09fa31965576..4b9bece3d437 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -410,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
410 memset(lh, 0, sizeof(struct gfs2_log_header)); 410 memset(lh, 0, sizeof(struct gfs2_log_header));
411 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 411 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
412 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); 412 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
413 lh->lh_header.__pad0 = cpu_to_be64(0);
413 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); 414 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
415 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
414 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); 416 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
415 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT); 417 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
416 lh->lh_blkno = cpu_to_be32(lblock); 418 lh->lh_blkno = cpu_to_be32(lblock);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8f1cfb02a6cb..0608f490c295 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1710,11 +1710,16 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
1710{ 1710{
1711 struct gfs2_rgrpd *rgd; 1711 struct gfs2_rgrpd *rgd;
1712 struct gfs2_holder ri_gh, rgd_gh; 1712 struct gfs2_holder ri_gh, rgd_gh;
1713 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1714 int ri_locked = 0;
1713 int error; 1715 int error;
1714 1716
1715 error = gfs2_rindex_hold(sdp, &ri_gh); 1717 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
1716 if (error) 1718 error = gfs2_rindex_hold(sdp, &ri_gh);
1717 goto fail; 1719 if (error)
1720 goto fail;
1721 ri_locked = 1;
1722 }
1718 1723
1719 error = -EINVAL; 1724 error = -EINVAL;
1720 rgd = gfs2_blk2rgrpd(sdp, no_addr); 1725 rgd = gfs2_blk2rgrpd(sdp, no_addr);
@@ -1730,7 +1735,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
1730 1735
1731 gfs2_glock_dq_uninit(&rgd_gh); 1736 gfs2_glock_dq_uninit(&rgd_gh);
1732fail_rindex: 1737fail_rindex:
1733 gfs2_glock_dq_uninit(&ri_gh); 1738 if (ri_locked)
1739 gfs2_glock_dq_uninit(&ri_gh);
1734fail: 1740fail:
1735 return error; 1741 return error;
1736} 1742}
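
The rgrp.c change above guards against glock recursion: gfs2_check_blk_type() can now be reached while the caller already holds the rindex glock, so it only takes (and therefore only releases) that lock when the current task was not already holding it. The same idea in plain pthread terms, with illustrative names; the kernel uses gfs2_glock_is_locked_by_me() and a glock holder, not a mutex.

#include <pthread.h>
#include <stdbool.h>

/* "Take the lock only if this task does not already hold it", with a
 * recursion-unsafe mutex and an explicit ownership record. */
struct owned_mutex {
	pthread_mutex_t m;
	pthread_t owner;
	bool held;
};

static struct owned_mutex rindex_lock = { .m = PTHREAD_MUTEX_INITIALIZER };

static bool locked_by_me(struct owned_mutex *om)
{
	/* Safe only for asking about our own prior acquisition. */
	return om->held && pthread_equal(om->owner, pthread_self());
}

static int check_block_type(void)
{
	bool locked_here = false;

	if (!locked_by_me(&rindex_lock)) {
		pthread_mutex_lock(&rindex_lock.m);
		rindex_lock.owner = pthread_self();
		rindex_lock.held = true;
		locked_here = true;	/* remember to drop it on the way out */
	}

	/* ... look up the resource group and verify the block type ... */

	if (locked_here) {
		rindex_lock.held = false;
		pthread_mutex_unlock(&rindex_lock.m);
	}
	return 0;
}
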
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0ec3ec672de1..c282ad41f3d1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -70,6 +70,11 @@ enum {
70 Opt_commit, 70 Opt_commit,
71 Opt_err_withdraw, 71 Opt_err_withdraw,
72 Opt_err_panic, 72 Opt_err_panic,
73 Opt_statfs_quantum,
74 Opt_statfs_percent,
75 Opt_quota_quantum,
76 Opt_barrier,
77 Opt_nobarrier,
73 Opt_error, 78 Opt_error,
74}; 79};
75 80
@@ -101,18 +106,23 @@ static const match_table_t tokens = {
101 {Opt_commit, "commit=%d"}, 106 {Opt_commit, "commit=%d"},
102 {Opt_err_withdraw, "errors=withdraw"}, 107 {Opt_err_withdraw, "errors=withdraw"},
103 {Opt_err_panic, "errors=panic"}, 108 {Opt_err_panic, "errors=panic"},
109 {Opt_statfs_quantum, "statfs_quantum=%d"},
110 {Opt_statfs_percent, "statfs_percent=%d"},
111 {Opt_quota_quantum, "quota_quantum=%d"},
112 {Opt_barrier, "barrier"},
113 {Opt_nobarrier, "nobarrier"},
104 {Opt_error, NULL} 114 {Opt_error, NULL}
105}; 115};
106 116
107/** 117/**
108 * gfs2_mount_args - Parse mount options 118 * gfs2_mount_args - Parse mount options
109 * @sdp: 119 * @args: The structure into which the parsed options will be written
110 * @data: 120 * @options: The options to parse
111 * 121 *
112 * Return: errno 122 * Return: errno
113 */ 123 */
114 124
115int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) 125int gfs2_mount_args(struct gfs2_args *args, char *options)
116{ 126{
117 char *o; 127 char *o;
118 int token; 128 int token;
@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
157 break; 167 break;
158 case Opt_debug: 168 case Opt_debug:
159 if (args->ar_errors == GFS2_ERRORS_PANIC) { 169 if (args->ar_errors == GFS2_ERRORS_PANIC) {
160 fs_info(sdp, "-o debug and -o errors=panic " 170 printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
161 "are mutually exclusive.\n"); 171 "are mutually exclusive.\n");
162 return -EINVAL; 172 return -EINVAL;
163 } 173 }
@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
210 case Opt_commit: 220 case Opt_commit:
211 rv = match_int(&tmp[0], &args->ar_commit); 221 rv = match_int(&tmp[0], &args->ar_commit);
212 if (rv || args->ar_commit <= 0) { 222 if (rv || args->ar_commit <= 0) {
213 fs_info(sdp, "commit mount option requires a positive numeric argument\n"); 223 printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
224 return rv ? rv : -EINVAL;
225 }
226 break;
227 case Opt_statfs_quantum:
228 rv = match_int(&tmp[0], &args->ar_statfs_quantum);
229 if (rv || args->ar_statfs_quantum < 0) {
230 printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n");
231 return rv ? rv : -EINVAL;
232 }
233 break;
234 case Opt_quota_quantum:
235 rv = match_int(&tmp[0], &args->ar_quota_quantum);
236 if (rv || args->ar_quota_quantum <= 0) {
237 printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n");
238 return rv ? rv : -EINVAL;
239 }
240 break;
241 case Opt_statfs_percent:
242 rv = match_int(&tmp[0], &args->ar_statfs_percent);
243 if (rv || args->ar_statfs_percent < 0 ||
244 args->ar_statfs_percent > 100) {
245 printk(KERN_WARNING "statfs_percent mount option requires a numeric argument between 0 and 100\n");
214 return rv ? rv : -EINVAL; 246 return rv ? rv : -EINVAL;
215 } 247 }
216 break; 248 break;
@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
219 break; 251 break;
220 case Opt_err_panic: 252 case Opt_err_panic:
221 if (args->ar_debug) { 253 if (args->ar_debug) {
222 fs_info(sdp, "-o debug and -o errors=panic " 254 printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
223 "are mutually exclusive.\n"); 255 "are mutually exclusive.\n");
224 return -EINVAL; 256 return -EINVAL;
225 } 257 }
226 args->ar_errors = GFS2_ERRORS_PANIC; 258 args->ar_errors = GFS2_ERRORS_PANIC;
227 break; 259 break;
260 case Opt_barrier:
261 args->ar_nobarrier = 0;
262 break;
263 case Opt_nobarrier:
264 args->ar_nobarrier = 1;
265 break;
228 case Opt_error: 266 case Opt_error:
229 default: 267 default:
230 fs_info(sdp, "invalid mount option: %s\n", o); 268 printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
231 return -EINVAL; 269 return -EINVAL;
232 } 270 }
233 } 271 }
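
The super.c hunks above add five mount options (statfs_quantum, statfs_percent, quota_quantum, barrier, nobarrier) and switch the diagnostics from fs_info() to printk(), since gfs2_mount_args() is now called before a superblock exists. As a rough userspace stand-in for the match_token()/match_int() parsing and the range checks being enforced, consider the following sketch; names and validation are illustrative only.

#include <stdio.h>
#include <string.h>

struct mount_args {
	int statfs_quantum;	/* >= 0; 0 selects the slow, exact statfs */
	int quota_quantum;	/* > 0 seconds between quota syncs */
	int statfs_percent;	/* 0..100; 0 disables the early sync */
	int nobarrier;
};

static int parse_options(struct mount_args *a, char *options)
{
	char *o;

	while ((o = strsep(&options, ",")) != NULL) {
		if (!*o)
			continue;
		if (sscanf(o, "statfs_quantum=%d", &a->statfs_quantum) == 1) {
			if (a->statfs_quantum < 0)
				goto bad;
		} else if (sscanf(o, "quota_quantum=%d", &a->quota_quantum) == 1) {
			if (a->quota_quantum <= 0)
				goto bad;
		} else if (sscanf(o, "statfs_percent=%d", &a->statfs_percent) == 1) {
			if (a->statfs_percent < 0 || a->statfs_percent > 100)
				goto bad;
		} else if (strcmp(o, "barrier") == 0) {
			a->nobarrier = 0;
		} else if (strcmp(o, "nobarrier") == 0) {
			a->nobarrier = 1;
		} else {
			fprintf(stderr, "invalid mount option: %s\n", o);
			return -1;
		}
	}
	return 0;
bad:
	fprintf(stderr, "mount option argument out of range: %s\n", o);
	return -1;
}

On the command line this corresponds to something like mount -t gfs2 -o statfs_quantum=10,statfs_percent=5,nobarrier <dev> <dir>; the values here are only meant to show the syntax.
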
@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
442{ 480{
443 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 481 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
444 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 482 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
483 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
445 struct buffer_head *l_bh; 484 struct buffer_head *l_bh;
485 s64 x, y;
486 int need_sync = 0;
446 int error; 487 int error;
447 488
448 error = gfs2_meta_inode_buffer(l_ip, &l_bh); 489 error = gfs2_meta_inode_buffer(l_ip, &l_bh);
@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
456 l_sc->sc_free += free; 497 l_sc->sc_free += free;
457 l_sc->sc_dinodes += dinodes; 498 l_sc->sc_dinodes += dinodes;
458 gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); 499 gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
500 if (sdp->sd_args.ar_statfs_percent) {
501 x = 100 * l_sc->sc_free;
502 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
503 if (x >= y || x <= -y)
504 need_sync = 1;
505 }
459 spin_unlock(&sdp->sd_statfs_spin); 506 spin_unlock(&sdp->sd_statfs_spin);
460 507
461 brelse(l_bh); 508 brelse(l_bh);
509 if (need_sync)
510 gfs2_wake_up_statfs(sdp);
462} 511}
463 512
464void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, 513void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
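
gfs2_statfs_change() now checks whether the locally buffered statfs delta has grown past statfs_percent percent of the master free count and, if so, wakes quotad for an early sync. The test reduces to |local delta| >= percent% of the master value, as the standalone helper below restates (names are illustrative).

#include <stdbool.h>
#include <stdint.h>

/* Early-sync test from the hunk above: x = 100 * local_delta,
 * y = master_free * percent, sync when x >= y or x <= -y, i.e. when the
 * buffered local change reaches the configured share of the master count. */
static bool statfs_needs_sync(int64_t local_delta, int64_t master_free,
			      int percent)
{
	int64_t x, y;

	if (!percent)
		return false;		/* statfs_percent=0: feature disabled */
	x = 100 * local_delta;
	y = master_free * percent;
	return x >= y || x <= -y;
}
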
@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
484 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); 533 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
485} 534}
486 535
487int gfs2_statfs_sync(struct gfs2_sbd *sdp) 536int gfs2_statfs_sync(struct super_block *sb, int type)
488{ 537{
538 struct gfs2_sbd *sdp = sb->s_fs_info;
489 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 539 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
490 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 540 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
491 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 541 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
521 goto out_bh2; 571 goto out_bh2;
522 572
523 update_statfs(sdp, m_bh, l_bh); 573 update_statfs(sdp, m_bh, l_bh);
574 sdp->sd_statfs_force_sync = 0;
524 575
525 gfs2_trans_end(sdp); 576 gfs2_trans_end(sdp);
526 577
@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
712 int error; 763 int error;
713 764
714 flush_workqueue(gfs2_delete_workqueue); 765 flush_workqueue(gfs2_delete_workqueue);
715 gfs2_quota_sync(sdp); 766 gfs2_quota_sync(sdp->sd_vfs, 0);
716 gfs2_statfs_sync(sdp); 767 gfs2_statfs_sync(sdp->sd_vfs, 0);
717 768
718 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, 769 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
719 &t_gh); 770 &t_gh);
@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1061 1112
1062 spin_lock(&gt->gt_spin); 1113 spin_lock(&gt->gt_spin);
1063 args.ar_commit = gt->gt_log_flush_secs; 1114 args.ar_commit = gt->gt_log_flush_secs;
1115 args.ar_quota_quantum = gt->gt_quota_quantum;
1116 if (gt->gt_statfs_slow)
1117 args.ar_statfs_quantum = 0;
1118 else
1119 args.ar_statfs_quantum = gt->gt_statfs_quantum;
1064 spin_unlock(&gt->gt_spin); 1120 spin_unlock(&gt->gt_spin);
1065 error = gfs2_mount_args(sdp, &args, data); 1121 error = gfs2_mount_args(&args, data);
1066 if (error) 1122 if (error)
1067 return error; 1123 return error;
1068 1124
@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1097 sb->s_flags |= MS_POSIXACL; 1153 sb->s_flags |= MS_POSIXACL;
1098 else 1154 else
1099 sb->s_flags &= ~MS_POSIXACL; 1155 sb->s_flags &= ~MS_POSIXACL;
1156 if (sdp->sd_args.ar_nobarrier)
1157 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1158 else
1159 clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1100 spin_lock(&gt->gt_spin); 1160 spin_lock(&gt->gt_spin);
1101 gt->gt_log_flush_secs = args.ar_commit; 1161 gt->gt_log_flush_secs = args.ar_commit;
1162 gt->gt_quota_quantum = args.ar_quota_quantum;
1163 if (args.ar_statfs_quantum) {
1164 gt->gt_statfs_slow = 0;
1165 gt->gt_statfs_quantum = args.ar_statfs_quantum;
1166 }
1167 else {
1168 gt->gt_statfs_slow = 1;
1169 gt->gt_statfs_quantum = 30;
1170 }
1102 spin_unlock(&gt->gt_spin); 1171 spin_unlock(&gt->gt_spin);
1103 1172
1104 gfs2_online_uevent(sdp); 1173 gfs2_online_uevent(sdp);
@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1179{ 1248{
1180 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info; 1249 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
1181 struct gfs2_args *args = &sdp->sd_args; 1250 struct gfs2_args *args = &sdp->sd_args;
1182 int lfsecs; 1251 int val;
1183 1252
1184 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir)) 1253 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
1185 seq_printf(s, ",meta"); 1254 seq_printf(s, ",meta");
@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1240 } 1309 }
1241 if (args->ar_discard) 1310 if (args->ar_discard)
1242 seq_printf(s, ",discard"); 1311 seq_printf(s, ",discard");
1243 lfsecs = sdp->sd_tune.gt_log_flush_secs; 1312 val = sdp->sd_tune.gt_log_flush_secs;
1244 if (lfsecs != 60) 1313 if (val != 60)
1245 seq_printf(s, ",commit=%d", lfsecs); 1314 seq_printf(s, ",commit=%d", val);
1315 val = sdp->sd_tune.gt_statfs_quantum;
1316 if (val != 30)
1317 seq_printf(s, ",statfs_quantum=%d", val);
1318 val = sdp->sd_tune.gt_quota_quantum;
1319 if (val != 60)
1320 seq_printf(s, ",quota_quantum=%d", val);
1321 if (args->ar_statfs_percent)
1322 seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1246 if (args->ar_errors != GFS2_ERRORS_DEFAULT) { 1323 if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1247 const char *state; 1324 const char *state;
1248 1325
@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1259 } 1336 }
1260 seq_printf(s, ",errors=%s", state); 1337 seq_printf(s, ",errors=%s", state);
1261 } 1338 }
1339 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1340 seq_printf(s, ",nobarrier");
1341
1262 return 0; 1342 return 0;
1263} 1343}
1264 1344
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 235db3682885..3df60f2d84e3 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
27 27
28extern void gfs2_jindex_free(struct gfs2_sbd *sdp); 28extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
29 29
30extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data); 30extern int gfs2_mount_args(struct gfs2_args *args, char *data);
31 31
32extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); 32extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
33extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); 33extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
44 const void *buf); 44 const void *buf);
45extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, 45extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
46 struct buffer_head *l_bh); 46 struct buffer_head *l_bh);
47extern int gfs2_statfs_sync(struct gfs2_sbd *sdp); 47extern int gfs2_statfs_sync(struct super_block *sb, int type);
48 48
49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp); 49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); 50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 446329728d52..c5dad1eb7b91 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
158 if (simple_strtol(buf, NULL, 0) != 1) 158 if (simple_strtol(buf, NULL, 0) != 1)
159 return -EINVAL; 159 return -EINVAL;
160 160
161 gfs2_statfs_sync(sdp); 161 gfs2_statfs_sync(sdp->sd_vfs, 0);
162 return len; 162 return len;
163} 163}
164 164
@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
171 if (simple_strtol(buf, NULL, 0) != 1) 171 if (simple_strtol(buf, NULL, 0) != 1)
172 return -EINVAL; 172 return -EINVAL;
173 173
174 gfs2_quota_sync(sdp); 174 gfs2_quota_sync(sdp->sd_vfs, 0);
175 return len; 175 return len;
176} 176}
177 177
178static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, 178static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
179 size_t len) 179 size_t len)
180{ 180{
181 int error;
181 u32 id; 182 u32 id;
182 183
183 if (!capable(CAP_SYS_ADMIN)) 184 if (!capable(CAP_SYS_ADMIN))
@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
185 186
186 id = simple_strtoul(buf, NULL, 0); 187 id = simple_strtoul(buf, NULL, 0);
187 188
188 gfs2_quota_refresh(sdp, 1, id); 189 error = gfs2_quota_refresh(sdp, 1, id);
189 return len; 190 return error ? error : len;
190} 191}
191 192
192static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, 193static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
193 size_t len) 194 size_t len)
194{ 195{
196 int error;
195 u32 id; 197 u32 id;
196 198
197 if (!capable(CAP_SYS_ADMIN)) 199 if (!capable(CAP_SYS_ADMIN))
@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
199 201
200 id = simple_strtoul(buf, NULL, 0); 202 id = simple_strtoul(buf, NULL, 0);
201 203
202 gfs2_quota_refresh(sdp, 0, id); 204 error = gfs2_quota_refresh(sdp, 0, id);
203 return len; 205 return error ? error : len;
204} 206}
205 207
206static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 208static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
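
The sys.c hunk above makes the two quota_refresh store hooks report failure: the return value of gfs2_quota_refresh() is propagated so the sysfs write(2) fails instead of silently claiming success. The convention, a negative errno on failure and the consumed length otherwise, is the usual one for store callbacks; a trimmed-down restatement with illustrative names:

#include <stdlib.h>
#include <sys/types.h>

/* Stand-in for gfs2_quota_refresh(); always succeeds in this sketch. */
static int do_refresh(unsigned int id)
{
	(void)id;
	return 0;
}

/* Store-style callback: negative errno on failure, bytes consumed on
 * success, so the writer's write(2) reflects the real outcome. */
static ssize_t refresh_store(const char *buf, size_t len)
{
	unsigned int id = (unsigned int)strtoul(buf, NULL, 0);
	int error = do_refresh(id);

	return error ? error : (ssize_t)len;
}
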
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8a0f8ef6ee27..912f5cbc4740 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -186,8 +186,8 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
186 return 0; 186 return 0;
187} 187}
188 188
189int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 189static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
190 struct gfs2_ea_location *el) 190 struct gfs2_ea_location *el)
191{ 191{
192 struct ea_find ef; 192 struct ea_find ef;
193 int error; 193 int error;
@@ -516,8 +516,8 @@ out:
516 return error; 516 return error;
517} 517}
518 518
519int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, 519static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
520 char *data, size_t size) 520 char *data, size_t size)
521{ 521{
522 int ret; 522 int ret;
523 size_t len = GFS2_EA_DATA_LEN(el->el_ea); 523 size_t len = GFS2_EA_DATA_LEN(el->el_ea);
@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
534 return len; 534 return len;
535} 535}
536 536
537int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
538{
539 struct gfs2_ea_location el;
540 int error;
541 int len;
542 char *data;
543
544 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
545 if (error)
546 return error;
547 if (!el.el_ea)
548 goto out;
549 if (!GFS2_EA_DATA_LEN(el.el_ea))
550 goto out;
551
552 len = GFS2_EA_DATA_LEN(el.el_ea);
553 data = kmalloc(len, GFP_NOFS);
554 error = -ENOMEM;
555 if (data == NULL)
556 goto out;
557
558 error = gfs2_ea_get_copy(ip, &el, data, len);
559 if (error == 0)
560 error = len;
561 *ppdata = data;
562out:
563 brelse(el.el_bh);
564 return error;
565}
566
537/** 567/**
538 * gfs2_xattr_get - Get a GFS2 extended attribute 568 * gfs2_xattr_get - Get a GFS2 extended attribute
539 * @inode: The inode 569 * @inode: The inode
@@ -1259,22 +1289,26 @@ fail:
1259 return error; 1289 return error;
1260} 1290}
1261 1291
1262int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el, 1292int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
1263 struct iattr *attr, char *data)
1264{ 1293{
1294 struct gfs2_ea_location el;
1265 struct buffer_head *dibh; 1295 struct buffer_head *dibh;
1266 int error; 1296 int error;
1267 1297
1268 if (GFS2_EA_IS_STUFFED(el->el_ea)) { 1298 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
1299 if (error)
1300 return error;
1301
1302 if (GFS2_EA_IS_STUFFED(el.el_ea)) {
1269 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); 1303 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1270 if (error) 1304 if (error)
1271 return error; 1305 return error;
1272 1306
1273 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1); 1307 gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
1274 memcpy(GFS2_EA2DATA(el->el_ea), data, 1308 memcpy(GFS2_EA2DATA(el.el_ea), data,
1275 GFS2_EA_DATA_LEN(el->el_ea)); 1309 GFS2_EA_DATA_LEN(el.el_ea));
1276 } else 1310 } else
1277 error = ea_acl_chmod_unstuffed(ip, el->el_ea, data); 1311 error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
1278 1312
1279 if (error) 1313 if (error)
1280 return error; 1314 return error;
@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
1507 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags); 1541 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
1508} 1542}
1509 1543
1510static int gfs2_xattr_system_get(struct inode *inode, const char *name,
1511 void *buffer, size_t size)
1512{
1513 return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
1514}
1515
1516static int gfs2_xattr_system_set(struct inode *inode, const char *name,
1517 const void *value, size_t size, int flags)
1518{
1519 return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
1520}
1521
1522static int gfs2_xattr_security_get(struct inode *inode, const char *name, 1544static int gfs2_xattr_security_get(struct inode *inode, const char *name,
1523 void *buffer, size_t size) 1545 void *buffer, size_t size)
1524{ 1546{
@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = {
1543 .set = gfs2_xattr_security_set, 1565 .set = gfs2_xattr_security_set,
1544}; 1566};
1545 1567
1546static struct xattr_handler gfs2_xattr_system_handler = {
1547 .prefix = XATTR_SYSTEM_PREFIX,
1548 .get = gfs2_xattr_system_get,
1549 .set = gfs2_xattr_system_set,
1550};
1551
1552struct xattr_handler *gfs2_xattr_handlers[] = { 1568struct xattr_handler *gfs2_xattr_handlers[] = {
1553 &gfs2_xattr_user_handler, 1569 &gfs2_xattr_user_handler,
1554 &gfs2_xattr_security_handler, 1570 &gfs2_xattr_security_handler,
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index cbdfd7743733..8d6ae5813c4d 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
62 62
63/* Exported to acl.c */ 63/* Exported to acl.c */
64 64
65extern int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 65extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
66 struct gfs2_ea_location *el); 66extern int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data);
67extern int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
68 char *data, size_t size);
69extern int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
70 struct iattr *attr, char *data);
71 67
72#endif /* __EATTR_DOT_H__ */ 68#endif /* __EATTR_DOT_H__ */
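
The xattr.h hunk above narrows what is exported to acl.c to two calls that now do their own gfs2_ea_find() lookup internally. A minimal sketch of how a caller such as fs/gfs2/acl.c might use gfs2_xattr_acl_get() after this change; the function name, locals and error handling below are illustrative, not taken from the patch:

/* Illustrative fragment, assumes the usual gfs2 and posix_acl headers. */
static struct posix_acl *demo_get_access_acl(struct gfs2_inode *ip)
{
	struct posix_acl *acl;
	char *data;
	int len;

	len = gfs2_xattr_acl_get(ip, GFS2_POSIX_ACL_ACCESS, &data);
	if (len < 0)
		return ERR_PTR(len);
	if (len == 0)
		return NULL;	/* no ACL xattr present */

	acl = posix_acl_from_xattr(data, len);
	kfree(data);
	return acl;
}
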
diff --git a/fs/inode.c b/fs/inode.c
index 4d8e3be55976..06c1f02de611 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -18,7 +18,6 @@
18#include <linux/hash.h> 18#include <linux/hash.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/security.h> 20#include <linux/security.h>
21#include <linux/ima.h>
22#include <linux/pagemap.h> 21#include <linux/pagemap.h>
23#include <linux/cdev.h> 22#include <linux/cdev.h>
24#include <linux/bootmem.h> 23#include <linux/bootmem.h>
@@ -157,11 +156,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
157 156
158 if (security_inode_alloc(inode)) 157 if (security_inode_alloc(inode))
159 goto out; 158 goto out;
160
161 /* allocate and initialize an i_integrity */
162 if (ima_inode_alloc(inode))
163 goto out_free_security;
164
165 spin_lock_init(&inode->i_lock); 159 spin_lock_init(&inode->i_lock);
166 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); 160 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
167 161
@@ -201,9 +195,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
201#endif 195#endif
202 196
203 return 0; 197 return 0;
204
205out_free_security:
206 security_inode_free(inode);
207out: 198out:
208 return -ENOMEM; 199 return -ENOMEM;
209} 200}
@@ -235,7 +226,6 @@ static struct inode *alloc_inode(struct super_block *sb)
235void __destroy_inode(struct inode *inode) 226void __destroy_inode(struct inode *inode)
236{ 227{
237 BUG_ON(inode_has_buffers(inode)); 228 BUG_ON(inode_has_buffers(inode));
238 ima_inode_free(inode);
239 security_inode_free(inode); 229 security_inode_free(inode);
240 fsnotify_inode_delete(inode); 230 fsnotify_inode_delete(inode);
241#ifdef CONFIG_FS_POSIX_ACL 231#ifdef CONFIG_FS_POSIX_ACL
diff --git a/fs/namespace.c b/fs/namespace.c
index bdc3cb4fd222..7d70d63ceb29 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1921,6 +1921,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1921 if (data_page) 1921 if (data_page)
1922 ((char *)data_page)[PAGE_SIZE - 1] = 0; 1922 ((char *)data_page)[PAGE_SIZE - 1] = 0;
1923 1923
1924 /* ... and get the mountpoint */
1925 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
1926 if (retval)
1927 return retval;
1928
1929 retval = security_sb_mount(dev_name, &path,
1930 type_page, flags, data_page);
1931 if (retval)
1932 goto dput_out;
1933
1924 /* Default to relatime unless overridden */ 1934 /* Default to relatime unless overridden */
1925 if (!(flags & MS_NOATIME)) 1935 if (!(flags & MS_NOATIME))
1926 mnt_flags |= MNT_RELATIME; 1936 mnt_flags |= MNT_RELATIME;
@@ -1945,16 +1955,6 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1945 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | 1955 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
1946 MS_STRICTATIME); 1956 MS_STRICTATIME);
1947 1957
1948 /* ... and get the mountpoint */
1949 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
1950 if (retval)
1951 return retval;
1952
1953 retval = security_sb_mount(dev_name, &path,
1954 type_page, flags, data_page);
1955 if (retval)
1956 goto dput_out;
1957
1958 if (flags & MS_REMOUNT) 1958 if (flags & MS_REMOUNT)
1959 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, 1959 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
1960 data_page); 1960 data_page);
diff --git a/fs/open.c b/fs/open.c
index 4f01e06227c6..b4b31d277f3a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -587,6 +587,9 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
587 error = -EPERM; 587 error = -EPERM;
588 if (!capable(CAP_SYS_CHROOT)) 588 if (!capable(CAP_SYS_CHROOT))
589 goto dput_and_out; 589 goto dput_and_out;
590 error = security_path_chroot(&path);
591 if (error)
592 goto dput_and_out;
590 593
591 set_fs_root(current->fs, &path); 594 set_fs_root(current->fs, &path);
592 error = 0; 595 error = 0;
@@ -617,11 +620,15 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
617 if (err) 620 if (err)
618 goto out_putf; 621 goto out_putf;
619 mutex_lock(&inode->i_mutex); 622 mutex_lock(&inode->i_mutex);
623 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
624 if (err)
625 goto out_unlock;
620 if (mode == (mode_t) -1) 626 if (mode == (mode_t) -1)
621 mode = inode->i_mode; 627 mode = inode->i_mode;
622 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); 628 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
623 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; 629 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
624 err = notify_change(dentry, &newattrs); 630 err = notify_change(dentry, &newattrs);
631out_unlock:
625 mutex_unlock(&inode->i_mutex); 632 mutex_unlock(&inode->i_mutex);
626 mnt_drop_write(file->f_path.mnt); 633 mnt_drop_write(file->f_path.mnt);
627out_putf: 634out_putf:
@@ -646,11 +653,15 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
646 if (error) 653 if (error)
647 goto dput_and_out; 654 goto dput_and_out;
648 mutex_lock(&inode->i_mutex); 655 mutex_lock(&inode->i_mutex);
656 error = security_path_chmod(path.dentry, path.mnt, mode);
657 if (error)
658 goto out_unlock;
649 if (mode == (mode_t) -1) 659 if (mode == (mode_t) -1)
650 mode = inode->i_mode; 660 mode = inode->i_mode;
651 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); 661 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
652 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; 662 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
653 error = notify_change(path.dentry, &newattrs); 663 error = notify_change(path.dentry, &newattrs);
664out_unlock:
654 mutex_unlock(&inode->i_mutex); 665 mutex_unlock(&inode->i_mutex);
655 mnt_drop_write(path.mnt); 666 mnt_drop_write(path.mnt);
656dput_and_out: 667dput_and_out:
@@ -664,9 +675,9 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
664 return sys_fchmodat(AT_FDCWD, filename, mode); 675 return sys_fchmodat(AT_FDCWD, filename, mode);
665} 676}
666 677
667static int chown_common(struct dentry * dentry, uid_t user, gid_t group) 678static int chown_common(struct path *path, uid_t user, gid_t group)
668{ 679{
669 struct inode *inode = dentry->d_inode; 680 struct inode *inode = path->dentry->d_inode;
670 int error; 681 int error;
671 struct iattr newattrs; 682 struct iattr newattrs;
672 683
@@ -683,7 +694,9 @@ static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
683 newattrs.ia_valid |= 694 newattrs.ia_valid |=
684 ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; 695 ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
685 mutex_lock(&inode->i_mutex); 696 mutex_lock(&inode->i_mutex);
686 error = notify_change(dentry, &newattrs); 697 error = security_path_chown(path, user, group);
698 if (!error)
699 error = notify_change(path->dentry, &newattrs);
687 mutex_unlock(&inode->i_mutex); 700 mutex_unlock(&inode->i_mutex);
688 701
689 return error; 702 return error;
@@ -700,7 +713,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
700 error = mnt_want_write(path.mnt); 713 error = mnt_want_write(path.mnt);
701 if (error) 714 if (error)
702 goto out_release; 715 goto out_release;
703 error = chown_common(path.dentry, user, group); 716 error = chown_common(&path, user, group);
704 mnt_drop_write(path.mnt); 717 mnt_drop_write(path.mnt);
705out_release: 718out_release:
706 path_put(&path); 719 path_put(&path);
@@ -725,7 +738,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
725 error = mnt_want_write(path.mnt); 738 error = mnt_want_write(path.mnt);
726 if (error) 739 if (error)
727 goto out_release; 740 goto out_release;
728 error = chown_common(path.dentry, user, group); 741 error = chown_common(&path, user, group);
729 mnt_drop_write(path.mnt); 742 mnt_drop_write(path.mnt);
730out_release: 743out_release:
731 path_put(&path); 744 path_put(&path);
@@ -744,7 +757,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
744 error = mnt_want_write(path.mnt); 757 error = mnt_want_write(path.mnt);
745 if (error) 758 if (error)
746 goto out_release; 759 goto out_release;
747 error = chown_common(path.dentry, user, group); 760 error = chown_common(&path, user, group);
748 mnt_drop_write(path.mnt); 761 mnt_drop_write(path.mnt);
749out_release: 762out_release:
750 path_put(&path); 763 path_put(&path);
@@ -767,7 +780,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
767 goto out_fput; 780 goto out_fput;
768 dentry = file->f_path.dentry; 781 dentry = file->f_path.dentry;
769 audit_inode(NULL, dentry); 782 audit_inode(NULL, dentry);
770 error = chown_common(dentry, user, group); 783 error = chown_common(&file->f_path, user, group);
771 mnt_drop_write(file->f_path.mnt); 784 mnt_drop_write(file->f_path.mnt);
772out_fput: 785out_fput:
773 fput(file); 786 fput(file);
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 8047e01ef46b..353e78a9ebee 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -17,7 +17,7 @@ config QUOTA
17 17
18config QUOTA_NETLINK_INTERFACE 18config QUOTA_NETLINK_INTERFACE
19 bool "Report quota messages through netlink interface" 19 bool "Report quota messages through netlink interface"
20 depends on QUOTA && NET 20 depends on QUOTACTL && NET
21 help 21 help
22 If you say Y here, quota warnings (about exceeding softlimit, reaching 22 If you say Y here, quota warnings (about exceeding softlimit, reaching
23 hardlimit, etc.) will be reported through netlink interface. If unsure, 23 hardlimit, etc.) will be reported through netlink interface. If unsure,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 39b49c42a7ed..9b6ad908dcb2 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -77,10 +77,6 @@
77#include <linux/capability.h> 77#include <linux/capability.h>
78#include <linux/quotaops.h> 78#include <linux/quotaops.h>
79#include <linux/writeback.h> /* for inode_lock, oddly enough.. */ 79#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
80#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
81#include <net/netlink.h>
82#include <net/genetlink.h>
83#endif
84 80
85#include <asm/uaccess.h> 81#include <asm/uaccess.h>
86 82
@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype)
1071} 1067}
1072#endif 1068#endif
1073 1069
1074#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1075
1076/* Netlink family structure for quota */
1077static struct genl_family quota_genl_family = {
1078 .id = GENL_ID_GENERATE,
1079 .hdrsize = 0,
1080 .name = "VFS_DQUOT",
1081 .version = 1,
1082 .maxattr = QUOTA_NL_A_MAX,
1083};
1084
1085/* Send warning to userspace about user which exceeded quota */
1086static void send_warning(const struct dquot *dquot, const char warntype)
1087{
1088 static atomic_t seq;
1089 struct sk_buff *skb;
1090 void *msg_head;
1091 int ret;
1092 int msg_size = 4 * nla_total_size(sizeof(u32)) +
1093 2 * nla_total_size(sizeof(u64));
1094
1095 /* We have to allocate using GFP_NOFS as we are called from a
1096 * filesystem performing write and thus further recursion into
1097 * the fs to free some data could cause deadlocks. */
1098 skb = genlmsg_new(msg_size, GFP_NOFS);
1099 if (!skb) {
1100 printk(KERN_ERR
1101 "VFS: Not enough memory to send quota warning.\n");
1102 return;
1103 }
1104 msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
1105 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
1106 if (!msg_head) {
1107 printk(KERN_ERR
1108 "VFS: Cannot store netlink header in quota warning.\n");
1109 goto err_out;
1110 }
1111 ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
1112 if (ret)
1113 goto attr_err_out;
1114 ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
1115 if (ret)
1116 goto attr_err_out;
1117 ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
1118 if (ret)
1119 goto attr_err_out;
1120 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
1121 MAJOR(dquot->dq_sb->s_dev));
1122 if (ret)
1123 goto attr_err_out;
1124 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
1125 MINOR(dquot->dq_sb->s_dev));
1126 if (ret)
1127 goto attr_err_out;
1128 ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
1129 if (ret)
1130 goto attr_err_out;
1131 genlmsg_end(skb, msg_head);
1132
1133 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
1134 return;
1135attr_err_out:
1136 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
1137err_out:
1138 kfree_skb(skb);
1139}
1140#endif
1141/* 1070/*
1142 * Write warnings to the console and send warning messages over netlink. 1071 * Write warnings to the console and send warning messages over netlink.
1143 * 1072 *
@@ -1145,18 +1074,20 @@ err_out:
1145 */ 1074 */
1146static void flush_warnings(struct dquot *const *dquots, char *warntype) 1075static void flush_warnings(struct dquot *const *dquots, char *warntype)
1147{ 1076{
1077 struct dquot *dq;
1148 int i; 1078 int i;
1149 1079
1150 for (i = 0; i < MAXQUOTAS; i++) 1080 for (i = 0; i < MAXQUOTAS; i++) {
1151 if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && 1081 dq = dquots[i];
1152 !warning_issued(dquots[i], warntype[i])) { 1082 if (dq && warntype[i] != QUOTA_NL_NOWARN &&
1083 !warning_issued(dq, warntype[i])) {
1153#ifdef CONFIG_PRINT_QUOTA_WARNING 1084#ifdef CONFIG_PRINT_QUOTA_WARNING
1154 print_warning(dquots[i], warntype[i]); 1085 print_warning(dq, warntype[i]);
1155#endif
1156#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1157 send_warning(dquots[i], warntype[i]);
1158#endif 1086#endif
1087 quota_send_warning(dq->dq_type, dq->dq_id,
1088 dq->dq_sb->s_dev, warntype[i]);
1159 } 1089 }
1090 }
1160} 1091}
1161 1092
1162static int ignore_hardlimit(struct dquot *dquot) 1093static int ignore_hardlimit(struct dquot *dquot)
@@ -2607,12 +2538,6 @@ static int __init dquot_init(void)
2607 2538
2608 register_shrinker(&dqcache_shrinker); 2539 register_shrinker(&dqcache_shrinker);
2609 2540
2610#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
2611 if (genl_register_family(&quota_genl_family) != 0)
2612 printk(KERN_ERR
2613 "VFS: Failed to create quota netlink interface.\n");
2614#endif
2615
2616 return 0; 2541 return 0;
2617} 2542}
2618module_init(dquot_init); 2543module_init(dquot_init);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95c5b42384b2..ee91e2756950 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,8 @@
18#include <linux/capability.h> 18#include <linux/capability.h>
19#include <linux/quotaops.h> 19#include <linux/quotaops.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <net/netlink.h>
22#include <net/genetlink.h>
21 23
22/* Check validity of generic quotactl commands */ 24/* Check validity of generic quotactl commands */
23static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, 25static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
525 return ret; 527 return ret;
526} 528}
527#endif 529#endif
530
531
532#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
533
534/* Netlink family structure for quota */
535static struct genl_family quota_genl_family = {
536 .id = GENL_ID_GENERATE,
537 .hdrsize = 0,
538 .name = "VFS_DQUOT",
539 .version = 1,
540 .maxattr = QUOTA_NL_A_MAX,
541};
542
543/**
544 * quota_send_warning - Send warning to userspace about exceeded quota
545 * @type: The quota type: USRQUOTA, GRPQUOTA,...
546 * @id: The user or group id of the quota that was exceeded
547 * @dev: The device on which the fs is mounted (sb->s_dev)
548 * @warntype: The type of the warning: QUOTA_NL_...
549 *
550 * This can be used by filesystems (including those which don't use
551 * dquot) to send a message to userspace relating to quota limits.
552 *
553 */
554
555void quota_send_warning(short type, unsigned int id, dev_t dev,
556 const char warntype)
557{
558 static atomic_t seq;
559 struct sk_buff *skb;
560 void *msg_head;
561 int ret;
562 int msg_size = 4 * nla_total_size(sizeof(u32)) +
563 2 * nla_total_size(sizeof(u64));
564
565 /* We have to allocate using GFP_NOFS as we are called from a
566 * filesystem performing write and thus further recursion into
567 * the fs to free some data could cause deadlocks. */
568 skb = genlmsg_new(msg_size, GFP_NOFS);
569 if (!skb) {
570 printk(KERN_ERR
571 "VFS: Not enough memory to send quota warning.\n");
572 return;
573 }
574 msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
575 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
576 if (!msg_head) {
577 printk(KERN_ERR
578 "VFS: Cannot store netlink header in quota warning.\n");
579 goto err_out;
580 }
581 ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
582 if (ret)
583 goto attr_err_out;
584 ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
585 if (ret)
586 goto attr_err_out;
587 ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
588 if (ret)
589 goto attr_err_out;
590 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
591 if (ret)
592 goto attr_err_out;
593 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
594 if (ret)
595 goto attr_err_out;
596 ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
597 if (ret)
598 goto attr_err_out;
599 genlmsg_end(skb, msg_head);
600
601 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
602 return;
603attr_err_out:
604 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
605err_out:
606 kfree_skb(skb);
607}
608EXPORT_SYMBOL(quota_send_warning);
609
610static int __init quota_init(void)
611{
612 if (genl_register_family(&quota_genl_family) != 0)
613 printk(KERN_ERR
614 "VFS: Failed to create quota netlink interface.\n");
615 return 0;
616};
617
618module_init(quota_init);
619#endif
620
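
quota_send_warning() now lives in fs/quota/quota.c and is exported, so any filesystem, not only dquot users, can push a quota warning onto the VFS_DQUOT generic netlink family. A hedged sketch of a call site; the surrounding limit check and variable names are hypothetical:

/* Hypothetical call site in a filesystem's own quota accounting path. */
if (alloc_blocks > hard_block_limit)		/* fs-specific check */
	quota_send_warning(USRQUOTA, uid,
			   sb->s_dev, QUOTA_NL_BHARDWARN);

The type and warntype constants (USRQUOTA/GRPQUOTA, QUOTA_NL_*) come from include/linux/quota.h, and the stub added there keeps such callers buildable when CONFIG_QUOTA_NETLINK_INTERFACE is off.
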
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
index c6ad7c7e3ee9..05ac0fe9c4d3 100644
--- a/fs/xattr_acl.c
+++ b/fs/xattr_acl.c
@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size)
36 if (count == 0) 36 if (count == 0)
37 return NULL; 37 return NULL;
38 38
39 acl = posix_acl_alloc(count, GFP_KERNEL); 39 acl = posix_acl_alloc(count, GFP_NOFS);
40 if (!acl) 40 if (!acl)
41 return ERR_PTR(-ENOMEM); 41 return ERR_PTR(-ENOMEM);
42 acl_e = acl->a_entries; 42 acl_e = acl->a_entries;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 1feed71551c9..5a5385749e16 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -330,6 +330,7 @@ unifdef-y += scc.h
330unifdef-y += sched.h 330unifdef-y += sched.h
331unifdef-y += screen_info.h 331unifdef-y += screen_info.h
332unifdef-y += sdla.h 332unifdef-y += sdla.h
333unifdef-y += securebits.h
333unifdef-y += selinux_netlink.h 334unifdef-y += selinux_netlink.h
334unifdef-y += sem.h 335unifdef-y += sem.h
335unifdef-y += serial_core.h 336unifdef-y += serial_core.h
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index dd97fb8408a8..b10ec49ee2dd 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t *pgdat,
53 unsigned long addr, 53 unsigned long addr,
54 unsigned long size); 54 unsigned long size);
55extern void free_bootmem(unsigned long addr, unsigned long size); 55extern void free_bootmem(unsigned long addr, unsigned long size);
56extern void free_bootmem_late(unsigned long addr, unsigned long size);
56 57
57/* 58/*
58 * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, 59 * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
diff --git a/include/linux/capability.h b/include/linux/capability.h
index c8f2a5f70ed5..39e5ff512fbe 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -92,9 +92,7 @@ struct vfs_cap_data {
92#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 92#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
93#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 93#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
94 94
95#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
96extern int file_caps_enabled; 95extern int file_caps_enabled;
97#endif
98 96
99typedef struct kernel_cap_struct { 97typedef struct kernel_cap_struct {
100 __u32 cap[_KERNEL_CAPABILITY_U32S]; 98 __u32 cap[_KERNEL_CAPABILITY_U32S];
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a3ed7cb8ca34..73dcf804bc94 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -79,6 +79,7 @@
79#define noinline __attribute__((noinline)) 79#define noinline __attribute__((noinline))
80#define __attribute_const__ __attribute__((__const__)) 80#define __attribute_const__ __attribute__((__const__))
81#define __maybe_unused __attribute__((unused)) 81#define __maybe_unused __attribute__((unused))
82#define __always_unused __attribute__((unused))
82 83
83#define __gcc_header(x) #x 84#define __gcc_header(x) #x
84#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) 85#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 450fa597c94d..ab3af40a53c6 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -36,4 +36,18 @@
36 the kernel context */ 36 the kernel context */
37#define __cold __attribute__((__cold__)) 37#define __cold __attribute__((__cold__))
38 38
39
40#if __GNUC_MINOR__ >= 5
41/*
42 * Mark a position in code as unreachable. This can be used to
43 * suppress control flow warnings after asm blocks that transfer
44 * control elsewhere.
45 *
46 * Early snapshots of gcc 4.5 don't support this and we can't detect
47 * this in the preprocessor, but we can live with this because they're
48 * unreleased. Really, we need to have autoconf for the kernel.
49 */
50#define unreachable() __builtin_unreachable()
51#endif
52
39#endif 53#endif
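
unreachable() expands to __builtin_unreachable() on gcc 4.5+ and to an empty infinite loop otherwise (see the include/linux/compiler.h hunk below), so it marks spots the compiler cannot prove are dead. A small hypothetical example:

/* Hypothetical: every reachable case returns, but gcc cannot see that. */
static int parity(int v)
{
	switch (v & 1) {
	case 0:
		return 0;
	case 1:
		return 1;
	}
	unreachable();	/* silences "control reaches end of non-void function" */
}
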
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04fb5135b4e1..acbd654cc850 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,6 +144,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
144# define barrier() __memory_barrier() 144# define barrier() __memory_barrier()
145#endif 145#endif
146 146
147/* Unreachable code */
148#ifndef unreachable
149# define unreachable() do { } while (1)
150#endif
151
147#ifndef RELOC_HIDE 152#ifndef RELOC_HIDE
148# define RELOC_HIDE(ptr, off) \ 153# define RELOC_HIDE(ptr, off) \
149 ({ unsigned long __ptr; \ 154 ({ unsigned long __ptr; \
@@ -213,6 +218,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
213# define __maybe_unused /* unimplemented */ 218# define __maybe_unused /* unimplemented */
214#endif 219#endif
215 220
221#ifndef __always_unused
222# define __always_unused /* unimplemented */
223#endif
224
216#ifndef noinline 225#ifndef noinline
217#define noinline 226#define noinline
218#endif 227#endif
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 4a2b162c256a..5de4c9e5856d 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -208,16 +208,9 @@ struct dmar_atsr_unit {
208 u8 include_all:1; /* include all ports */ 208 u8 include_all:1; /* include all ports */
209}; 209};
210 210
211/* Intel DMAR initialization functions */
212extern int intel_iommu_init(void); 211extern int intel_iommu_init(void);
213#else 212#else /* !CONFIG_DMAR: */
214static inline int intel_iommu_init(void) 213static inline int intel_iommu_init(void) { return -ENODEV; }
215{ 214#endif /* CONFIG_DMAR */
216#ifdef CONFIG_INTR_REMAP 215
217 return dmar_dev_scope_init();
218#else
219 return -ENODEV;
220#endif
221}
222#endif /* !CONFIG_DMAR */
223#endif /* __DMAR_H__ */ 216#endif /* __DMAR_H__ */
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index b80c88dedbbb..81f90a59cda6 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -81,7 +81,11 @@ struct gfs2_meta_header {
81 __be32 mh_type; 81 __be32 mh_type;
82 __be64 __pad0; /* Was generation number in gfs1 */ 82 __be64 __pad0; /* Was generation number in gfs1 */
83 __be32 mh_format; 83 __be32 mh_format;
84 __be32 __pad1; /* Was incarnation number in gfs1 */ 84 /* This union is to keep userspace happy */
85 union {
86 __be32 mh_jid; /* Was incarnation number in gfs1 */
87 __be32 __pad1;
88 };
85}; 89};
86 90
87/* 91/*
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 6d527ee82b2b..d5b387669dab 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,10 +139,34 @@ static inline void account_system_vtime(struct task_struct *tsk)
139#endif 139#endif
140 140
141#if defined(CONFIG_NO_HZ) 141#if defined(CONFIG_NO_HZ)
142#if defined(CONFIG_TINY_RCU)
143extern void rcu_enter_nohz(void);
144extern void rcu_exit_nohz(void);
145
146static inline void rcu_irq_enter(void)
147{
148 rcu_exit_nohz();
149}
150
151static inline void rcu_irq_exit(void)
152{
153 rcu_enter_nohz();
154}
155
156static inline void rcu_nmi_enter(void)
157{
158}
159
160static inline void rcu_nmi_exit(void)
161{
162}
163
164#else
142extern void rcu_irq_enter(void); 165extern void rcu_irq_enter(void);
143extern void rcu_irq_exit(void); 166extern void rcu_irq_exit(void);
144extern void rcu_nmi_enter(void); 167extern void rcu_nmi_enter(void);
145extern void rcu_nmi_exit(void); 168extern void rcu_nmi_exit(void);
169#endif
146#else 170#else
147# define rcu_irq_enter() do { } while (0) 171# define rcu_irq_enter() do { } while (0)
148# define rcu_irq_exit() do { } while (0) 172# define rcu_irq_exit() do { } while (0)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 21a6f5d9af22..8d10aa7fd4c9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -83,16 +83,12 @@ extern struct group_info init_groups;
83#define INIT_IDS 83#define INIT_IDS
84#endif 84#endif
85 85
86#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
87/* 86/*
88 * Because of the reduced scope of CAP_SETPCAP when filesystem 87 * Because of the reduced scope of CAP_SETPCAP when filesystem
89 * capabilities are in effect, it is safe to allow CAP_SETPCAP to 88 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
90 * be available in the default configuration. 89 * be available in the default configuration.
91 */ 90 */
92# define CAP_INIT_BSET CAP_FULL_SET 91# define CAP_INIT_BSET CAP_FULL_SET
93#else
94# define CAP_INIT_BSET CAP_INIT_EFF_SET
95#endif
96 92
97#ifdef CONFIG_TREE_PREEMPT_RCU 93#ifdef CONFIG_TREE_PREEMPT_RCU
98#define INIT_TASK_RCU_PREEMPT(tsk) \ 94#define INIT_TASK_RCU_PREEMPT(tsk) \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 7ca72b74eec7..75f3f00ac1e5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -603,12 +603,6 @@ static inline void init_irq_proc(void)
603} 603}
604#endif 604#endif
605 605
606#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
607extern void debug_poll_all_shared_irqs(void);
608#else
609static inline void debug_poll_all_shared_irqs(void) { }
610#endif
611
612struct seq_file; 606struct seq_file;
613int show_interrupts(struct seq_file *p, void *v); 607int show_interrupts(struct seq_file *p, void *v);
614 608
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index b02a3f1d46a0..006bf45eae30 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -124,6 +124,6 @@
124 typecheck(unsigned long, flags); \ 124 typecheck(unsigned long, flags); \
125 raw_irqs_disabled_flags(flags); \ 125 raw_irqs_disabled_flags(flags); \
126}) 126})
127#endif /* CONFIG_X86 */ 127#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
128 128
129#endif 129#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f4e3184fa054..3fa4c590cf12 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -15,7 +15,6 @@
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <linux/log2.h> 16#include <linux/log2.h>
17#include <linux/typecheck.h> 17#include <linux/typecheck.h>
18#include <linux/ratelimit.h>
19#include <linux/dynamic_debug.h> 18#include <linux/dynamic_debug.h>
20#include <asm/byteorder.h> 19#include <asm/byteorder.h>
21#include <asm/bug.h> 20#include <asm/bug.h>
@@ -241,8 +240,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
241asmlinkage int printk(const char * fmt, ...) 240asmlinkage int printk(const char * fmt, ...)
242 __attribute__ ((format (printf, 1, 2))) __cold; 241 __attribute__ ((format (printf, 1, 2))) __cold;
243 242
244extern struct ratelimit_state printk_ratelimit_state; 243extern int __printk_ratelimit(const char *func);
245extern int printk_ratelimit(void); 244#define printk_ratelimit() __printk_ratelimit(__func__)
246extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, 245extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
247 unsigned int interval_msec); 246 unsigned int interval_msec);
248 247
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 190c37854870..f78f83d7663f 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -26,14 +26,15 @@
26 26
27/* Auxiliary data to use in generating the audit record. */ 27/* Auxiliary data to use in generating the audit record. */
28struct common_audit_data { 28struct common_audit_data {
29 char type; 29 char type;
30#define LSM_AUDIT_DATA_FS 1 30#define LSM_AUDIT_DATA_FS 1
31#define LSM_AUDIT_DATA_NET 2 31#define LSM_AUDIT_DATA_NET 2
32#define LSM_AUDIT_DATA_CAP 3 32#define LSM_AUDIT_DATA_CAP 3
33#define LSM_AUDIT_DATA_IPC 4 33#define LSM_AUDIT_DATA_IPC 4
34#define LSM_AUDIT_DATA_TASK 5 34#define LSM_AUDIT_DATA_TASK 5
35#define LSM_AUDIT_DATA_KEY 6 35#define LSM_AUDIT_DATA_KEY 6
36#define LSM_AUDIT_NO_AUDIT 7 36#define LSM_AUDIT_NO_AUDIT 7
37#define LSM_AUDIT_DATA_KMOD 8
37 struct task_struct *tsk; 38 struct task_struct *tsk;
38 union { 39 union {
39 struct { 40 struct {
@@ -66,6 +67,7 @@ struct common_audit_data {
66 char *key_desc; 67 char *key_desc;
67 } key_struct; 68 } key_struct;
68#endif 69#endif
70 char *kmod_name;
69 } u; 71 } u;
70 /* this union contains LSM specific data */ 72 /* this union contains LSM specific data */
71 union { 73 union {
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h
index f95466343fb2..955d30fc6a27 100644
--- a/include/linux/mfd/wm831x/regulator.h
+++ b/include/linux/mfd/wm831x/regulator.h
@@ -1212,7 +1212,7 @@
1212#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ 1212#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */
1213#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ 1213#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
1214 1214
1215#define WM831X_ISINK_MAX_ISEL 56 1215#define WM831X_ISINK_MAX_ISEL 55
1216extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL]; 1216extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
1217 1217
1218#endif 1218#endif
diff --git a/include/linux/net.h b/include/linux/net.h
index 6ce87663551c..5e8083cacc8b 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -280,6 +280,7 @@ extern int kernel_sock_shutdown(struct socket *sock,
280 280
281#ifdef CONFIG_SYSCTL 281#ifdef CONFIG_SYSCTL
282#include <linux/sysctl.h> 282#include <linux/sysctl.h>
283#include <linux/ratelimit.h>
283extern struct ratelimit_state net_ratelimit_state; 284extern struct ratelimit_state net_ratelimit_state;
284#endif 285#endif
285 286
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 84cf1f3b7838..daecca3c8300 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1633,6 +1633,8 @@
1633#define PCI_DEVICE_ID_O2_6730 0x673a 1633#define PCI_DEVICE_ID_O2_6730 0x673a
1634#define PCI_DEVICE_ID_O2_6832 0x6832 1634#define PCI_DEVICE_ID_O2_6832 0x6832
1635#define PCI_DEVICE_ID_O2_6836 0x6836 1635#define PCI_DEVICE_ID_O2_6836 0x6836
1636#define PCI_DEVICE_ID_O2_6812 0x6872
1637#define PCI_DEVICE_ID_O2_6933 0x6933
1636 1638
1637#define PCI_VENDOR_ID_3DFX 0x121a 1639#define PCI_VENDOR_ID_3DFX 0x121a
1638#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 1640#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 065a3652a3ea..67608161df6b 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type)
147 if (old != ACL_NOT_CACHED) 147 if (old != ACL_NOT_CACHED)
148 posix_acl_release(old); 148 posix_acl_release(old);
149} 149}
150
151static inline void forget_all_cached_acls(struct inode *inode)
152{
153 struct posix_acl *old_access, *old_default;
154 spin_lock(&inode->i_lock);
155 old_access = inode->i_acl;
156 old_default = inode->i_default_acl;
157 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
158 spin_unlock(&inode->i_lock);
159 if (old_access != ACL_NOT_CACHED)
160 posix_acl_release(old_access);
161 if (old_default != ACL_NOT_CACHED)
162 posix_acl_release(old_default);
163}
150#endif 164#endif
151 165
152static inline void cache_no_acl(struct inode *inode) 166static inline void cache_no_acl(struct inode *inode)
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 78c48895b12a..ce9a9b2e5cd4 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
376 return flags >> _DQUOT_STATE_FLAGS; 376 return flags >> _DQUOT_STATE_FLAGS;
377} 377}
378 378
379#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
380extern void quota_send_warning(short type, unsigned int id, dev_t dev,
381 const char warntype);
382#else
383static inline void quota_send_warning(short type, unsigned int id, dev_t dev,
384 const char warntype)
385{
386 return;
387}
388#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */
389
379struct quota_info { 390struct quota_info {
380 unsigned int flags; /* Flags for diskquotas on this device */ 391 unsigned int flags; /* Flags for diskquotas on this device */
381 struct mutex dqio_mutex; /* lock device while I/O in progress */ 392 struct mutex dqio_mutex; /* lock device while I/O in progress */
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 00044b856453..668cf1bef030 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -1,20 +1,31 @@
1#ifndef _LINUX_RATELIMIT_H 1#ifndef _LINUX_RATELIMIT_H
2#define _LINUX_RATELIMIT_H 2#define _LINUX_RATELIMIT_H
3
3#include <linux/param.h> 4#include <linux/param.h>
5#include <linux/spinlock_types.h>
4 6
5#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) 7#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
6#define DEFAULT_RATELIMIT_BURST 10 8#define DEFAULT_RATELIMIT_BURST 10
7 9
8struct ratelimit_state { 10struct ratelimit_state {
9 int interval; 11 spinlock_t lock; /* protect the state */
10 int burst; 12
11 int printed; 13 int interval;
12 int missed; 14 int burst;
13 unsigned long begin; 15 int printed;
16 int missed;
17 unsigned long begin;
14}; 18};
15 19
16#define DEFINE_RATELIMIT_STATE(name, interval, burst) \ 20#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
17 struct ratelimit_state name = {interval, burst,} 21 \
22 struct ratelimit_state name = { \
23 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
24 .interval = interval_init, \
25 .burst = burst_init, \
26 }
27
28extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
29#define __ratelimit(state) ___ratelimit(state, __func__)
18 30
19extern int __ratelimit(struct ratelimit_state *rs); 31#endif /* _LINUX_RATELIMIT_H */
20#endif
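
With the spinlock folded into struct ratelimit_state and initialized by DEFINE_RATELIMIT_STATE(), a driver-local rate limit needs no separate lock of its own. A sketch with an illustrative state name and message:

/* Illustrative use of the reworked ratelimit API. */
static DEFINE_RATELIMIT_STATE(demo_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void demo_report_overrun(void)
{
	/* __ratelimit() now expands to ___ratelimit(&demo_rs, __func__) */
	if (__ratelimit(&demo_rs))
		printk(KERN_WARNING "demo: FIFO overrun\n");
}

printk_ratelimit() in include/linux/kernel.h now passes __func__ the same way, so existing callers of that macro need no change.
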
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 3ebd0b7bcb08..24440f4bf476 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,6 @@ struct rcu_head {
52}; 52};
53 53
54/* Exported common interfaces */ 54/* Exported common interfaces */
55#ifdef CONFIG_TREE_PREEMPT_RCU
56extern void synchronize_rcu(void);
57#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
58#define synchronize_rcu synchronize_sched
59#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
60extern void synchronize_rcu_bh(void); 55extern void synchronize_rcu_bh(void);
61extern void synchronize_sched(void); 56extern void synchronize_sched(void);
62extern void rcu_barrier(void); 57extern void rcu_barrier(void);
@@ -67,12 +62,11 @@ extern int sched_expedited_torture_stats(char *page);
67 62
68/* Internal to kernel */ 63/* Internal to kernel */
69extern void rcu_init(void); 64extern void rcu_init(void);
70extern void rcu_scheduler_starting(void);
71extern int rcu_needs_cpu(int cpu);
72extern int rcu_scheduler_active;
73 65
74#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 66#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
75#include <linux/rcutree.h> 67#include <linux/rcutree.h>
68#elif defined(CONFIG_TINY_RCU)
69#include <linux/rcutiny.h>
76#else 70#else
77#error "Unknown RCU implementation specified to kernel configuration" 71#error "Unknown RCU implementation specified to kernel configuration"
78#endif 72#endif
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
new file mode 100644
index 000000000000..c4ba9a78721e
--- /dev/null
+++ b/include/linux/rcutiny.h
@@ -0,0 +1,104 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU
24 */
25#ifndef __LINUX_TINY_H
26#define __LINUX_TINY_H
27
28#include <linux/cache.h>
29
30void rcu_sched_qs(int cpu);
31void rcu_bh_qs(int cpu);
32
33#define __rcu_read_lock() preempt_disable()
34#define __rcu_read_unlock() preempt_enable()
35#define __rcu_read_lock_bh() local_bh_disable()
36#define __rcu_read_unlock_bh() local_bh_enable()
37#define call_rcu_sched call_rcu
38
39#define rcu_init_sched() do { } while (0)
40extern void rcu_check_callbacks(int cpu, int user);
41
42static inline int rcu_needs_cpu(int cpu)
43{
44 return 0;
45}
46
47/*
48 * Return the number of grace periods.
49 */
50static inline long rcu_batches_completed(void)
51{
52 return 0;
53}
54
55/*
56 * Return the number of bottom-half grace periods.
57 */
58static inline long rcu_batches_completed_bh(void)
59{
60 return 0;
61}
62
63extern int rcu_expedited_torture_stats(char *page);
64
65#define synchronize_rcu synchronize_sched
66
67static inline void synchronize_rcu_expedited(void)
68{
69 synchronize_sched();
70}
71
72static inline void synchronize_rcu_bh_expedited(void)
73{
74 synchronize_sched();
75}
76
77struct notifier_block;
78
79#ifdef CONFIG_NO_HZ
80
81extern void rcu_enter_nohz(void);
82extern void rcu_exit_nohz(void);
83
84#else /* #ifdef CONFIG_NO_HZ */
85
86static inline void rcu_enter_nohz(void)
87{
88}
89
90static inline void rcu_exit_nohz(void)
91{
92}
93
94#endif /* #else #ifdef CONFIG_NO_HZ */
95
96static inline void rcu_scheduler_starting(void)
97{
98}
99
100static inline void exit_rcu(void)
101{
102}
103
104#endif /* __LINUX_RCUTINY_H */
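
Callers keep using the generic RCU API; under TINY_RCU the read-side markers above collapse to preempt_disable()/preempt_enable() and synchronize_rcu maps to synchronize_sched. A minimal, hypothetical reader/updater pair written only against that generic API:

/* Hypothetical example; updaters are assumed to be serialized elsewhere. */
struct demo { int val; };
static struct demo *demo_ptr;

static int demo_read(void)
{
	struct demo *p;
	int val = -1;

	rcu_read_lock();		/* preempt_disable() under TINY_RCU */
	p = rcu_dereference(demo_ptr);
	if (p)
		val = p->val;
	rcu_read_unlock();
	return val;
}

static void demo_replace(struct demo *new)
{
	struct demo *old = demo_ptr;

	rcu_assign_pointer(demo_ptr, new);
	synchronize_rcu();		/* == synchronize_sched here */
	kfree(old);
}
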
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 9642c6bcb399..c93eee5911b0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,15 +34,15 @@ struct notifier_block;
34 34
35extern void rcu_sched_qs(int cpu); 35extern void rcu_sched_qs(int cpu);
36extern void rcu_bh_qs(int cpu); 36extern void rcu_bh_qs(int cpu);
37extern int rcu_cpu_notify(struct notifier_block *self,
38 unsigned long action, void *hcpu);
39extern int rcu_needs_cpu(int cpu); 37extern int rcu_needs_cpu(int cpu);
38extern void rcu_scheduler_starting(void);
40extern int rcu_expedited_torture_stats(char *page); 39extern int rcu_expedited_torture_stats(char *page);
41 40
42#ifdef CONFIG_TREE_PREEMPT_RCU 41#ifdef CONFIG_TREE_PREEMPT_RCU
43 42
44extern void __rcu_read_lock(void); 43extern void __rcu_read_lock(void);
45extern void __rcu_read_unlock(void); 44extern void __rcu_read_unlock(void);
45extern void synchronize_rcu(void);
46extern void exit_rcu(void); 46extern void exit_rcu(void);
47 47
48#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 48#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
@@ -57,7 +57,7 @@ static inline void __rcu_read_unlock(void)
57 preempt_enable(); 57 preempt_enable();
58} 58}
59 59
60#define __synchronize_sched() synchronize_rcu() 60#define synchronize_rcu synchronize_sched
61 61
62static inline void exit_rcu(void) 62static inline void exit_rcu(void)
63{ 63{
@@ -83,7 +83,6 @@ static inline void synchronize_rcu_bh_expedited(void)
83 synchronize_sched_expedited(); 83 synchronize_sched_expedited();
84} 84}
85 85
86extern void __rcu_init(void);
87extern void rcu_check_callbacks(int cpu, int user); 86extern void rcu_check_callbacks(int cpu, int user);
88 87
89extern long rcu_batches_completed(void); 88extern long rcu_batches_completed(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75e6e60bf583..882dc48163b4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1421,17 +1421,17 @@ struct task_struct {
1421#endif 1421#endif
1422#ifdef CONFIG_TRACE_IRQFLAGS 1422#ifdef CONFIG_TRACE_IRQFLAGS
1423 unsigned int irq_events; 1423 unsigned int irq_events;
1424 int hardirqs_enabled;
1425 unsigned long hardirq_enable_ip; 1424 unsigned long hardirq_enable_ip;
1426 unsigned int hardirq_enable_event;
1427 unsigned long hardirq_disable_ip; 1425 unsigned long hardirq_disable_ip;
1426 unsigned int hardirq_enable_event;
1428 unsigned int hardirq_disable_event; 1427 unsigned int hardirq_disable_event;
1429 int softirqs_enabled; 1428 int hardirqs_enabled;
1429 int hardirq_context;
1430 unsigned long softirq_disable_ip; 1430 unsigned long softirq_disable_ip;
1431 unsigned int softirq_disable_event;
1432 unsigned long softirq_enable_ip; 1431 unsigned long softirq_enable_ip;
1432 unsigned int softirq_disable_event;
1433 unsigned int softirq_enable_event; 1433 unsigned int softirq_enable_event;
1434 int hardirq_context; 1434 int softirqs_enabled;
1435 int softirq_context; 1435 int softirq_context;
1436#endif 1436#endif
1437#ifdef CONFIG_LOCKDEP 1437#ifdef CONFIG_LOCKDEP
@@ -2086,11 +2086,18 @@ static inline int is_si_special(const struct siginfo *info)
2086 return info <= SEND_SIG_FORCED; 2086 return info <= SEND_SIG_FORCED;
2087} 2087}
2088 2088
2089/* True if we are on the alternate signal stack. */ 2089/*
2090 2090 * True if we are on the alternate signal stack.
2091 */
2091static inline int on_sig_stack(unsigned long sp) 2092static inline int on_sig_stack(unsigned long sp)
2092{ 2093{
2093 return (sp - current->sas_ss_sp < current->sas_ss_size); 2094#ifdef CONFIG_STACK_GROWSUP
2095 return sp >= current->sas_ss_sp &&
2096 sp - current->sas_ss_sp < current->sas_ss_size;
2097#else
2098 return sp > current->sas_ss_sp &&
2099 sp - current->sas_ss_sp <= current->sas_ss_size;
2100#endif
2094} 2101}
2095 2102
2096static inline int sas_ss_flags(unsigned long sp) 2103static inline int sas_ss_flags(unsigned long sp)
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index d2c5ed845bcc..33406174cbe8 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -1,6 +1,15 @@
1#ifndef _LINUX_SECUREBITS_H 1#ifndef _LINUX_SECUREBITS_H
2#define _LINUX_SECUREBITS_H 1 2#define _LINUX_SECUREBITS_H 1
3 3
4/* Each securesetting is implemented using two bits. One bit specifies
5 whether the setting is on or off. The other bit specifies whether the
6 setting is locked or not. A setting which is locked cannot be
7 changed from user-level. */
8#define issecure_mask(X) (1 << (X))
9#ifdef __KERNEL__
10#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
11#endif
12
4#define SECUREBITS_DEFAULT 0x00000000 13#define SECUREBITS_DEFAULT 0x00000000
5 14
6/* When set UID 0 has no special privileges. When unset, we support 15/* When set UID 0 has no special privileges. When unset, we support
@@ -12,6 +21,9 @@
12#define SECURE_NOROOT 0 21#define SECURE_NOROOT 0
13#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ 22#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */
14 23
24#define SECBIT_NOROOT (issecure_mask(SECURE_NOROOT))
25#define SECBIT_NOROOT_LOCKED (issecure_mask(SECURE_NOROOT_LOCKED))
26
15/* When set, setuid to/from uid 0 does not trigger capability-"fixup". 27/* When set, setuid to/from uid 0 does not trigger capability-"fixup".
16 When unset, to provide compatibility with old programs relying on 28 When unset, to provide compatibility with old programs relying on
17 set*uid to gain/lose privilege, transitions to/from uid 0 cause 29 set*uid to gain/lose privilege, transitions to/from uid 0 cause
@@ -19,6 +31,10 @@
19#define SECURE_NO_SETUID_FIXUP 2 31#define SECURE_NO_SETUID_FIXUP 2
20#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ 32#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */
21 33
34#define SECBIT_NO_SETUID_FIXUP (issecure_mask(SECURE_NO_SETUID_FIXUP))
35#define SECBIT_NO_SETUID_FIXUP_LOCKED \
36 (issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED))
37
22/* When set, a process can retain its capabilities even after 38/* When set, a process can retain its capabilities even after
23 transitioning to a non-root user (the set-uid fixup suppressed by 39 transitioning to a non-root user (the set-uid fixup suppressed by
24 bit 2). Bit-4 is cleared when a process calls exec(); setting both 40 bit 2). Bit-4 is cleared when a process calls exec(); setting both
@@ -27,12 +43,8 @@
27#define SECURE_KEEP_CAPS 4 43#define SECURE_KEEP_CAPS 4
28#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ 44#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */
29 45
30/* Each securesetting is implemented using two bits. One bit specifies 46#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS))
31 whether the setting is on or off. The other bit specify whether the 47#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
32 setting is locked or not. A setting which is locked cannot be
33 changed from user-level. */
34#define issecure_mask(X) (1 << (X))
35#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
36 48
37#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ 49#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
38 issecure_mask(SECURE_NO_SETUID_FIXUP) | \ 50 issecure_mask(SECURE_NO_SETUID_FIXUP) | \
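
Hoisting issecure_mask() above the bit definitions (and exporting the header via the Kbuild hunk earlier) lets the SECBIT_* masks be built by userspace as well as the kernel. A hypothetical userspace fragment; changing securebits requires CAP_SETPCAP in the caller:

/* Hypothetical userspace sketch. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/securebits.h>

static int keep_caps_across_setuid(void)
{
	/* PR_SET_SECUREBITS needs CAP_SETPCAP. */
	if (prctl(PR_SET_SECUREBITS, SECBIT_KEEP_CAPS) == -1) {
		perror("PR_SET_SECUREBITS");
		return -1;
	}
	return 0;
}
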
diff --git a/include/linux/security.h b/include/linux/security.h
index 239e40d0450b..466cbadbd1ef 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -447,6 +447,22 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
447 * @new_dir contains the path structure for parent of the new link. 447 * @new_dir contains the path structure for parent of the new link.
448 * @new_dentry contains the dentry structure of the new link. 448 * @new_dentry contains the dentry structure of the new link.
449 * Return 0 if permission is granted. 449 * Return 0 if permission is granted.
450 * @path_chmod:
451 * Check for permission to change DAC's permission of a file or directory.
452 * @dentry contains the dentry structure.
453 * @mnt contains the vfsmnt structure.
454 * @mode contains DAC's mode.
455 * Return 0 if permission is granted.
456 * @path_chown:
457 * Check for permission to change owner/group of a file or directory.
458 * @path contains the path structure.
459 * @uid contains new owner's ID.
460 * @gid contains new group's ID.
461 * Return 0 if permission is granted.
462 * @path_chroot:
463 * Check for permission to change root directory.
464 * @path contains the path structure.
465 * Return 0 if permission is granted.
450 * @inode_readlink: 466 * @inode_readlink:
451 * Check the permission to read the symbolic link. 467 * Check the permission to read the symbolic link.
452 * @dentry contains the dentry structure for the file link. 468 * @dentry contains the dentry structure for the file link.
@@ -690,6 +706,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
690 * @kernel_module_request: 706 * @kernel_module_request:
691 * Ability to trigger the kernel to automatically upcall to userspace for 707 * Ability to trigger the kernel to automatically upcall to userspace for
692 * userspace to load a kernel module with the given name. 708 * userspace to load a kernel module with the given name.
709 * @kmod_name name of the module requested by the kernel
693 * Return 0 if successful. 710 * Return 0 if successful.
694 * @task_setuid: 711 * @task_setuid:
695 * Check permission before setting one or more of the user identity 712 * Check permission before setting one or more of the user identity
@@ -1488,6 +1505,10 @@ struct security_operations {
1488 struct dentry *new_dentry); 1505 struct dentry *new_dentry);
1489 int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, 1506 int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
1490 struct path *new_dir, struct dentry *new_dentry); 1507 struct path *new_dir, struct dentry *new_dentry);
1508 int (*path_chmod) (struct dentry *dentry, struct vfsmount *mnt,
1509 mode_t mode);
1510 int (*path_chown) (struct path *path, uid_t uid, gid_t gid);
1511 int (*path_chroot) (struct path *path);
1491#endif 1512#endif
1492 1513
1493 int (*inode_alloc_security) (struct inode *inode); 1514 int (*inode_alloc_security) (struct inode *inode);
@@ -1557,7 +1578,7 @@ struct security_operations {
1557 void (*cred_transfer)(struct cred *new, const struct cred *old); 1578 void (*cred_transfer)(struct cred *new, const struct cred *old);
1558 int (*kernel_act_as)(struct cred *new, u32 secid); 1579 int (*kernel_act_as)(struct cred *new, u32 secid);
1559 int (*kernel_create_files_as)(struct cred *new, struct inode *inode); 1580 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1560 int (*kernel_module_request)(void); 1581 int (*kernel_module_request)(char *kmod_name);
1561 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); 1582 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
1562 int (*task_fix_setuid) (struct cred *new, const struct cred *old, 1583 int (*task_fix_setuid) (struct cred *new, const struct cred *old,
1563 int flags); 1584 int flags);
@@ -1822,7 +1843,7 @@ void security_commit_creds(struct cred *new, const struct cred *old);
1822void security_transfer_creds(struct cred *new, const struct cred *old); 1843void security_transfer_creds(struct cred *new, const struct cred *old);
1823int security_kernel_act_as(struct cred *new, u32 secid); 1844int security_kernel_act_as(struct cred *new, u32 secid);
1824int security_kernel_create_files_as(struct cred *new, struct inode *inode); 1845int security_kernel_create_files_as(struct cred *new, struct inode *inode);
1825int security_kernel_module_request(void); 1846int security_kernel_module_request(char *kmod_name);
1826int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); 1847int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
1827int security_task_fix_setuid(struct cred *new, const struct cred *old, 1848int security_task_fix_setuid(struct cred *new, const struct cred *old,
1828 int flags); 1849 int flags);
@@ -2387,7 +2408,7 @@ static inline int security_kernel_create_files_as(struct cred *cred,
2387 return 0; 2408 return 0;
2388} 2409}
2389 2410
2390static inline int security_kernel_module_request(void) 2411static inline int security_kernel_module_request(char *kmod_name)
2391{ 2412{
2392 return 0; 2413 return 0;
2393} 2414}
@@ -2952,6 +2973,10 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
2952 struct dentry *new_dentry); 2973 struct dentry *new_dentry);
2953int security_path_rename(struct path *old_dir, struct dentry *old_dentry, 2974int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
2954 struct path *new_dir, struct dentry *new_dentry); 2975 struct path *new_dir, struct dentry *new_dentry);
2976int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
2977 mode_t mode);
2978int security_path_chown(struct path *path, uid_t uid, gid_t gid);
2979int security_path_chroot(struct path *path);
2955#else /* CONFIG_SECURITY_PATH */ 2980#else /* CONFIG_SECURITY_PATH */
2956static inline int security_path_unlink(struct path *dir, struct dentry *dentry) 2981static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
2957{ 2982{
@@ -3001,6 +3026,23 @@ static inline int security_path_rename(struct path *old_dir,
3001{ 3026{
3002 return 0; 3027 return 0;
3003} 3028}
3029
3030static inline int security_path_chmod(struct dentry *dentry,
3031 struct vfsmount *mnt,
3032 mode_t mode)
3033{
3034 return 0;
3035}
3036
3037static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid)
3038{
3039 return 0;
3040}
3041
3042static inline int security_path_chroot(struct path *path)
3043{
3044 return 0;
3045}
3004#endif /* CONFIG_SECURITY_PATH */ 3046#endif /* CONFIG_SECURITY_PATH */
3005 3047
3006#ifdef CONFIG_KEYS 3048#ifdef CONFIG_KEYS
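
The path_chmod/path_chown/path_chroot slots exist only under CONFIG_SECURITY_PATH and give pathname-based modules a veto on the operations wired up in fs/open.c above. A hedged sketch of the module side, with hypothetical names and a trivially permissive policy:

/* Hypothetical LSM fragment (CONFIG_SECURITY_PATH=y assumed). */
static int demo_path_chown(struct path *path, uid_t uid, gid_t gid)
{
	/* a real module would consult its policy here */
	return 0;			/* 0 means permission granted */
}

static int demo_path_chroot(struct path *path)
{
	return 0;
}

static struct security_operations demo_security_ops = {
	.name		= "demo",
	.path_chown	= demo_path_chown,
	.path_chroot	= demo_path_chroot,
};
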
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 39c64bae776d..7a0570e6a596 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -76,6 +76,9 @@ void smp_call_function_many(const struct cpumask *mask,
76void __smp_call_function_single(int cpuid, struct call_single_data *data, 76void __smp_call_function_single(int cpuid, struct call_single_data *data,
77 int wait); 77 int wait);
78 78
79int smp_call_function_any(const struct cpumask *mask,
80 void (*func)(void *info), void *info, int wait);
81
79/* 82/*
80 * Generic and arch helpers 83 * Generic and arch helpers
81 */ 84 */
@@ -137,9 +140,15 @@ static inline void smp_send_reschedule(int cpu) { }
137#define smp_prepare_boot_cpu() do {} while (0) 140#define smp_prepare_boot_cpu() do {} while (0)
138#define smp_call_function_many(mask, func, info, wait) \ 141#define smp_call_function_many(mask, func, info, wait) \
139 (up_smp_call_function(func, info)) 142 (up_smp_call_function(func, info))
140static inline void init_call_single_data(void) 143static inline void init_call_single_data(void) { }
144
145static inline int
146smp_call_function_any(const struct cpumask *mask, void (*func)(void *info),
147 void *info, int wait)
141{ 148{
149 return smp_call_function_single(0, func, info, wait);
142} 150}
151
143#endif /* !SMP */ 152#endif /* !SMP */
144 153
145/* 154/*
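
smp_call_function_any() runs func on one CPU from the mask, using the local CPU when it qualifies so no IPI is needed; the !SMP stub above simply falls back to smp_call_function_single(0, ...). A hypothetical caller:

/* Hypothetical example: fetch a value that any online CPU can produce. */
static void demo_sample(void *info)
{
	*(u32 *)info = demo_read_hw_counter();	/* hypothetical helper */
}

static u32 demo_sample_any(void)
{
	u32 val = 0;

	/* wait == 1: return only after demo_sample() has run */
	smp_call_function_any(cpu_online_mask, demo_sample, &val, 1);
	return val;
}
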
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 813be59bf345..2ea1dd1ba21c 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
24 return 0; 24 return 0;
25} 25}
26 26
27extern void __lockfunc lock_kernel(void) __acquires(kernel_lock); 27extern void __lockfunc
28extern void __lockfunc unlock_kernel(void) __releases(kernel_lock); 28_lock_kernel(const char *func, const char *file, int line)
29__acquires(kernel_lock);
30
31extern void __lockfunc
32_unlock_kernel(const char *func, const char *file, int line)
33__releases(kernel_lock);
34
35#define lock_kernel() do { \
36 _lock_kernel(__func__, __FILE__, __LINE__); \
37} while (0)
38
39#define unlock_kernel() do { \
40 _unlock_kernel(__func__, __FILE__, __LINE__); \
41} while (0)
29 42
30/* 43/*
31 * Various legacy drivers don't really need the BKL in a specific 44 * Various legacy drivers don't really need the BKL in a specific
@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void)
41 54
42#else 55#else
43 56
44#define lock_kernel() do { } while(0) 57#define lock_kernel()
45#define unlock_kernel() do { } while(0) 58#define unlock_kernel()
46#define release_kernel_lock(task) do { } while(0) 59#define release_kernel_lock(task) do { } while(0)
47#define cycle_kernel_lock() do { } while(0) 60#define cycle_kernel_lock() do { } while(0)
48#define reacquire_kernel_lock(task) 0 61#define reacquire_kernel_lock(task) 0
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index f0ca7a7a1757..71dccfeb0d88 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -79,8 +79,6 @@
79 */ 79 */
80#include <linux/spinlock_types.h> 80#include <linux/spinlock_types.h>
81 81
82extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
83
84/* 82/*
85 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): 83 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
86 */ 84 */
@@ -102,7 +100,7 @@ do { \
102 100
103#else 101#else
104# define spin_lock_init(lock) \ 102# define spin_lock_init(lock) \
105 do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) 103 do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
106#endif 104#endif
107 105
108#ifdef CONFIG_DEBUG_SPINLOCK 106#ifdef CONFIG_DEBUG_SPINLOCK
@@ -116,7 +114,7 @@ do { \
116} while (0) 114} while (0)
117#else 115#else
118# define rwlock_init(lock) \ 116# define rwlock_init(lock) \
119 do { *(lock) = RW_LOCK_UNLOCKED; } while (0) 117 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
120#endif 118#endif
121 119
122#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) 120#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
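
[Editor's note] The UP-nondebug initializers now expand through the per-lock __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() forms instead of the shared SPIN_LOCK_UNLOCKED/RW_LOCK_UNLOCKED constants. Callers are unaffected; a short hedged sketch, with my_lock, my_static_lock and init_and_use() as illustrative names:

#include <linux/spinlock.h>

static spinlock_t my_lock;
static DEFINE_SPINLOCK(my_static_lock);         /* compile-time initialization still works */

static void init_and_use(void)
{
        spin_lock_init(&my_lock);       /* now initializes via __SPIN_LOCK_UNLOCKED() on UP-nondebug */

        spin_lock(&my_lock);
        /* ... critical section ... */
        spin_unlock(&my_lock);
}
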
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 7a7e18fc2415..8264a7f459bc 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,137 +60,118 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 __releases(lock); 61 __releases(lock);
62 62
63/* 63#ifdef CONFIG_INLINE_SPIN_LOCK
64 * We inline the unlock functions in the nondebug case:
65 */
66#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
67#define __always_inline__spin_unlock
68#define __always_inline__read_unlock
69#define __always_inline__write_unlock
70#define __always_inline__spin_unlock_irq
71#define __always_inline__read_unlock_irq
72#define __always_inline__write_unlock_irq
73#endif
74
75#ifndef CONFIG_DEBUG_SPINLOCK
76#ifndef CONFIG_GENERIC_LOCKBREAK
77
78#ifdef __always_inline__spin_lock
79#define _spin_lock(lock) __spin_lock(lock) 64#define _spin_lock(lock) __spin_lock(lock)
80#endif 65#endif
81 66
82#ifdef __always_inline__read_lock 67#ifdef CONFIG_INLINE_READ_LOCK
83#define _read_lock(lock) __read_lock(lock) 68#define _read_lock(lock) __read_lock(lock)
84#endif 69#endif
85 70
86#ifdef __always_inline__write_lock 71#ifdef CONFIG_INLINE_WRITE_LOCK
87#define _write_lock(lock) __write_lock(lock) 72#define _write_lock(lock) __write_lock(lock)
88#endif 73#endif
89 74
90#ifdef __always_inline__spin_lock_bh 75#ifdef CONFIG_INLINE_SPIN_LOCK_BH
91#define _spin_lock_bh(lock) __spin_lock_bh(lock) 76#define _spin_lock_bh(lock) __spin_lock_bh(lock)
92#endif 77#endif
93 78
94#ifdef __always_inline__read_lock_bh 79#ifdef CONFIG_INLINE_READ_LOCK_BH
95#define _read_lock_bh(lock) __read_lock_bh(lock) 80#define _read_lock_bh(lock) __read_lock_bh(lock)
96#endif 81#endif
97 82
98#ifdef __always_inline__write_lock_bh 83#ifdef CONFIG_INLINE_WRITE_LOCK_BH
99#define _write_lock_bh(lock) __write_lock_bh(lock) 84#define _write_lock_bh(lock) __write_lock_bh(lock)
100#endif 85#endif
101 86
102#ifdef __always_inline__spin_lock_irq 87#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
103#define _spin_lock_irq(lock) __spin_lock_irq(lock) 88#define _spin_lock_irq(lock) __spin_lock_irq(lock)
104#endif 89#endif
105 90
106#ifdef __always_inline__read_lock_irq 91#ifdef CONFIG_INLINE_READ_LOCK_IRQ
107#define _read_lock_irq(lock) __read_lock_irq(lock) 92#define _read_lock_irq(lock) __read_lock_irq(lock)
108#endif 93#endif
109 94
110#ifdef __always_inline__write_lock_irq 95#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
111#define _write_lock_irq(lock) __write_lock_irq(lock) 96#define _write_lock_irq(lock) __write_lock_irq(lock)
112#endif 97#endif
113 98
114#ifdef __always_inline__spin_lock_irqsave 99#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
115#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) 100#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
116#endif 101#endif
117 102
118#ifdef __always_inline__read_lock_irqsave 103#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
119#define _read_lock_irqsave(lock) __read_lock_irqsave(lock) 104#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
120#endif 105#endif
121 106
122#ifdef __always_inline__write_lock_irqsave 107#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
123#define _write_lock_irqsave(lock) __write_lock_irqsave(lock) 108#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
124#endif 109#endif
125 110
126#endif /* !CONFIG_GENERIC_LOCKBREAK */ 111#ifdef CONFIG_INLINE_SPIN_TRYLOCK
127
128#ifdef __always_inline__spin_trylock
129#define _spin_trylock(lock) __spin_trylock(lock) 112#define _spin_trylock(lock) __spin_trylock(lock)
130#endif 113#endif
131 114
132#ifdef __always_inline__read_trylock 115#ifdef CONFIG_INLINE_READ_TRYLOCK
133#define _read_trylock(lock) __read_trylock(lock) 116#define _read_trylock(lock) __read_trylock(lock)
134#endif 117#endif
135 118
136#ifdef __always_inline__write_trylock 119#ifdef CONFIG_INLINE_WRITE_TRYLOCK
137#define _write_trylock(lock) __write_trylock(lock) 120#define _write_trylock(lock) __write_trylock(lock)
138#endif 121#endif
139 122
140#ifdef __always_inline__spin_trylock_bh 123#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
141#define _spin_trylock_bh(lock) __spin_trylock_bh(lock) 124#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
142#endif 125#endif
143 126
144#ifdef __always_inline__spin_unlock 127#ifdef CONFIG_INLINE_SPIN_UNLOCK
145#define _spin_unlock(lock) __spin_unlock(lock) 128#define _spin_unlock(lock) __spin_unlock(lock)
146#endif 129#endif
147 130
148#ifdef __always_inline__read_unlock 131#ifdef CONFIG_INLINE_READ_UNLOCK
149#define _read_unlock(lock) __read_unlock(lock) 132#define _read_unlock(lock) __read_unlock(lock)
150#endif 133#endif
151 134
152#ifdef __always_inline__write_unlock 135#ifdef CONFIG_INLINE_WRITE_UNLOCK
153#define _write_unlock(lock) __write_unlock(lock) 136#define _write_unlock(lock) __write_unlock(lock)
154#endif 137#endif
155 138
156#ifdef __always_inline__spin_unlock_bh 139#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
157#define _spin_unlock_bh(lock) __spin_unlock_bh(lock) 140#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
158#endif 141#endif
159 142
160#ifdef __always_inline__read_unlock_bh 143#ifdef CONFIG_INLINE_READ_UNLOCK_BH
161#define _read_unlock_bh(lock) __read_unlock_bh(lock) 144#define _read_unlock_bh(lock) __read_unlock_bh(lock)
162#endif 145#endif
163 146
164#ifdef __always_inline__write_unlock_bh 147#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
165#define _write_unlock_bh(lock) __write_unlock_bh(lock) 148#define _write_unlock_bh(lock) __write_unlock_bh(lock)
166#endif 149#endif
167 150
168#ifdef __always_inline__spin_unlock_irq 151#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
169#define _spin_unlock_irq(lock) __spin_unlock_irq(lock) 152#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
170#endif 153#endif
171 154
172#ifdef __always_inline__read_unlock_irq 155#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
173#define _read_unlock_irq(lock) __read_unlock_irq(lock) 156#define _read_unlock_irq(lock) __read_unlock_irq(lock)
174#endif 157#endif
175 158
176#ifdef __always_inline__write_unlock_irq 159#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
177#define _write_unlock_irq(lock) __write_unlock_irq(lock) 160#define _write_unlock_irq(lock) __write_unlock_irq(lock)
178#endif 161#endif
179 162
180#ifdef __always_inline__spin_unlock_irqrestore 163#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
181#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) 164#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
182#endif 165#endif
183 166
184#ifdef __always_inline__read_unlock_irqrestore 167#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
185#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) 168#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
186#endif 169#endif
187 170
188#ifdef __always_inline__write_unlock_irqrestore 171#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
189#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) 172#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
190#endif 173#endif
191 174
192#endif /* CONFIG_DEBUG_SPINLOCK */
193
194static inline int __spin_trylock(spinlock_t *lock) 175static inline int __spin_trylock(spinlock_t *lock)
195{ 176{
196 preempt_disable(); 177 preempt_disable();
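
[Editor's note] The ad-hoc __always_inline__* defines are replaced by CONFIG_INLINE_* symbols computed in Kconfig (see kernel/Kconfig.locks later in this patch), so whether a given lock operation is inlined or stays out of line is decided purely by configuration. A hedged sketch of the effect from a caller's point of view; example() is an illustrative name, and the out-of-line bodies conventionally live in kernel/spinlock.c:

#include <linux/spinlock.h>

/*
 * On SMP, spin_lock(lock) maps to _spin_lock(lock).  With
 * CONFIG_INLINE_SPIN_LOCK=y that is the inline __spin_lock() shown in
 * this header; otherwise it is the exported out-of-line function.
 */
static void example(spinlock_t *lock)
{
        spin_lock(lock);        /* inlined or a function call, per CONFIG_INLINE_SPIN_LOCK */
        /* ... critical section ... */
        spin_unlock(lock);      /* per CONFIG_INLINE_SPIN_UNLOCK (or !PREEMPT) */
}
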
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index aca0eee53930..4765d97dcafb 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -48,6 +48,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp);
48int srcu_read_lock(struct srcu_struct *sp) __acquires(sp); 48int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
49void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); 49void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
50void synchronize_srcu(struct srcu_struct *sp); 50void synchronize_srcu(struct srcu_struct *sp);
51void synchronize_srcu_expedited(struct srcu_struct *sp);
51long srcu_batches_completed(struct srcu_struct *sp); 52long srcu_batches_completed(struct srcu_struct *sp);
52 53
53#endif 54#endif
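
[Editor's note] synchronize_srcu_expedited() gives the same guarantee as synchronize_srcu() but trades extra CPU work for a much shorter grace period, which suits rare, latency-sensitive update paths. A hedged usage sketch against the existing SRCU API; my_srcu, shared_data, reader() and update() are illustrative, and real code would publish with rcu_assign_pointer():

#include <linux/srcu.h>
#include <linux/slab.h>

static struct srcu_struct my_srcu;      /* init_srcu_struct(&my_srcu) at setup time */
static void *shared_data;

static void reader(void)
{
        int idx = srcu_read_lock(&my_srcu);
        /* ... dereference shared_data ... */
        srcu_read_unlock(&my_srcu, idx);
}

static void update(void *new, void *old)
{
        shared_data = new;                      /* publish the new version */
        synchronize_srcu_expedited(&my_srcu);   /* fast grace period */
        kfree(old);                             /* no reader can still hold the old version */
}
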
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 73b1f1cec423..febedcf67c7e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -7,6 +7,8 @@ struct device;
7struct dma_attrs; 7struct dma_attrs;
8struct scatterlist; 8struct scatterlist;
9 9
10extern int swiotlb_force;
11
10/* 12/*
11 * Maximum allowable number of contiguous slabs to map, 13 * Maximum allowable number of contiguous slabs to map,
12 * must be a power of 2. What is the appropriate value ? 14 * must be a power of 2. What is the appropriate value ?
@@ -20,8 +22,7 @@ struct scatterlist;
20 */ 22 */
21#define IO_TLB_SHIFT 11 23#define IO_TLB_SHIFT 11
22 24
23extern void 25extern void swiotlb_init(int verbose);
24swiotlb_init(void);
25 26
26extern void 27extern void
27*swiotlb_alloc_coherent(struct device *hwdev, size_t size, 28*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -88,4 +89,11 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
88extern int 89extern int
89swiotlb_dma_supported(struct device *hwdev, u64 mask); 90swiotlb_dma_supported(struct device *hwdev, u64 mask);
90 91
92#ifdef CONFIG_SWIOTLB
93extern void __init swiotlb_free(void);
94#else
95static inline void swiotlb_free(void) { }
96#endif
97
98extern void swiotlb_print_info(void);
91#endif /* __LINUX_SWIOTLB_H */ 99#endif /* __LINUX_SWIOTLB_H */
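
[Editor's note] swiotlb_init() now takes a verbosity flag, the placement report is available separately via swiotlb_print_info(), and swiotlb_free() lets an architecture hand the bounce buffers back (it is a no-op stub without CONFIG_SWIOTLB). A hedged sketch of how an arch might use the new calls; arch_early_dma_init(), arch_iommu_init() and hardware_iommu_available() are hypothetical:

#include <linux/init.h>
#include <linux/swiotlb.h>

void __init arch_early_dma_init(void)
{
        /* set up bounce buffers early as a DMA fallback; non-zero = print placement */
        swiotlb_init(1);
}

void __init arch_iommu_init(void)
{
        if (hardware_iommu_available())         /* hypothetical predicate */
                swiotlb_free();                 /* a real IOMMU took over; release the buffers */
}
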
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 3338b3f5c21a..ac5d1c1285d9 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -27,9 +27,16 @@
27 */ 27 */
28#define TPM_ANY_NUM 0xFFFF 28#define TPM_ANY_NUM 0xFFFF
29 29
30#if defined(CONFIG_TCG_TPM) 30#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
31 31
32extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); 32extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
33extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); 33extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
34#else
35static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
36 return -ENODEV;
37}
38static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
39 return -ENODEV;
40}
34#endif 41#endif
35#endif 42#endif
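
[Editor's note] With the -ENODEV stubs in place (and the =m case covered via CONFIG_TCG_TPM_MODULE), callers such as measurement code can use the PCR helpers unconditionally and only handle the error. A hedged sketch; measure_into_pcr() is an illustrative name and PCR 10 is just an example index:

#include <linux/tpm.h>

static int measure_into_pcr(const u8 *sha1_digest)
{
        int rc = tpm_pcr_extend(TPM_ANY_NUM, 10, sha1_digest);

        if (rc == -ENODEV)
                return 0;       /* no TPM, or TPM support not built in: skip quietly */
        return rc;
}
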
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index 904468a191ef..afc2bfb9e917 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -15,6 +15,10 @@
15#ifndef _LINUX_CS_H 15#ifndef _LINUX_CS_H
16#define _LINUX_CS_H 16#define _LINUX_CS_H
17 17
18#ifdef __KERNEL__
19#include <linux/interrupt.h>
20#endif
21
18/* For AccessConfigurationRegister */ 22/* For AccessConfigurationRegister */
19typedef struct conf_reg_t { 23typedef struct conf_reg_t {
20 u_char Function; 24 u_char Function;
@@ -111,11 +115,9 @@ typedef struct io_req_t {
111 115
112/* For RequestIRQ and ReleaseIRQ */ 116/* For RequestIRQ and ReleaseIRQ */
113typedef struct irq_req_t { 117typedef struct irq_req_t {
114 u_int Attributes; 118 u_int Attributes;
115 u_int AssignedIRQ; 119 u_int AssignedIRQ;
116 u_int IRQInfo1, IRQInfo2; /* IRQInfo2 is ignored */ 120 irq_handler_t Handler;
117 void *Handler;
118 void *Instance;
119} irq_req_t; 121} irq_req_t;
120 122
121/* Attributes for RequestIRQ and ReleaseIRQ */ 123/* Attributes for RequestIRQ and ReleaseIRQ */
@@ -125,7 +127,7 @@ typedef struct irq_req_t {
125#define IRQ_TYPE_DYNAMIC_SHARING 0x02 127#define IRQ_TYPE_DYNAMIC_SHARING 0x02
126#define IRQ_FORCED_PULSE 0x04 128#define IRQ_FORCED_PULSE 0x04
127#define IRQ_FIRST_SHARED 0x08 129#define IRQ_FIRST_SHARED 0x08
128#define IRQ_HANDLE_PRESENT 0x10 130//#define IRQ_HANDLE_PRESENT 0x10
129#define IRQ_PULSE_ALLOCATED 0x100 131#define IRQ_PULSE_ALLOCATED 0x100
130 132
131/* Bits in IRQInfo1 field */ 133/* Bits in IRQInfo1 field */
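
[Editor's note] irq_req_t now carries a typed irq_handler_t instead of a void *Handler plus a separate Instance pointer, and IRQ_HANDLE_PRESENT is no longer needed, so a driver just assigns an ordinary interrupt handler before pcmcia_request_irq(). A hedged driver-side sketch; my_pcmcia_interrupt() and my_pcmcia_config_irq() are illustrative, and the dev_id cookie is whatever the PCMCIA core registers for the device:

#include <linux/interrupt.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static irqreturn_t my_pcmcia_interrupt(int irq, void *dev_id)
{
        /* ... service the card ... */
        return IRQ_HANDLED;
}

static int my_pcmcia_config_irq(struct pcmcia_device *link)
{
        link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
        link->irq.Handler = my_pcmcia_interrupt;        /* typed handler, no IRQ_HANDLE_PRESENT */

        return pcmcia_request_irq(link, &link->irq);
}
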
diff --git a/include/pcmcia/cs_types.h b/include/pcmcia/cs_types.h
index 315965a37930..f5e3b8386c8f 100644
--- a/include/pcmcia/cs_types.h
+++ b/include/pcmcia/cs_types.h
@@ -26,8 +26,7 @@ typedef u_int event_t;
26typedef u_char cisdata_t; 26typedef u_char cisdata_t;
27typedef u_short page_t; 27typedef u_short page_t;
28 28
29struct window_t; 29typedef unsigned long window_handle_t;
30typedef struct window_t *window_handle_t;
31 30
32struct region_t; 31struct region_t;
33typedef struct region_t *memory_handle_t; 32typedef struct region_t *memory_handle_t;
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index a2be80b9a095..d403c12f7978 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -34,6 +34,7 @@
34struct pcmcia_socket; 34struct pcmcia_socket;
35struct pcmcia_device; 35struct pcmcia_device;
36struct config_t; 36struct config_t;
37struct net_device;
37 38
38/* dynamic device IDs for PCMCIA device drivers. See 39/* dynamic device IDs for PCMCIA device drivers. See
39 * Documentation/pcmcia/driver.txt for details. 40 * Documentation/pcmcia/driver.txt for details.
@@ -137,65 +138,39 @@ struct pcmcia_device {
137#define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev) 138#define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev)
138#define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv) 139#define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv)
139 140
140/* deprecated -- don't use! */
141#define handle_to_dev(handle) (handle->dev)
142 141
143 142/*
144/* (deprecated) error reporting by PCMCIA devices. Use dev_printk() 143 * CIS access.
145 * or dev_dbg() directly in the driver, without referring to pcmcia_error_func() 144 *
146 * and/or pcmcia_error_ret() for those functions will go away soon. 145 * Please use the following functions to access CIS tuples:
147 */ 146 * - pcmcia_get_tuple()
148enum service { 147 * - pcmcia_loop_tuple()
149 AccessConfigurationRegister, AddSocketServices, 148 * - pcmcia_get_mac_from_cis()
150 AdjustResourceInfo, CheckEraseQueue, CloseMemory, CopyMemory, 149 *
151 DeregisterClient, DeregisterEraseQueue, GetCardServicesInfo, 150 * To parse a tuple_t, pcmcia_parse_tuple() exists. Its interface
152 GetClientInfo, GetConfigurationInfo, GetEventMask, 151 * might change in future.
153 GetFirstClient, GetFirstPartion, GetFirstRegion, GetFirstTuple,
154 GetNextClient, GetNextPartition, GetNextRegion, GetNextTuple,
155 GetStatus, GetTupleData, MapLogSocket, MapLogWindow, MapMemPage,
156 MapPhySocket, MapPhyWindow, ModifyConfiguration, ModifyWindow,
157 OpenMemory, ParseTuple, ReadMemory, RegisterClient,
158 RegisterEraseQueue, RegisterMTD, RegisterTimer,
159 ReleaseConfiguration, ReleaseExclusive, ReleaseIO, ReleaseIRQ,
160 ReleaseSocketMask, ReleaseWindow, ReplaceSocketServices,
161 RequestConfiguration, RequestExclusive, RequestIO, RequestIRQ,
162 RequestSocketMask, RequestWindow, ResetCard, ReturnSSEntry,
163 SetEventMask, SetRegion, ValidateCIS, VendorSpecific,
164 WriteMemory, BindDevice, BindMTD, ReportError,
165 SuspendCard, ResumeCard, EjectCard, InsertCard, ReplaceCIS,
166 GetFirstWindow, GetNextWindow, GetMemPage
167};
168const char *pcmcia_error_func(int func);
169const char *pcmcia_error_ret(int ret);
170
171#define cs_error(p_dev, func, ret) \
172 { \
173 dev_printk(KERN_NOTICE, &p_dev->dev, \
174 "%s : %s\n", \
175 pcmcia_error_func(func), \
176 pcmcia_error_ret(ret)); \
177 }
178
179/* CIS access.
180 * Use the pcmcia_* versions in PCMCIA drivers
181 */ 152 */
182int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse);
183 153
184int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, 154/* get the very first CIS entry of type @code. Note that buf is pointer
185 tuple_t *tuple); 155 * to u8 *buf; and that you need to kfree(buf) afterwards. */
186#define pcmcia_get_first_tuple(p_dev, tuple) \ 156size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
187 pccard_get_first_tuple(p_dev->socket, p_dev->func, tuple) 157 u8 **buf);
188 158
189int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, 159/* loop over CIS entries */
190 tuple_t *tuple); 160int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
191#define pcmcia_get_next_tuple(p_dev, tuple) \ 161 int (*loop_tuple) (struct pcmcia_device *p_dev,
192 pccard_get_next_tuple(p_dev->socket, p_dev->func, tuple) 162 tuple_t *tuple,
163 void *priv_data),
164 void *priv_data);
193 165
194int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple); 166/* get the MAC address from CISTPL_FUNCE */
195#define pcmcia_get_tuple_data(p_dev, tuple) \ 167int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev,
196 pccard_get_tuple_data(p_dev->socket, tuple) 168 struct net_device *dev);
197 169
198 170
171/* parse a tuple_t */
172int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse);
173
199/* loop CIS entries for valid configuration */ 174/* loop CIS entries for valid configuration */
200int pcmcia_loop_config(struct pcmcia_device *p_dev, 175int pcmcia_loop_config(struct pcmcia_device *p_dev,
201 int (*conf_check) (struct pcmcia_device *p_dev, 176 int (*conf_check) (struct pcmcia_device *p_dev,
@@ -221,12 +196,11 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req);
221int pcmcia_request_configuration(struct pcmcia_device *p_dev, 196int pcmcia_request_configuration(struct pcmcia_device *p_dev,
222 config_req_t *req); 197 config_req_t *req);
223 198
224int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, 199int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req,
225 window_handle_t *wh); 200 window_handle_t *wh);
226int pcmcia_release_window(window_handle_t win); 201int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t win);
227 202int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t win,
228int pcmcia_get_mem_page(window_handle_t win, memreq_t *req); 203 memreq_t *req);
229int pcmcia_map_mem_page(window_handle_t win, memreq_t *req);
230 204
231int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod); 205int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod);
232void pcmcia_disable_device(struct pcmcia_device *p_dev); 206void pcmcia_disable_device(struct pcmcia_device *p_dev);
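
[Editor's note] CIS access now goes through pcmcia_get_tuple()/pcmcia_loop_tuple()/pcmcia_get_mac_from_cis() instead of the removed pccard_get_*_tuple() wrappers, and window handles are plain values passed back together with the device. A hedged sketch of reading one tuple with the new helper; dump_version_tuple() is an illustrative name:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int dump_version_tuple(struct pcmcia_device *p_dev)
{
        u8 *buf = NULL;
        size_t len;

        len = pcmcia_get_tuple(p_dev, CISTPL_VERS_1, &buf);
        if (!len)
                return -ENODEV;

        print_hex_dump_bytes("cis: ", DUMP_PREFIX_OFFSET, buf, len);
        kfree(buf);     /* pcmcia_get_tuple() allocates the buffer, the caller frees it */
        return 0;
}
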
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index e0f6feb8588c..7c23be706f12 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -107,15 +107,6 @@ typedef struct io_window_t {
107 struct resource *res; 107 struct resource *res;
108} io_window_t; 108} io_window_t;
109 109
110#define WINDOW_MAGIC 0xB35C
111typedef struct window_t {
112 u_short magic;
113 u_short index;
114 struct pcmcia_device *handle;
115 struct pcmcia_socket *sock;
116 pccard_mem_map ctl;
117} window_t;
118
119/* Maximum number of IO windows per socket */ 110/* Maximum number of IO windows per socket */
120#define MAX_IO_WIN 2 111#define MAX_IO_WIN 2
121 112
@@ -155,7 +146,7 @@ struct pcmcia_socket {
155 u_int Config; 146 u_int Config;
156 } irq; 147 } irq;
157 io_window_t io[MAX_IO_WIN]; 148 io_window_t io[MAX_IO_WIN];
158 window_t win[MAX_WIN]; 149 pccard_mem_map win[MAX_WIN];
159 struct list_head cis_cache; 150 struct list_head cis_cache;
160 size_t fake_cis_len; 151 size_t fake_cis_len;
161 u8 *fake_cis; 152 u8 *fake_cis;
@@ -172,7 +163,7 @@ struct pcmcia_socket {
172 u_int irq_mask; 163 u_int irq_mask;
173 u_int map_size; 164 u_int map_size;
174 u_int io_offset; 165 u_int io_offset;
175 u_char pci_irq; 166 u_int pci_irq;
176 struct pci_dev * cb_dev; 167 struct pci_dev * cb_dev;
177 168
178 169
diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h
new file mode 100644
index 000000000000..8abd620a490e
--- /dev/null
+++ b/include/trace/events/bkl.h
@@ -0,0 +1,61 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM bkl
3
4#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BKL_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(lock_kernel,
10
11 TP_PROTO(const char *func, const char *file, int line),
12
13 TP_ARGS(func, file, line),
14
15 TP_STRUCT__entry(
16 __field( int, lock_depth )
17 __field_ext( const char *, func, FILTER_PTR_STRING )
18 __field_ext( const char *, file, FILTER_PTR_STRING )
19 __field( int, line )
20 ),
21
22 TP_fast_assign(
23 /* We want to record the lock_depth after lock is acquired */
24 __entry->lock_depth = current->lock_depth + 1;
25 __entry->func = func;
26 __entry->file = file;
27 __entry->line = line;
28 ),
29
30 TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
31 __entry->file, __entry->line, __entry->func)
32);
33
34TRACE_EVENT(unlock_kernel,
35
36 TP_PROTO(const char *func, const char *file, int line),
37
38 TP_ARGS(func, file, line),
39
40 TP_STRUCT__entry(
41 __field(int, lock_depth)
42 __field(const char *, func)
43 __field(const char *, file)
44 __field(int, line)
45 ),
46
47 TP_fast_assign(
48 __entry->lock_depth = current->lock_depth;
49 __entry->func = func;
50 __entry->file = file;
51 __entry->line = line;
52 ),
53
54 TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
55 __entry->file, __entry->line, __entry->func)
56);
57
58#endif /* _TRACE_BKL_H */
59
60/* This part must be outside protection */
61#include <trace/define_trace.h>
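
[Editor's note] The TRACE_EVENT() definitions generate trace_lock_kernel()/trace_unlock_kernel() hooks; the obvious producers are the _lock_kernel()/_unlock_kernel() entry points introduced in smp_lock.h above, which receive the call site from the macros. A hedged sketch of that wiring; the real bodies, presumably in lib/kernel_lock.c, are not part of the hunks shown here:

#include <linux/smp_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>

void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
        trace_lock_kernel(func, file, line);
        /* ... existing BKL acquisition and current->lock_depth bookkeeping ... */
}

void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
        trace_unlock_kernel(func, file, line);
        /* ... existing BKL release bookkeeping ... */
}
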
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 397dff2dbd5a..fb726ac7caee 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
1#undef TRACE_SYSTEM 1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM syscalls 2#define TRACE_SYSTEM raw_syscalls
3#define TRACE_INCLUDE_FILE syscalls
3 4
4#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) 5#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_EVENTS_SYSCALLS_H 6#define _TRACE_EVENTS_SYSCALLS_H
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index cc0d9667e182..dacb8ef67000 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -159,7 +159,7 @@
159#undef __get_str 159#undef __get_str
160 160
161#undef TP_printk 161#undef TP_printk
162#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) 162#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)
163 163
164#undef TP_fast_assign 164#undef TP_fast_assign
165#define TP_fast_assign(args...) args 165#define TP_fast_assign(args...) args
diff --git a/include/trace/power.h b/include/trace/power.h
deleted file mode 100644
index ef204666e983..000000000000
--- a/include/trace/power.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef _TRACE_POWER_H
2#define _TRACE_POWER_H
3
4#include <linux/ktime.h>
5#include <linux/tracepoint.h>
6
7enum {
8 POWER_NONE = 0,
9 POWER_CSTATE = 1,
10 POWER_PSTATE = 2,
11};
12
13struct power_trace {
14 ktime_t stamp;
15 ktime_t end;
16 int type;
17 int state;
18};
19
20DECLARE_TRACE(power_start,
21 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
22 TP_ARGS(it, type, state));
23
24DECLARE_TRACE(power_mark,
25 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
26 TP_ARGS(it, type, state));
27
28DECLARE_TRACE(power_end,
29 TP_PROTO(struct power_trace *it),
30 TP_ARGS(it));
31
32#endif /* _TRACE_POWER_H */
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 5dc283ba5ae0..e972f0a40f8d 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -33,7 +33,7 @@ struct syscall_metadata {
33}; 33};
34 34
35#ifdef CONFIG_FTRACE_SYSCALLS 35#ifdef CONFIG_FTRACE_SYSCALLS
36extern struct syscall_metadata *syscall_nr_to_meta(int nr); 36extern unsigned long arch_syscall_addr(int nr);
37extern int syscall_name_to_nr(char *name); 37extern int syscall_name_to_nr(char *name);
38void set_syscall_enter_id(int num, int id); 38void set_syscall_enter_id(int num, int id);
39void set_syscall_exit_id(int num, int id); 39void set_syscall_exit_id(int num, int id);
diff --git a/init/Kconfig b/init/Kconfig
index 39923ccc287b..9ee778294756 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -334,6 +334,15 @@ config TREE_PREEMPT_RCU
334 is also required. It also scales down nicely to 334 is also required. It also scales down nicely to
335 smaller systems. 335 smaller systems.
336 336
337config TINY_RCU
338 bool "UP-only small-memory-footprint RCU"
339 depends on !SMP
340 help
341 This option selects the RCU implementation that is
342 designed for UP systems from which real-time response
343 is not required. This option greatly reduces the
344 memory footprint of RCU.
345
337endchoice 346endchoice
338 347
339config RCU_TRACE 348config RCU_TRACE
@@ -606,7 +615,7 @@ config SYSFS_DEPRECATED
606 bool 615 bool
607 616
608config SYSFS_DEPRECATED_V2 617config SYSFS_DEPRECATED_V2
609 bool "remove sysfs features which may confuse old userspace tools" 618 bool "enable deprecated sysfs features which may confuse old userspace tools"
610 depends on SYSFS 619 depends on SYSFS
611 default n 620 default n
612 select SYSFS_DEPRECATED 621 select SYSFS_DEPRECATED
@@ -1220,3 +1229,4 @@ source "block/Kconfig"
1220config PREEMPT_NOTIFIERS 1229config PREEMPT_NOTIFIERS
1221 bool 1230 bool
1222 1231
1232source "kernel/Kconfig.locks"
diff --git a/init/main.c b/init/main.c
index 5988debfc505..4051d75dd2d6 100644
--- a/init/main.c
+++ b/init/main.c
@@ -251,7 +251,7 @@ early_param("loglevel", loglevel);
251 251
252/* 252/*
253 * Unknown boot options get handed to init, unless they look like 253 * Unknown boot options get handed to init, unless they look like
254 * failed parameters 254 * unused parameters (modprobe will find them in /proc/cmdline).
255 */ 255 */
256static int __init unknown_bootoption(char *param, char *val) 256static int __init unknown_bootoption(char *param, char *val)
257{ 257{
@@ -272,14 +272,9 @@ static int __init unknown_bootoption(char *param, char *val)
272 if (obsolete_checksetup(param)) 272 if (obsolete_checksetup(param))
273 return 0; 273 return 0;
274 274
275 /* 275 /* Unused module parameter. */
276 * Preemptive maintenance for "why didn't my misspelled command 276 if (strchr(param, '.') && (!val || strchr(param, '.') < val))
277 * line work?"
278 */
279 if (strchr(param, '.') && (!val || strchr(param, '.') < val)) {
280 printk(KERN_ERR "Unknown boot option `%s': ignoring\n", param);
281 return 0; 277 return 0;
282 }
283 278
284 if (panic_later) 279 if (panic_later)
285 return 0; 280 return 0;
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
new file mode 100644
index 000000000000..88c92fb44618
--- /dev/null
+++ b/kernel/Kconfig.locks
@@ -0,0 +1,202 @@
1#
2# The ARCH_INLINE foo is necessary because select ignores "depends on"
3#
4config ARCH_INLINE_SPIN_TRYLOCK
5 bool
6
7config ARCH_INLINE_SPIN_TRYLOCK_BH
8 bool
9
10config ARCH_INLINE_SPIN_LOCK
11 bool
12
13config ARCH_INLINE_SPIN_LOCK_BH
14 bool
15
16config ARCH_INLINE_SPIN_LOCK_IRQ
17 bool
18
19config ARCH_INLINE_SPIN_LOCK_IRQSAVE
20 bool
21
22config ARCH_INLINE_SPIN_UNLOCK
23 bool
24
25config ARCH_INLINE_SPIN_UNLOCK_BH
26 bool
27
28config ARCH_INLINE_SPIN_UNLOCK_IRQ
29 bool
30
31config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
32 bool
33
34
35config ARCH_INLINE_READ_TRYLOCK
36 bool
37
38config ARCH_INLINE_READ_LOCK
39 bool
40
41config ARCH_INLINE_READ_LOCK_BH
42 bool
43
44config ARCH_INLINE_READ_LOCK_IRQ
45 bool
46
47config ARCH_INLINE_READ_LOCK_IRQSAVE
48 bool
49
50config ARCH_INLINE_READ_UNLOCK
51 bool
52
53config ARCH_INLINE_READ_UNLOCK_BH
54 bool
55
56config ARCH_INLINE_READ_UNLOCK_IRQ
57 bool
58
59config ARCH_INLINE_READ_UNLOCK_IRQRESTORE
60 bool
61
62
63config ARCH_INLINE_WRITE_TRYLOCK
64 bool
65
66config ARCH_INLINE_WRITE_LOCK
67 bool
68
69config ARCH_INLINE_WRITE_LOCK_BH
70 bool
71
72config ARCH_INLINE_WRITE_LOCK_IRQ
73 bool
74
75config ARCH_INLINE_WRITE_LOCK_IRQSAVE
76 bool
77
78config ARCH_INLINE_WRITE_UNLOCK
79 bool
80
81config ARCH_INLINE_WRITE_UNLOCK_BH
82 bool
83
84config ARCH_INLINE_WRITE_UNLOCK_IRQ
85 bool
86
87config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
88 bool
89
90#
91# lock_* functions are inlined when:
92# - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
93#
94# trylock_* functions are inlined when:
95# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
96#
97# unlock and unlock_irq functions are inlined when:
98# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
99# or
100# - DEBUG_SPINLOCK=n and PREEMPT=n
101#
102# unlock_bh and unlock_irqrestore functions are inlined when:
103# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
104#
105
106config INLINE_SPIN_TRYLOCK
107 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
108
109config INLINE_SPIN_TRYLOCK_BH
110 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
111
112config INLINE_SPIN_LOCK
113 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
114
115config INLINE_SPIN_LOCK_BH
116 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
117 ARCH_INLINE_SPIN_LOCK_BH
118
119config INLINE_SPIN_LOCK_IRQ
120 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
121 ARCH_INLINE_SPIN_LOCK_IRQ
122
123config INLINE_SPIN_LOCK_IRQSAVE
124 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
125 ARCH_INLINE_SPIN_LOCK_IRQSAVE
126
127config INLINE_SPIN_UNLOCK
128 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
129
130config INLINE_SPIN_UNLOCK_BH
131 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
132
133config INLINE_SPIN_UNLOCK_IRQ
134 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH)
135
136config INLINE_SPIN_UNLOCK_IRQRESTORE
137 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
138
139
140config INLINE_READ_TRYLOCK
141 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
142
143config INLINE_READ_LOCK
144 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
145
146config INLINE_READ_LOCK_BH
147 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
148 ARCH_INLINE_READ_LOCK_BH
149
150config INLINE_READ_LOCK_IRQ
151 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
152 ARCH_INLINE_READ_LOCK_IRQ
153
154config INLINE_READ_LOCK_IRQSAVE
155 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
156 ARCH_INLINE_READ_LOCK_IRQSAVE
157
158config INLINE_READ_UNLOCK
159 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
160
161config INLINE_READ_UNLOCK_BH
162 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
163
164config INLINE_READ_UNLOCK_IRQ
165 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH)
166
167config INLINE_READ_UNLOCK_IRQRESTORE
168 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
169
170
171config INLINE_WRITE_TRYLOCK
172 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
173
174config INLINE_WRITE_LOCK
175 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
176
177config INLINE_WRITE_LOCK_BH
178 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
179 ARCH_INLINE_WRITE_LOCK_BH
180
181config INLINE_WRITE_LOCK_IRQ
182 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
183 ARCH_INLINE_WRITE_LOCK_IRQ
184
185config INLINE_WRITE_LOCK_IRQSAVE
186 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
187 ARCH_INLINE_WRITE_LOCK_IRQSAVE
188
189config INLINE_WRITE_UNLOCK
190 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
191
192config INLINE_WRITE_UNLOCK_BH
193 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
194
195config INLINE_WRITE_UNLOCK_IRQ
196 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH)
197
198config INLINE_WRITE_UNLOCK_IRQRESTORE
199 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
200
201config MUTEX_SPIN_ON_OWNER
202 def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES
diff --git a/kernel/Makefile b/kernel/Makefile
index d7c13d249b2d..dcf6789bf547 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
82obj-$(CONFIG_TREE_RCU) += rcutree.o 82obj-$(CONFIG_TREE_RCU) += rcutree.o
83obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o 83obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
84obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o 84obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
85obj-$(CONFIG_TINY_RCU) += rcutiny.o
85obj-$(CONFIG_RELAY) += relay.o 86obj-$(CONFIG_RELAY) += relay.o
86obj-$(CONFIG_SYSCTL) += utsname_sysctl.o 87obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
87obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o 88obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/capability.c b/kernel/capability.c
index 4e17041963f5..7f876e60521f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -29,7 +29,6 @@ EXPORT_SYMBOL(__cap_empty_set);
29EXPORT_SYMBOL(__cap_full_set); 29EXPORT_SYMBOL(__cap_full_set);
30EXPORT_SYMBOL(__cap_init_eff_set); 30EXPORT_SYMBOL(__cap_init_eff_set);
31 31
32#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
33int file_caps_enabled = 1; 32int file_caps_enabled = 1;
34 33
35static int __init file_caps_disable(char *str) 34static int __init file_caps_disable(char *str)
@@ -38,7 +37,6 @@ static int __init file_caps_disable(char *str)
38 return 1; 37 return 1;
39} 38}
40__setup("no_file_caps", file_caps_disable); 39__setup("no_file_caps", file_caps_disable);
41#endif
42 40
43/* 41/*
44 * More recent versions of libcap are available from: 42 * More recent versions of libcap are available from:
@@ -169,8 +167,8 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
169 kernel_cap_t pE, pI, pP; 167 kernel_cap_t pE, pI, pP;
170 168
171 ret = cap_validate_magic(header, &tocopy); 169 ret = cap_validate_magic(header, &tocopy);
172 if (ret != 0) 170 if ((dataptr == NULL) || (ret != 0))
173 return ret; 171 return ((dataptr == NULL) && (ret == -EINVAL)) ? 0 : ret;
174 172
175 if (get_user(pid, &header->pid)) 173 if (get_user(pid, &header->pid))
176 return -EFAULT; 174 return -EFAULT;
@@ -238,7 +236,7 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
238SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) 236SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
239{ 237{
240 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; 238 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
241 unsigned i, tocopy; 239 unsigned i, tocopy, copybytes;
242 kernel_cap_t inheritable, permitted, effective; 240 kernel_cap_t inheritable, permitted, effective;
243 struct cred *new; 241 struct cred *new;
244 int ret; 242 int ret;
@@ -255,8 +253,11 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
255 if (pid != 0 && pid != task_pid_vnr(current)) 253 if (pid != 0 && pid != task_pid_vnr(current))
256 return -EPERM; 254 return -EPERM;
257 255
258 if (copy_from_user(&kdata, data, 256 copybytes = tocopy * sizeof(struct __user_cap_data_struct);
259 tocopy * sizeof(struct __user_cap_data_struct))) 257 if (copybytes > sizeof(kdata))
258 return -EFAULT;
259
260 if (copy_from_user(&kdata, data, copybytes))
260 return -EFAULT; 261 return -EFAULT;
261 262
262 for (i = 0; i < tocopy; i++) { 263 for (i = 0; i < tocopy; i++) {
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index d4e841747400..0c642d51aac2 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -144,7 +144,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
144 144
145 rcu_read_lock(); 145 rcu_read_lock();
146 do_each_thread(g, t) { 146 do_each_thread(g, t) {
147 if (!--max_count) 147 if (!max_count--)
148 goto unlock; 148 goto unlock;
149 if (!--batch_count) { 149 if (!--batch_count) {
150 batch_count = HUNG_TASK_BATCHING; 150 batch_count = HUNG_TASK_BATCHING;
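
[Editor's note] The switch from pre-decrement to post-decrement makes the loop examine max_count tasks rather than stopping one task early. A hedged stand-alone C illustration of the difference (plain user-space code, not kernel code):

#include <stdio.h>

int main(void)
{
        int budget, i;

        budget = 3;
        for (i = 0; ; i++)
                if (!--budget)
                        break;
        printf("pre-decrement allowed %d checks\n", i);         /* prints 2 */

        budget = 3;
        for (i = 0; ; i++)
                if (!budget--)
                        break;
        printf("post-decrement allowed %d checks\n", i);        /* prints 3 */

        return 0;
}
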
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c1660194d115..ba566c261adc 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -166,11 +166,11 @@ int set_irq_data(unsigned int irq, void *data)
166EXPORT_SYMBOL(set_irq_data); 166EXPORT_SYMBOL(set_irq_data);
167 167
168/** 168/**
169 * set_irq_data - set irq type data for an irq 169 * set_irq_msi - set MSI descriptor data for an irq
170 * @irq: Interrupt number 170 * @irq: Interrupt number
171 * @entry: Pointer to MSI descriptor data 171 * @entry: Pointer to MSI descriptor data
172 * 172 *
173 * Set the hardware irq controller data for an irq 173 * Set the MSI descriptor entry for an irq
174 */ 174 */
175int set_irq_msi(unsigned int irq, struct msi_desc *entry) 175int set_irq_msi(unsigned int irq, struct msi_desc *entry)
176{ 176{
@@ -590,7 +590,7 @@ out_unlock:
590} 590}
591 591
592/** 592/**
593 * handle_percpu_IRQ - Per CPU local irq handler 593 * handle_percpu_irq - Per CPU local irq handler
594 * @irq: the interrupt number 594 * @irq: the interrupt number
595 * @desc: the interrupt description structure for this irq 595 * @desc: the interrupt description structure for this irq
596 * 596 *
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 692363dd591f..0832145fea97 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -136,7 +136,7 @@ out:
136 136
137static int default_affinity_open(struct inode *inode, struct file *file) 137static int default_affinity_open(struct inode *inode, struct file *file)
138{ 138{
139 return single_open(file, default_affinity_show, NULL); 139 return single_open(file, default_affinity_show, PDE(inode)->data);
140} 140}
141 141
142static const struct file_operations default_affinity_proc_fops = { 142static const struct file_operations default_affinity_proc_fops = {
@@ -148,18 +148,28 @@ static const struct file_operations default_affinity_proc_fops = {
148}; 148};
149#endif 149#endif
150 150
151static int irq_spurious_read(char *page, char **start, off_t off, 151static int irq_spurious_proc_show(struct seq_file *m, void *v)
152 int count, int *eof, void *data)
153{ 152{
154 struct irq_desc *desc = irq_to_desc((long) data); 153 struct irq_desc *desc = irq_to_desc((long) m->private);
155 return sprintf(page, "count %u\n" 154
156 "unhandled %u\n" 155 seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
157 "last_unhandled %u ms\n", 156 desc->irq_count, desc->irqs_unhandled,
158 desc->irq_count, 157 jiffies_to_msecs(desc->last_unhandled));
159 desc->irqs_unhandled, 158 return 0;
160 jiffies_to_msecs(desc->last_unhandled)); 159}
160
161static int irq_spurious_proc_open(struct inode *inode, struct file *file)
162{
163 return single_open(file, irq_spurious_proc_show, NULL);
161} 164}
162 165
166static const struct file_operations irq_spurious_proc_fops = {
167 .open = irq_spurious_proc_open,
168 .read = seq_read,
169 .llseek = seq_lseek,
170 .release = single_release,
171};
172
163#define MAX_NAMELEN 128 173#define MAX_NAMELEN 128
164 174
165static int name_unique(unsigned int irq, struct irqaction *new_action) 175static int name_unique(unsigned int irq, struct irqaction *new_action)
@@ -204,7 +214,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
204void register_irq_proc(unsigned int irq, struct irq_desc *desc) 214void register_irq_proc(unsigned int irq, struct irq_desc *desc)
205{ 215{
206 char name [MAX_NAMELEN]; 216 char name [MAX_NAMELEN];
207 struct proc_dir_entry *entry;
208 217
209 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) 218 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
210 return; 219 return;
@@ -214,6 +223,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
214 223
215 /* create /proc/irq/1234 */ 224 /* create /proc/irq/1234 */
216 desc->dir = proc_mkdir(name, root_irq_dir); 225 desc->dir = proc_mkdir(name, root_irq_dir);
226 if (!desc->dir)
227 return;
217 228
218#ifdef CONFIG_SMP 229#ifdef CONFIG_SMP
219 /* create /proc/irq/<irq>/smp_affinity */ 230 /* create /proc/irq/<irq>/smp_affinity */
@@ -221,11 +232,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
221 &irq_affinity_proc_fops, (void *)(long)irq); 232 &irq_affinity_proc_fops, (void *)(long)irq);
222#endif 233#endif
223 234
224 entry = create_proc_entry("spurious", 0444, desc->dir); 235 proc_create_data("spurious", 0444, desc->dir,
225 if (entry) { 236 &irq_spurious_proc_fops, (void *)(long)irq);
226 entry->data = (void *)(long)irq;
227 entry->read_proc = irq_spurious_read;
228 }
229} 237}
230 238
231#undef MAX_NAMELEN 239#undef MAX_NAMELEN
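
[Editor's note] This is the usual read_proc-to-seq_file conversion: the show callback prints into a seq_file, single_open() binds it, and proc_create_data() carries the per-entry cookie that the old code stored in entry->data. A hedged sketch of the same pattern for a hypothetical per-id stats file; the my_stat_* names are illustrative:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_stat_proc_show(struct seq_file *m, void *v)
{
        unsigned long id = (unsigned long)m->private;

        seq_printf(m, "id %lu\n", id);
        return 0;
}

static int my_stat_proc_open(struct inode *inode, struct file *file)
{
        /* hand the proc_create_data() cookie through to the show callback */
        return single_open(file, my_stat_proc_show, PDE(inode)->data);
}

static const struct file_operations my_stat_proc_fops = {
        .open           = my_stat_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* registration:
 *      proc_create_data("my_stat", 0444, parent, &my_stat_proc_fops,
 *                       (void *)(long)id);
 */
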
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index bd7273e6282e..22b0a6eedf24 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
104 return ok; 104 return ok;
105} 105}
106 106
107static void poll_all_shared_irqs(void) 107static void poll_spurious_irqs(unsigned long dummy)
108{ 108{
109 struct irq_desc *desc; 109 struct irq_desc *desc;
110 int i; 110 int i;
@@ -125,23 +125,11 @@ static void poll_all_shared_irqs(void)
125 try_one_irq(i, desc); 125 try_one_irq(i, desc);
126 local_irq_enable(); 126 local_irq_enable();
127 } 127 }
128}
129
130static void poll_spurious_irqs(unsigned long dummy)
131{
132 poll_all_shared_irqs();
133 128
134 mod_timer(&poll_spurious_irq_timer, 129 mod_timer(&poll_spurious_irq_timer,
135 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 130 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
136} 131}
137 132
138#ifdef CONFIG_DEBUG_SHIRQ
139void debug_poll_all_shared_irqs(void)
140{
141 poll_all_shared_irqs();
142}
143#endif
144
145/* 133/*
146 * If 99,900 of the previous 100,000 interrupts have not been handled 134 * If 99,900 of the previous 100,000 interrupts have not been handled
147 * then assume that the IRQ is stuck in some manner. Drop a diagnostic 135 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 9fcb53a11f87..25b103190364 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -80,16 +80,16 @@ int __request_module(bool wait, const char *fmt, ...)
80#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ 80#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
81 static int kmod_loop_msg; 81 static int kmod_loop_msg;
82 82
83 ret = security_kernel_module_request();
84 if (ret)
85 return ret;
86
87 va_start(args, fmt); 83 va_start(args, fmt);
88 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); 84 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89 va_end(args); 85 va_end(args);
90 if (ret >= MODULE_NAME_LEN) 86 if (ret >= MODULE_NAME_LEN)
91 return -ENAMETOOLONG; 87 return -ENAMETOOLONG;
92 88
89 ret = security_kernel_module_request(module_name);
90 if (ret)
91 return ret;
92
93 /* If modprobe needs a service that is in a module, we get a recursive 93 /* If modprobe needs a service that is in a module, we get a recursive
94 * loop. Limit the number of running kmod threads to max_threads/2 or 94 * loop. Limit the number of running kmod threads to max_threads/2 or
95 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method 95 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5240d75f4c60..1494e85b35f2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1014,9 +1014,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
1014 /* Pre-allocate memory for max kretprobe instances */ 1014 /* Pre-allocate memory for max kretprobe instances */
1015 if (rp->maxactive <= 0) { 1015 if (rp->maxactive <= 0) {
1016#ifdef CONFIG_PREEMPT 1016#ifdef CONFIG_PREEMPT
1017 rp->maxactive = max(10, 2 * NR_CPUS); 1017 rp->maxactive = max(10, 2 * num_possible_cpus());
1018#else 1018#else
1019 rp->maxactive = NR_CPUS; 1019 rp->maxactive = num_possible_cpus();
1020#endif 1020#endif
1021 } 1021 }
1022 spin_lock_init(&rp->lock); 1022 spin_lock_init(&rp->lock);
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819d..5842a71cf052 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1187,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1187 1187
1188 /* Count loaded sections and allocate structures */ 1188 /* Count loaded sections and allocate structures */
1189 for (i = 0; i < nsect; i++) 1189 for (i = 0; i < nsect; i++)
1190 if (sechdrs[i].sh_flags & SHF_ALLOC) 1190 if (sechdrs[i].sh_flags & SHF_ALLOC
1191 && sechdrs[i].sh_size)
1191 nloaded++; 1192 nloaded++;
1192 size[0] = ALIGN(sizeof(*sect_attrs) 1193 size[0] = ALIGN(sizeof(*sect_attrs)
1193 + nloaded * sizeof(sect_attrs->attrs[0]), 1194 + nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1207,6 +1208,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1207 for (i = 0; i < nsect; i++) { 1208 for (i = 0; i < nsect; i++) {
1208 if (! (sechdrs[i].sh_flags & SHF_ALLOC)) 1209 if (! (sechdrs[i].sh_flags & SHF_ALLOC))
1209 continue; 1210 continue;
1211 if (!sechdrs[i].sh_size)
1212 continue;
1210 sattr->address = sechdrs[i].sh_addr; 1213 sattr->address = sechdrs[i].sh_addr;
1211 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, 1214 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
1212 GFP_KERNEL); 1215 GFP_KERNEL);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 947b3ad551f8..632f04c57d82 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
148 148
149 preempt_disable(); 149 preempt_disable();
150 mutex_acquire(&lock->dep_map, subclass, 0, ip); 150 mutex_acquire(&lock->dep_map, subclass, 0, ip);
151#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \ 151
152 !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES) 152#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
153 /* 153 /*
154 * Optimistic spinning. 154 * Optimistic spinning.
155 * 155 *
diff --git a/kernel/printk.c b/kernel/printk.c
index f38b07f78a4e..b5ac4d99c667 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <linux/kexec.h> 35#include <linux/kexec.h>
36#include <linux/ratelimit.h>
36 37
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38 39
@@ -1376,11 +1377,11 @@ late_initcall(disable_boot_consoles);
1376 */ 1377 */
1377DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); 1378DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
1378 1379
1379int printk_ratelimit(void) 1380int __printk_ratelimit(const char *func)
1380{ 1381{
1381 return __ratelimit(&printk_ratelimit_state); 1382 return ___ratelimit(&printk_ratelimit_state, func);
1382} 1383}
1383EXPORT_SYMBOL(printk_ratelimit); 1384EXPORT_SYMBOL(__printk_ratelimit);
1384 1385
1385/** 1386/**
1386 * printk_timed_ratelimit - caller-controlled printk ratelimiting 1387 * printk_timed_ratelimit - caller-controlled printk ratelimiting
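
[Editor's note] Renaming the function to __printk_ratelimit(const char *func) lets a header-side printk_ratelimit() wrapper pass __func__, so ___ratelimit() can attribute suppressed messages to the call site; existing callers are unchanged. A hedged sketch of the wrapper this implies and a typical caller; report_rare_error() is an illustrative name and the actual macro lives in a header outside these hunks:

#include <linux/kernel.h>

/* sketch of the header-side wrapper implied by this change */
#define printk_ratelimit()      __printk_ratelimit(__func__)

static void report_rare_error(int err)
{
        if (printk_ratelimit())
                printk(KERN_WARNING "device error %d\n", err);
        /* when throttled, ___ratelimit() accounts the suppressed messages per call site */
}
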
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 400183346ad2..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h>
48 47
49#ifdef CONFIG_DEBUG_LOCK_ALLOC 48#ifdef CONFIG_DEBUG_LOCK_ALLOC
50static struct lock_class_key rcu_lock_key; 49static struct lock_class_key rcu_lock_key;
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
53EXPORT_SYMBOL_GPL(rcu_lock_map); 52EXPORT_SYMBOL_GPL(rcu_lock_map);
54#endif 53#endif
55 54
56int rcu_scheduler_active __read_mostly;
57
58/* 55/*
59 * Awaken the corresponding synchronize_rcu() instance now that a 56 * Awaken the corresponding synchronize_rcu() instance now that a
60 * grace period has elapsed. 57 * grace period has elapsed.
@@ -66,122 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
66 rcu = container_of(head, struct rcu_synchronize, head); 63 rcu = container_of(head, struct rcu_synchronize, head);
67 complete(&rcu->completion); 64 complete(&rcu->completion);
68} 65}
69
70#ifdef CONFIG_TREE_PREEMPT_RCU
71
72/**
73 * synchronize_rcu - wait until a grace period has elapsed.
74 *
75 * Control will return to the caller some time after a full grace
76 * period has elapsed, in other words after all currently executing RCU
77 * read-side critical sections have completed. RCU read-side critical
78 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
79 * and may be nested.
80 */
81void synchronize_rcu(void)
82{
83 struct rcu_synchronize rcu;
84
85 if (!rcu_scheduler_active)
86 return;
87
88 init_completion(&rcu.completion);
89 /* Will wake me after RCU finished. */
90 call_rcu(&rcu.head, wakeme_after_rcu);
91 /* Wait for it. */
92 wait_for_completion(&rcu.completion);
93}
94EXPORT_SYMBOL_GPL(synchronize_rcu);
95
96#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
97
98/**
99 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
100 *
101 * Control will return to the caller some time after a full rcu-sched
102 * grace period has elapsed, in other words after all currently executing
103 * rcu-sched read-side critical sections have completed. These read-side
104 * critical sections are delimited by rcu_read_lock_sched() and
105 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
106 * local_irq_disable(), and so on may be used in place of
107 * rcu_read_lock_sched().
108 *
109 * This means that all preempt_disable code sequences, including NMI and
110 * hardware-interrupt handlers, in progress on entry will have completed
111 * before this primitive returns. However, this does not guarantee that
112 * softirq handlers will have completed, since in some kernels, these
113 * handlers can run in process context, and can block.
114 *
115 * This primitive provides the guarantees made by the (now removed)
116 * synchronize_kernel() API. In contrast, synchronize_rcu() only
117 * guarantees that rcu_read_lock() sections will have completed.
118 * In "classic RCU", these two guarantees happen to be one and
119 * the same, but can differ in realtime RCU implementations.
120 */
121void synchronize_sched(void)
122{
123 struct rcu_synchronize rcu;
124
125 if (rcu_blocking_is_gp())
126 return;
127
128 init_completion(&rcu.completion);
129 /* Will wake me after RCU finished. */
130 call_rcu_sched(&rcu.head, wakeme_after_rcu);
131 /* Wait for it. */
132 wait_for_completion(&rcu.completion);
133}
134EXPORT_SYMBOL_GPL(synchronize_sched);
135
136/**
137 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
138 *
139 * Control will return to the caller some time after a full rcu_bh grace
140 * period has elapsed, in other words after all currently executing rcu_bh
141 * read-side critical sections have completed. RCU read-side critical
142 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
143 * and may be nested.
144 */
145void synchronize_rcu_bh(void)
146{
147 struct rcu_synchronize rcu;
148
149 if (rcu_blocking_is_gp())
150 return;
151
152 init_completion(&rcu.completion);
153 /* Will wake me after RCU finished. */
154 call_rcu_bh(&rcu.head, wakeme_after_rcu);
155 /* Wait for it. */
156 wait_for_completion(&rcu.completion);
157}
158EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
159
160static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
161 unsigned long action, void *hcpu)
162{
163 return rcu_cpu_notify(self, action, hcpu);
164}
165
166void __init rcu_init(void)
167{
168 int i;
169
170 __rcu_init();
171 cpu_notifier(rcu_barrier_cpu_hotplug, 0);
172
173 /*
174 * We don't need protection against CPU-hotplug here because
175 * this is called early in boot, before either interrupts
176 * or the scheduler are operational.
177 */
178 for_each_online_cpu(i)
179 rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
180}
181
182void rcu_scheduler_starting(void)
183{
184 WARN_ON(num_online_cpus() != 1);
185 WARN_ON(nr_context_switches() > 0);
186 rcu_scheduler_active = 1;
187}
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
new file mode 100644
index 000000000000..9f6d9ff2572c
--- /dev/null
+++ b/kernel/rcutiny.c
@@ -0,0 +1,282 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU
24 */
25#include <linux/moduleparam.h>
26#include <linux/completion.h>
27#include <linux/interrupt.h>
28#include <linux/notifier.h>
29#include <linux/rcupdate.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/mutex.h>
33#include <linux/sched.h>
34#include <linux/types.h>
35#include <linux/init.h>
36#include <linux/time.h>
37#include <linux/cpu.h>
38
39/* Global control variables for rcupdate callback mechanism. */
40struct rcu_ctrlblk {
41 struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
42 struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
43 struct rcu_head **curtail; /* ->next pointer of last CB. */
44};
45
46/* Definition for rcupdate control block. */
47static struct rcu_ctrlblk rcu_ctrlblk = {
48 .donetail = &rcu_ctrlblk.rcucblist,
49 .curtail = &rcu_ctrlblk.rcucblist,
50};
51
52static struct rcu_ctrlblk rcu_bh_ctrlblk = {
53 .donetail = &rcu_bh_ctrlblk.rcucblist,
54 .curtail = &rcu_bh_ctrlblk.rcucblist,
55};
56
57#ifdef CONFIG_NO_HZ
58
59static long rcu_dynticks_nesting = 1;
60
61/*
62 * Enter dynticks-idle mode, which is an extended quiescent state
63 * if we have fully entered that mode (i.e., if the new value of
64 * dynticks_nesting is zero).
65 */
66void rcu_enter_nohz(void)
67{
68 if (--rcu_dynticks_nesting == 0)
69 rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
70}
71
72/*
73 * Exit dynticks-idle mode, so that we are no longer in an extended
74 * quiescent state.
75 */
76void rcu_exit_nohz(void)
77{
78 rcu_dynticks_nesting++;
79}
80
81#endif /* #ifdef CONFIG_NO_HZ */
82
83/*
84 * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc().
85 * Also disable irqs to avoid confusion due to interrupt handlers
86 * invoking call_rcu().
87 */
88static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
89{
90 unsigned long flags;
91
92 local_irq_save(flags);
93 if (rcp->rcucblist != NULL &&
94 rcp->donetail != rcp->curtail) {
95 rcp->donetail = rcp->curtail;
96 local_irq_restore(flags);
97 return 1;
98 }
99 local_irq_restore(flags);
100
101 return 0;
102}
103
104/*
105 * Record an rcu quiescent state. And an rcu_bh quiescent state while we
106 * are at it, given that any rcu quiescent state is also an rcu_bh
107 * quiescent state. Use "+" instead of "||" to defeat short circuiting.
108 */
109void rcu_sched_qs(int cpu)
110{
111 if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk))
112 raise_softirq(RCU_SOFTIRQ);
113}
114
115/*
116 * Record an rcu_bh quiescent state.
117 */
118void rcu_bh_qs(int cpu)
119{
120 if (rcu_qsctr_help(&rcu_bh_ctrlblk))
121 raise_softirq(RCU_SOFTIRQ);
122}
123
124/*
125 * Check to see if the scheduling-clock interrupt came from an extended
126 * quiescent state, and, if so, tell RCU about it.
127 */
128void rcu_check_callbacks(int cpu, int user)
129{
130 if (user ||
131 (idle_cpu(cpu) &&
132 !in_softirq() &&
133 hardirq_count() <= (1 << HARDIRQ_SHIFT)))
134 rcu_sched_qs(cpu);
135 else if (!in_softirq())
136 rcu_bh_qs(cpu);
137}
138
139/*
140 * Helper function for rcu_process_callbacks() that operates on the
 141 * specified rcu_ctrlblk structure.
142 */
143static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
144{
145 struct rcu_head *next, *list;
146 unsigned long flags;
147
148 /* If no RCU callbacks ready to invoke, just return. */
149 if (&rcp->rcucblist == rcp->donetail)
150 return;
151
152 /* Move the ready-to-invoke callbacks to a local list. */
153 local_irq_save(flags);
154 list = rcp->rcucblist;
155 rcp->rcucblist = *rcp->donetail;
156 *rcp->donetail = NULL;
157 if (rcp->curtail == rcp->donetail)
158 rcp->curtail = &rcp->rcucblist;
159 rcp->donetail = &rcp->rcucblist;
160 local_irq_restore(flags);
161
162 /* Invoke the callbacks on the local list. */
163 while (list) {
164 next = list->next;
165 prefetch(next);
166 list->func(list);
167 list = next;
168 }
169}
170
171/*
172 * Invoke any callbacks whose grace period has completed.
173 */
174static void rcu_process_callbacks(struct softirq_action *unused)
175{
176 __rcu_process_callbacks(&rcu_ctrlblk);
177 __rcu_process_callbacks(&rcu_bh_ctrlblk);
178}
179
180/*
181 * Wait for a grace period to elapse. But it is illegal to invoke
182 * synchronize_sched() from within an RCU read-side critical section.
183 * Therefore, any legal call to synchronize_sched() is a quiescent
184 * state, and so on a UP system, synchronize_sched() need do nothing.
185 * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
186 * benefits of doing might_sleep() to reduce latency.)
187 *
188 * Cool, huh? (Due to Josh Triplett.)
189 *
190 * But we want to make this a static inline later.
191 */
192void synchronize_sched(void)
193{
194 cond_resched();
195}
196EXPORT_SYMBOL_GPL(synchronize_sched);
197
198void synchronize_rcu_bh(void)
199{
200 synchronize_sched();
201}
202EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
203
204/*
205 * Helper function for call_rcu() and call_rcu_bh().
206 */
207static void __call_rcu(struct rcu_head *head,
208 void (*func)(struct rcu_head *rcu),
209 struct rcu_ctrlblk *rcp)
210{
211 unsigned long flags;
212
213 head->func = func;
214 head->next = NULL;
215
216 local_irq_save(flags);
217 *rcp->curtail = head;
218 rcp->curtail = &head->next;
219 local_irq_restore(flags);
220}
221
222/*
223 * Post an RCU callback to be invoked after the end of an RCU grace
224 * period. But since we have but one CPU, that would be after any
225 * quiescent state.
226 */
227void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
228{
229 __call_rcu(head, func, &rcu_ctrlblk);
230}
231EXPORT_SYMBOL_GPL(call_rcu);
232
233/*
234 * Post an RCU bottom-half callback to be invoked after any subsequent
235 * quiescent state.
236 */
237void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
238{
239 __call_rcu(head, func, &rcu_bh_ctrlblk);
240}
241EXPORT_SYMBOL_GPL(call_rcu_bh);
242
243void rcu_barrier(void)
244{
245 struct rcu_synchronize rcu;
246
247 init_completion(&rcu.completion);
248 /* Will wake me after RCU finished. */
249 call_rcu(&rcu.head, wakeme_after_rcu);
250 /* Wait for it. */
251 wait_for_completion(&rcu.completion);
252}
253EXPORT_SYMBOL_GPL(rcu_barrier);
254
255void rcu_barrier_bh(void)
256{
257 struct rcu_synchronize rcu;
258
259 init_completion(&rcu.completion);
260 /* Will wake me after RCU finished. */
261 call_rcu_bh(&rcu.head, wakeme_after_rcu);
262 /* Wait for it. */
263 wait_for_completion(&rcu.completion);
264}
265EXPORT_SYMBOL_GPL(rcu_barrier_bh);
266
267void rcu_barrier_sched(void)
268{
269 struct rcu_synchronize rcu;
270
271 init_completion(&rcu.completion);
272 /* Will wake me after RCU finished. */
273 call_rcu_sched(&rcu.head, wakeme_after_rcu);
274 /* Wait for it. */
275 wait_for_completion(&rcu.completion);
276}
277EXPORT_SYMBOL_GPL(rcu_barrier_sched);
278
279void __init rcu_init(void)
280{
281 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
282}
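
The implementation listed above targets uniprocessor builds: two rcu_ctrlblk callback lists, a softirq that splices off and invokes the "done" segment, and grace-period waits that collapse to cond_resched(). As a hedged illustration of the API it serves (not part of the patch; the struct, functions, and variable names below are invented), a typical caller publishes a new version of a structure and defers freeing the old one through call_rcu():

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
        struct rcu_head rcu;            /* embedded callback header */
};

static struct foo *global_foo;          /* published pointer */

static void foo_reclaim(struct rcu_head *head)
{
        /* Invoked from RCU_SOFTIRQ once a grace period has elapsed. */
        kfree(container_of(head, struct foo, rcu));
}

static int foo_read(void)
{
        struct foo *fp;
        int val;

        rcu_read_lock();
        fp = rcu_dereference(global_foo);
        val = fp ? fp->data : -1;
        rcu_read_unlock();
        return val;
}

static int foo_update(int new_data)     /* update-side lock assumed held */
{
        struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
        struct foo *old_fp = global_foo;

        if (!new_fp)
                return -ENOMEM;
        new_fp->data = new_data;
        rcu_assign_pointer(global_foo, new_fp);
        if (old_fp)
                call_rcu(&old_fp->rcu, foo_reclaim);
        return 0;
}

On the uniprocessor implementation above, the queued rcu_head lands on rcu_ctrlblk via __call_rcu() and is invoked by rcu_process_callbacks() after the next quiescent state.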
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 697c0a0229d4..a621a67ef4e3 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -327,6 +327,11 @@ rcu_torture_cb(struct rcu_head *p)
327 cur_ops->deferred_free(rp); 327 cur_ops->deferred_free(rp);
328} 328}
329 329
330static int rcu_no_completed(void)
331{
332 return 0;
333}
334
330static void rcu_torture_deferred_free(struct rcu_torture *p) 335static void rcu_torture_deferred_free(struct rcu_torture *p)
331{ 336{
332 call_rcu(&p->rtort_rcu, rcu_torture_cb); 337 call_rcu(&p->rtort_rcu, rcu_torture_cb);
@@ -388,6 +393,21 @@ static struct rcu_torture_ops rcu_sync_ops = {
388 .name = "rcu_sync" 393 .name = "rcu_sync"
389}; 394};
390 395
396static struct rcu_torture_ops rcu_expedited_ops = {
397 .init = rcu_sync_torture_init,
398 .cleanup = NULL,
399 .readlock = rcu_torture_read_lock,
400 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
401 .readunlock = rcu_torture_read_unlock,
402 .completed = rcu_no_completed,
403 .deferred_free = rcu_sync_torture_deferred_free,
404 .sync = synchronize_rcu_expedited,
405 .cb_barrier = NULL,
406 .stats = NULL,
407 .irq_capable = 1,
408 .name = "rcu_expedited"
409};
410
391/* 411/*
392 * Definitions for rcu_bh torture testing. 412 * Definitions for rcu_bh torture testing.
393 */ 413 */
@@ -547,6 +567,25 @@ static struct rcu_torture_ops srcu_ops = {
547 .name = "srcu" 567 .name = "srcu"
548}; 568};
549 569
570static void srcu_torture_synchronize_expedited(void)
571{
572 synchronize_srcu_expedited(&srcu_ctl);
573}
574
575static struct rcu_torture_ops srcu_expedited_ops = {
576 .init = srcu_torture_init,
577 .cleanup = srcu_torture_cleanup,
578 .readlock = srcu_torture_read_lock,
579 .read_delay = srcu_read_delay,
580 .readunlock = srcu_torture_read_unlock,
581 .completed = srcu_torture_completed,
582 .deferred_free = rcu_sync_torture_deferred_free,
583 .sync = srcu_torture_synchronize_expedited,
584 .cb_barrier = NULL,
585 .stats = srcu_torture_stats,
586 .name = "srcu_expedited"
587};
588
550/* 589/*
551 * Definitions for sched torture testing. 590 * Definitions for sched torture testing.
552 */ 591 */
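
For context on what srcu_torture_synchronize_expedited() exercises, here is a hedged sketch of ordinary SRCU usage paired with the expedited grace-period wait; the srcu_struct, pointer, and function names are invented for illustration, and init_srcu_struct()/cleanup_srcu_struct() are assumed to run at module init/exit:

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct srcu_struct my_srcu;      /* illustrative private SRCU domain */
static int *shared_p;

static int my_reader(void)
{
        int idx, v;
        int *p;

        idx = srcu_read_lock(&my_srcu); /* SRCU readers may sleep */
        p = rcu_dereference(shared_p);
        v = p ? *p : 0;
        srcu_read_unlock(&my_srcu, idx);
        return v;
}

static void my_update(int *newp)        /* update-side lock assumed held */
{
        int *oldp = shared_p;

        rcu_assign_pointer(shared_p, newp);
        synchronize_srcu_expedited(&my_srcu);   /* what the .sync hook above wraps */
        kfree(oldp);
}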
@@ -562,11 +601,6 @@ static void sched_torture_read_unlock(int idx)
562 preempt_enable(); 601 preempt_enable();
563} 602}
564 603
565static int sched_torture_completed(void)
566{
567 return 0;
568}
569
570static void rcu_sched_torture_deferred_free(struct rcu_torture *p) 604static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
571{ 605{
572 call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); 606 call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
@@ -583,7 +617,7 @@ static struct rcu_torture_ops sched_ops = {
583 .readlock = sched_torture_read_lock, 617 .readlock = sched_torture_read_lock,
584 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 618 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
585 .readunlock = sched_torture_read_unlock, 619 .readunlock = sched_torture_read_unlock,
586 .completed = sched_torture_completed, 620 .completed = rcu_no_completed,
587 .deferred_free = rcu_sched_torture_deferred_free, 621 .deferred_free = rcu_sched_torture_deferred_free,
588 .sync = sched_torture_synchronize, 622 .sync = sched_torture_synchronize,
589 .cb_barrier = rcu_barrier_sched, 623 .cb_barrier = rcu_barrier_sched,
@@ -592,13 +626,13 @@ static struct rcu_torture_ops sched_ops = {
592 .name = "sched" 626 .name = "sched"
593}; 627};
594 628
595static struct rcu_torture_ops sched_ops_sync = { 629static struct rcu_torture_ops sched_sync_ops = {
596 .init = rcu_sync_torture_init, 630 .init = rcu_sync_torture_init,
597 .cleanup = NULL, 631 .cleanup = NULL,
598 .readlock = sched_torture_read_lock, 632 .readlock = sched_torture_read_lock,
599 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 633 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
600 .readunlock = sched_torture_read_unlock, 634 .readunlock = sched_torture_read_unlock,
601 .completed = sched_torture_completed, 635 .completed = rcu_no_completed,
602 .deferred_free = rcu_sync_torture_deferred_free, 636 .deferred_free = rcu_sync_torture_deferred_free,
603 .sync = sched_torture_synchronize, 637 .sync = sched_torture_synchronize,
604 .cb_barrier = NULL, 638 .cb_barrier = NULL,
@@ -612,7 +646,7 @@ static struct rcu_torture_ops sched_expedited_ops = {
612 .readlock = sched_torture_read_lock, 646 .readlock = sched_torture_read_lock,
613 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 647 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
614 .readunlock = sched_torture_read_unlock, 648 .readunlock = sched_torture_read_unlock,
615 .completed = sched_torture_completed, 649 .completed = rcu_no_completed,
616 .deferred_free = rcu_sync_torture_deferred_free, 650 .deferred_free = rcu_sync_torture_deferred_free,
617 .sync = synchronize_sched_expedited, 651 .sync = synchronize_sched_expedited,
618 .cb_barrier = NULL, 652 .cb_barrier = NULL,
@@ -1097,9 +1131,10 @@ rcu_torture_init(void)
1097 int cpu; 1131 int cpu;
1098 int firsterr = 0; 1132 int firsterr = 0;
1099 static struct rcu_torture_ops *torture_ops[] = 1133 static struct rcu_torture_ops *torture_ops[] =
1100 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, 1134 { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
1101 &sched_expedited_ops, 1135 &rcu_bh_ops, &rcu_bh_sync_ops,
1102 &srcu_ops, &sched_ops, &sched_ops_sync, }; 1136 &srcu_ops, &srcu_expedited_ops,
1137 &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
1103 1138
1104 mutex_lock(&fullstop_mutex); 1139 mutex_lock(&fullstop_mutex);
1105 1140
@@ -1110,8 +1145,12 @@ rcu_torture_init(void)
1110 break; 1145 break;
1111 } 1146 }
1112 if (i == ARRAY_SIZE(torture_ops)) { 1147 if (i == ARRAY_SIZE(torture_ops)) {
1113 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", 1148 printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
1114 torture_type); 1149 torture_type);
1150 printk(KERN_ALERT "rcu-torture types:");
1151 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1152 printk(KERN_ALERT " %s", torture_ops[i]->name);
1153 printk(KERN_ALERT "\n");
1115 mutex_unlock(&fullstop_mutex); 1154 mutex_unlock(&fullstop_mutex);
1116 return -EINVAL; 1155 return -EINVAL;
1117 } 1156 }
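
The rcutorture hunks above mostly extend the dispatch-by-name ops table: the torture_type string (a module parameter) is matched against each entry's ->name, and the test run then goes through the selected function pointers such as ->sync. A rough, hypothetical mirror of that lookup pattern (not the file's actual code):

#include <linux/string.h>

/* Hypothetical sketch of the rcu_torture_ops lookup in rcu_torture_init(). */
struct flavor_ops {
        void (*sync)(void);             /* e.g. synchronize_rcu_expedited */
        const char *name;               /* e.g. "rcu_expedited" */
};

static struct flavor_ops *
flavor_lookup(struct flavor_ops **tab, int n, const char *type)
{
        int i;

        for (i = 0; i < n; i++)
                if (strcmp(type, tab[i]->name) == 0)
                        return tab[i];  /* becomes cur_ops for the test run */
        return NULL;                    /* caller lists the valid names and bails out */
}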
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f3077c0ab181..53ae9598f798 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,18 +46,22 @@
46#include <linux/cpu.h> 46#include <linux/cpu.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/time.h> 48#include <linux/time.h>
49#include <linux/kernel_stat.h>
49 50
50#include "rcutree.h" 51#include "rcutree.h"
51 52
52/* Data structures. */ 53/* Data structures. */
53 54
55static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
56
54#define RCU_STATE_INITIALIZER(name) { \ 57#define RCU_STATE_INITIALIZER(name) { \
55 .level = { &name.node[0] }, \ 58 .level = { &name.node[0] }, \
56 .levelcnt = { \ 59 .levelcnt = { \
57 NUM_RCU_LVL_0, /* root of hierarchy. */ \ 60 NUM_RCU_LVL_0, /* root of hierarchy. */ \
58 NUM_RCU_LVL_1, \ 61 NUM_RCU_LVL_1, \
59 NUM_RCU_LVL_2, \ 62 NUM_RCU_LVL_2, \
60 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ 63 NUM_RCU_LVL_3, \
64 NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
61 }, \ 65 }, \
62 .signaled = RCU_GP_IDLE, \ 66 .signaled = RCU_GP_IDLE, \
63 .gpnum = -300, \ 67 .gpnum = -300, \
@@ -77,6 +81,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
77struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
78DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
79 83
84static int rcu_scheduler_active __read_mostly;
85
80 86
81/* 87/*
82 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s 88 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
@@ -98,7 +104,7 @@ void rcu_sched_qs(int cpu)
98 struct rcu_data *rdp; 104 struct rcu_data *rdp;
99 105
100 rdp = &per_cpu(rcu_sched_data, cpu); 106 rdp = &per_cpu(rcu_sched_data, cpu);
101 rdp->passed_quiesc_completed = rdp->completed; 107 rdp->passed_quiesc_completed = rdp->gpnum - 1;
102 barrier(); 108 barrier();
103 rdp->passed_quiesc = 1; 109 rdp->passed_quiesc = 1;
104 rcu_preempt_note_context_switch(cpu); 110 rcu_preempt_note_context_switch(cpu);
@@ -109,7 +115,7 @@ void rcu_bh_qs(int cpu)
109 struct rcu_data *rdp; 115 struct rcu_data *rdp;
110 116
111 rdp = &per_cpu(rcu_bh_data, cpu); 117 rdp = &per_cpu(rcu_bh_data, cpu);
112 rdp->passed_quiesc_completed = rdp->completed; 118 rdp->passed_quiesc_completed = rdp->gpnum - 1;
113 barrier(); 119 barrier();
114 rdp->passed_quiesc = 1; 120 rdp->passed_quiesc = 1;
115} 121}
@@ -335,28 +341,9 @@ void rcu_irq_exit(void)
335 set_need_resched(); 341 set_need_resched();
336} 342}
337 343
338/*
339 * Record the specified "completed" value, which is later used to validate
340 * dynticks counter manipulations. Specify "rsp->completed - 1" to
341 * unconditionally invalidate any future dynticks manipulations (which is
342 * useful at the beginning of a grace period).
343 */
344static void dyntick_record_completed(struct rcu_state *rsp, long comp)
345{
346 rsp->dynticks_completed = comp;
347}
348
349#ifdef CONFIG_SMP 344#ifdef CONFIG_SMP
350 345
351/* 346/*
352 * Recall the previously recorded value of the completion for dynticks.
353 */
354static long dyntick_recall_completed(struct rcu_state *rsp)
355{
356 return rsp->dynticks_completed;
357}
358
359/*
360 * Snapshot the specified CPU's dynticks counter so that we can later 347 * Snapshot the specified CPU's dynticks counter so that we can later
361 * credit them with an implicit quiescent state. Return 1 if this CPU 348 * credit them with an implicit quiescent state. Return 1 if this CPU
362 * is in dynticks idle mode, which is an extended quiescent state. 349 * is in dynticks idle mode, which is an extended quiescent state.
@@ -419,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
419 406
420#else /* #ifdef CONFIG_NO_HZ */ 407#else /* #ifdef CONFIG_NO_HZ */
421 408
422static void dyntick_record_completed(struct rcu_state *rsp, long comp)
423{
424}
425
426#ifdef CONFIG_SMP 409#ifdef CONFIG_SMP
427 410
428/*
429 * If there are no dynticks, then the only way that a CPU can passively
430 * be in a quiescent state is to be offline. Unlike dynticks idle, which
431 * is a point in time during the prior (already finished) grace period,
432 * an offline CPU is always in a quiescent state, and thus can be
433 * unconditionally applied. So just return the current value of completed.
434 */
435static long dyntick_recall_completed(struct rcu_state *rsp)
436{
437 return rsp->completed;
438}
439
440static int dyntick_save_progress_counter(struct rcu_data *rdp) 411static int dyntick_save_progress_counter(struct rcu_data *rdp)
441{ 412{
442 return 0; 413 return 0;
@@ -553,13 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
553/* 524/*
554 * Update CPU-local rcu_data state to record the newly noticed grace period. 525 * Update CPU-local rcu_data state to record the newly noticed grace period.
555 * This is used both when we started the grace period and when we notice 526 * This is used both when we started the grace period and when we notice
556 * that someone else started the grace period. 527 * that someone else started the grace period. The caller must hold the
528 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
529 * and must have irqs disabled.
557 */ 530 */
531static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
532{
533 if (rdp->gpnum != rnp->gpnum) {
534 rdp->qs_pending = 1;
535 rdp->passed_quiesc = 0;
536 rdp->gpnum = rnp->gpnum;
537 }
538}
539
558static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) 540static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
559{ 541{
560 rdp->qs_pending = 1; 542 unsigned long flags;
561 rdp->passed_quiesc = 0; 543 struct rcu_node *rnp;
562 rdp->gpnum = rsp->gpnum; 544
545 local_irq_save(flags);
546 rnp = rdp->mynode;
547 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
548 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
549 local_irq_restore(flags);
550 return;
551 }
552 __note_new_gpnum(rsp, rnp, rdp);
553 spin_unlock_irqrestore(&rnp->lock, flags);
563} 554}
564 555
565/* 556/*
@@ -583,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
583} 574}
584 575
585/* 576/*
577 * Advance this CPU's callbacks, but only if the current grace period
 578 * has ended. This may be called only from the CPU to which the rdp
579 * belongs. In addition, the corresponding leaf rcu_node structure's
580 * ->lock must be held by the caller, with irqs disabled.
581 */
582static void
583__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
584{
585 /* Did another grace period end? */
586 if (rdp->completed != rnp->completed) {
587
588 /* Advance callbacks. No harm if list empty. */
589 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
590 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
591 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
592
593 /* Remember that we saw this grace-period completion. */
594 rdp->completed = rnp->completed;
595 }
596}
597
598/*
599 * Advance this CPU's callbacks, but only if the current grace period
 600 * has ended. This may be called only from the CPU to which the rdp
601 * belongs.
602 */
603static void
604rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
605{
606 unsigned long flags;
607 struct rcu_node *rnp;
608
609 local_irq_save(flags);
610 rnp = rdp->mynode;
611 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
612 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
613 local_irq_restore(flags);
614 return;
615 }
616 __rcu_process_gp_end(rsp, rnp, rdp);
617 spin_unlock_irqrestore(&rnp->lock, flags);
618}
619
620/*
621 * Do per-CPU grace-period initialization for running CPU. The caller
622 * must hold the lock of the leaf rcu_node structure corresponding to
623 * this CPU.
624 */
625static void
626rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
627{
628 /* Prior grace period ended, so advance callbacks for current CPU. */
629 __rcu_process_gp_end(rsp, rnp, rdp);
630
631 /*
632 * Because this CPU just now started the new grace period, we know
633 * that all of its callbacks will be covered by this upcoming grace
634 * period, even the ones that were registered arbitrarily recently.
635 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
636 *
637 * Other CPUs cannot be sure exactly when the grace period started.
638 * Therefore, their recently registered callbacks must pass through
639 * an additional RCU_NEXT_READY stage, so that they will be handled
640 * by the next RCU grace period.
641 */
642 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
643 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
644
645 /* Set state so that this CPU will detect the next quiescent state. */
646 __note_new_gpnum(rsp, rnp, rdp);
647}
648
649/*
586 * Start a new RCU grace period if warranted, re-initializing the hierarchy 650 * Start a new RCU grace period if warranted, re-initializing the hierarchy
587 * in preparation for detecting the next grace period. The caller must hold 651 * in preparation for detecting the next grace period. The caller must hold
588 * the root node's ->lock, which is released before return. Hard irqs must 652 * the root node's ->lock, which is released before return. Hard irqs must
@@ -596,7 +660,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
596 struct rcu_node *rnp = rcu_get_root(rsp); 660 struct rcu_node *rnp = rcu_get_root(rsp);
597 661
598 if (!cpu_needs_another_gp(rsp, rdp)) { 662 if (!cpu_needs_another_gp(rsp, rdp)) {
599 spin_unlock_irqrestore(&rnp->lock, flags); 663 if (rnp->completed == rsp->completed) {
664 spin_unlock_irqrestore(&rnp->lock, flags);
665 return;
666 }
667 spin_unlock(&rnp->lock); /* irqs remain disabled. */
668
669 /*
670 * Propagate new ->completed value to rcu_node structures
671 * so that other CPUs don't have to wait until the start
672 * of the next grace period to process their callbacks.
673 */
674 rcu_for_each_node_breadth_first(rsp, rnp) {
675 spin_lock(&rnp->lock); /* irqs already disabled. */
676 rnp->completed = rsp->completed;
677 spin_unlock(&rnp->lock); /* irqs remain disabled. */
678 }
679 local_irq_restore(flags);
600 return; 680 return;
601 } 681 }
602 682
@@ -606,29 +686,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
606 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ 686 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
607 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 687 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
608 record_gp_stall_check_time(rsp); 688 record_gp_stall_check_time(rsp);
609 dyntick_record_completed(rsp, rsp->completed - 1);
610 note_new_gpnum(rsp, rdp);
611
612 /*
613 * Because this CPU just now started the new grace period, we know
614 * that all of its callbacks will be covered by this upcoming grace
615 * period, even the ones that were registered arbitrarily recently.
616 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
617 *
618 * Other CPUs cannot be sure exactly when the grace period started.
619 * Therefore, their recently registered callbacks must pass through
620 * an additional RCU_NEXT_READY stage, so that they will be handled
621 * by the next RCU grace period.
622 */
623 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
624 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
625 689
626 /* Special-case the common single-level case. */ 690 /* Special-case the common single-level case. */
627 if (NUM_RCU_NODES == 1) { 691 if (NUM_RCU_NODES == 1) {
628 rcu_preempt_check_blocked_tasks(rnp); 692 rcu_preempt_check_blocked_tasks(rnp);
629 rnp->qsmask = rnp->qsmaskinit; 693 rnp->qsmask = rnp->qsmaskinit;
630 rnp->gpnum = rsp->gpnum; 694 rnp->gpnum = rsp->gpnum;
695 rnp->completed = rsp->completed;
631 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ 696 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
697 rcu_start_gp_per_cpu(rsp, rnp, rdp);
632 spin_unlock_irqrestore(&rnp->lock, flags); 698 spin_unlock_irqrestore(&rnp->lock, flags);
633 return; 699 return;
634 } 700 }
@@ -661,6 +727,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
661 rcu_preempt_check_blocked_tasks(rnp); 727 rcu_preempt_check_blocked_tasks(rnp);
662 rnp->qsmask = rnp->qsmaskinit; 728 rnp->qsmask = rnp->qsmaskinit;
663 rnp->gpnum = rsp->gpnum; 729 rnp->gpnum = rsp->gpnum;
730 rnp->completed = rsp->completed;
731 if (rnp == rdp->mynode)
732 rcu_start_gp_per_cpu(rsp, rnp, rdp);
664 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 733 spin_unlock(&rnp->lock); /* irqs remain disabled. */
665 } 734 }
666 735
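
The tail-pointer copies performed by __rcu_process_gp_end() and rcu_start_gp_per_cpu() above act on the per-CPU segmented callback list: one singly linked list whose segments (done, waiting, next-ready, next) are delimited by the nxttail[] pointers. The following stand-alone model, with invented names, may make the pointer manipulation easier to follow:

#include <stddef.h>

struct cb {
        struct cb *next;
};

enum { DONE_TAIL, WAIT_TAIL, NEXT_READY_TAIL, NEXT_TAIL, NSEG };

struct seglist {
        struct cb *head;                /* all queued callbacks, in order */
        struct cb **tail[NSEG];         /* each points at a ->next (or at head) */
};

static void seglist_init(struct seglist *sl)
{
        int i;

        sl->head = NULL;
        for (i = 0; i < NSEG; i++)
                sl->tail[i] = &sl->head;        /* all segments empty */
}

/* Enqueue at the very end, as __call_rcu() does through RCU_NEXT_TAIL. */
static void seglist_enqueue(struct seglist *sl, struct cb *c)
{
        c->next = NULL;
        *sl->tail[NEXT_TAIL] = c;
        sl->tail[NEXT_TAIL] = &c->next;
}

/* A completed grace period promotes every segment one stage forward. */
static void seglist_advance(struct seglist *sl)
{
        sl->tail[DONE_TAIL] = sl->tail[WAIT_TAIL];
        sl->tail[WAIT_TAIL] = sl->tail[NEXT_READY_TAIL];
        sl->tail[NEXT_READY_TAIL] = sl->tail[NEXT_TAIL];
}

Everything up to tail[DONE_TAIL] is then ready to invoke.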
@@ -672,58 +741,32 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
672} 741}
673 742
674/* 743/*
675 * Advance this CPU's callbacks, but only if the current grace period 744 * Report a full set of quiescent states to the specified rcu_state
676 * has ended. This may be called only from the CPU to whom the rdp 745 * data structure. This involves cleaning up after the prior grace
677 * belongs. 746 * period and letting rcu_start_gp() start up the next grace period
747 * if one is needed. Note that the caller must hold rnp->lock, as
748 * required by rcu_start_gp(), which will release it.
678 */ 749 */
679static void 750static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
680rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
681{
682 long completed_snap;
683 unsigned long flags;
684
685 local_irq_save(flags);
686 completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
687
688 /* Did another grace period end? */
689 if (rdp->completed != completed_snap) {
690
691 /* Advance callbacks. No harm if list empty. */
692 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
693 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
694 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
695
696 /* Remember that we saw this grace-period completion. */
697 rdp->completed = completed_snap;
698 }
699 local_irq_restore(flags);
700}
701
702/*
703 * Clean up after the prior grace period and let rcu_start_gp() start up
704 * the next grace period if one is needed. Note that the caller must
705 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
706 */
707static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
708 __releases(rcu_get_root(rsp)->lock) 751 __releases(rcu_get_root(rsp)->lock)
709{ 752{
710 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 753 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
711 rsp->completed = rsp->gpnum; 754 rsp->completed = rsp->gpnum;
712 rsp->signaled = RCU_GP_IDLE; 755 rsp->signaled = RCU_GP_IDLE;
713 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
714 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ 756 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
715} 757}
716 758
717/* 759/*
718 * Similar to cpu_quiet(), for which it is a helper function. Allows 760 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
719 * a group of CPUs to be quieted at one go, though all the CPUs in the 761 * Allows quiescent states for a group of CPUs to be reported at one go
720 * group must be represented by the same leaf rcu_node structure. 762 * to the specified rcu_node structure, though all the CPUs in the group
721 * That structure's lock must be held upon entry, and it is released 763 * must be represented by the same rcu_node structure (which need not be
722 * before return. 764 * a leaf rcu_node structure, though it often will be). That structure's
765 * lock must be held upon entry, and it is released before return.
723 */ 766 */
724static void 767static void
725cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, 768rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
726 unsigned long flags) 769 struct rcu_node *rnp, unsigned long flags)
727 __releases(rnp->lock) 770 __releases(rnp->lock)
728{ 771{
729 struct rcu_node *rnp_c; 772 struct rcu_node *rnp_c;
@@ -759,21 +802,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
759 802
760 /* 803 /*
761 * Get here if we are the last CPU to pass through a quiescent 804 * Get here if we are the last CPU to pass through a quiescent
762 * state for this grace period. Invoke cpu_quiet_msk_finish() 805 * state for this grace period. Invoke rcu_report_qs_rsp()
763 * to clean up and start the next grace period if one is needed. 806 * to clean up and start the next grace period if one is needed.
764 */ 807 */
765 cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ 808 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
766} 809}
767 810
768/* 811/*
769 * Record a quiescent state for the specified CPU, which must either be 812 * Record a quiescent state for the specified CPU to that CPU's rcu_data
770 * the current CPU. The lastcomp argument is used to make sure we are 813 * structure. This must be either called from the specified CPU, or
771 * still in the grace period of interest. We don't want to end the current 814 * called when the specified CPU is known to be offline (and when it is
772 * grace period based on quiescent states detected in an earlier grace 815 * also known that no other CPU is concurrently trying to help the offline
773 * period! 816 * CPU). The lastcomp argument is used to make sure we are still in the
817 * grace period of interest. We don't want to end the current grace period
818 * based on quiescent states detected in an earlier grace period!
774 */ 819 */
775static void 820static void
776cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) 821rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
777{ 822{
778 unsigned long flags; 823 unsigned long flags;
779 unsigned long mask; 824 unsigned long mask;
@@ -781,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
781 826
782 rnp = rdp->mynode; 827 rnp = rdp->mynode;
783 spin_lock_irqsave(&rnp->lock, flags); 828 spin_lock_irqsave(&rnp->lock, flags);
784 if (lastcomp != ACCESS_ONCE(rsp->completed)) { 829 if (lastcomp != rnp->completed) {
785 830
786 /* 831 /*
787 * Someone beat us to it for this grace period, so leave. 832 * Someone beat us to it for this grace period, so leave.
788 * The race with GP start is resolved by the fact that we 833 * The race with GP start is resolved by the fact that we
789 * hold the leaf rcu_node lock, so that the per-CPU bits 834 * hold the leaf rcu_node lock, so that the per-CPU bits
790 * cannot yet be initialized -- so we would simply find our 835 * cannot yet be initialized -- so we would simply find our
791 * CPU's bit already cleared in cpu_quiet_msk() if this race 836 * CPU's bit already cleared in rcu_report_qs_rnp() if this
792 * occurred. 837 * race occurred.
793 */ 838 */
794 rdp->passed_quiesc = 0; /* try again later! */ 839 rdp->passed_quiesc = 0; /* try again later! */
795 spin_unlock_irqrestore(&rnp->lock, flags); 840 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -807,7 +852,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
807 */ 852 */
808 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 853 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
809 854
 810 cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ 855 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */
811 } 856 }
812} 857}
813 858
@@ -838,8 +883,11 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
838 if (!rdp->passed_quiesc) 883 if (!rdp->passed_quiesc)
839 return; 884 return;
840 885
841 /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ 886 /*
842 cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); 887 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
888 * judge of that).
889 */
890 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
843} 891}
844 892
845#ifdef CONFIG_HOTPLUG_CPU 893#ifdef CONFIG_HOTPLUG_CPU
@@ -899,8 +947,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
899static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) 947static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
900{ 948{
901 unsigned long flags; 949 unsigned long flags;
902 long lastcomp;
903 unsigned long mask; 950 unsigned long mask;
951 int need_report = 0;
904 struct rcu_data *rdp = rsp->rda[cpu]; 952 struct rcu_data *rdp = rsp->rda[cpu];
905 struct rcu_node *rnp; 953 struct rcu_node *rnp;
906 954
@@ -914,30 +962,32 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
914 spin_lock(&rnp->lock); /* irqs already disabled. */ 962 spin_lock(&rnp->lock); /* irqs already disabled. */
915 rnp->qsmaskinit &= ~mask; 963 rnp->qsmaskinit &= ~mask;
916 if (rnp->qsmaskinit != 0) { 964 if (rnp->qsmaskinit != 0) {
917 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 965 if (rnp != rdp->mynode)
966 spin_unlock(&rnp->lock); /* irqs remain disabled. */
918 break; 967 break;
919 } 968 }
920 969 if (rnp == rdp->mynode)
921 /* 970 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
922 * If there was a task blocking the current grace period, 971 else
923 * and if all CPUs have checked in, we need to propagate 972 spin_unlock(&rnp->lock); /* irqs remain disabled. */
924 * the quiescent state up the rcu_node hierarchy. But that
925 * is inconvenient at the moment due to deadlock issues if
926 * this should end the current grace period. So set the
927 * offlined CPU's bit in ->qsmask in order to force the
928 * next force_quiescent_state() invocation to clean up this
929 * mess in a deadlock-free manner.
930 */
931 if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
932 rnp->qsmask |= mask;
933
934 mask = rnp->grpmask; 973 mask = rnp->grpmask;
935 spin_unlock(&rnp->lock); /* irqs remain disabled. */
936 rnp = rnp->parent; 974 rnp = rnp->parent;
937 } while (rnp != NULL); 975 } while (rnp != NULL);
938 lastcomp = rsp->completed;
939 976
940 spin_unlock_irqrestore(&rsp->onofflock, flags); 977 /*
978 * We still hold the leaf rcu_node structure lock here, and
979 * irqs are still disabled. The reason for this subterfuge is
 980 * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
981 * held leads to deadlock.
982 */
983 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
984 rnp = rdp->mynode;
985 if (need_report & RCU_OFL_TASKS_NORM_GP)
986 rcu_report_unblock_qs_rnp(rnp, flags);
987 else
988 spin_unlock_irqrestore(&rnp->lock, flags);
989 if (need_report & RCU_OFL_TASKS_EXP_GP)
990 rcu_report_exp_rnp(rsp, rnp);
941 991
942 rcu_adopt_orphan_cbs(rsp); 992 rcu_adopt_orphan_cbs(rsp);
943} 993}
@@ -1109,7 +1159,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1109 rcu_for_each_leaf_node(rsp, rnp) { 1159 rcu_for_each_leaf_node(rsp, rnp) {
1110 mask = 0; 1160 mask = 0;
1111 spin_lock_irqsave(&rnp->lock, flags); 1161 spin_lock_irqsave(&rnp->lock, flags);
1112 if (rsp->completed != lastcomp) { 1162 if (rnp->completed != lastcomp) {
1113 spin_unlock_irqrestore(&rnp->lock, flags); 1163 spin_unlock_irqrestore(&rnp->lock, flags);
1114 return 1; 1164 return 1;
1115 } 1165 }
@@ -1123,10 +1173,10 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1123 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) 1173 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
1124 mask |= bit; 1174 mask |= bit;
1125 } 1175 }
1126 if (mask != 0 && rsp->completed == lastcomp) { 1176 if (mask != 0 && rnp->completed == lastcomp) {
1127 1177
1128 /* cpu_quiet_msk() releases rnp->lock. */ 1178 /* rcu_report_qs_rnp() releases rnp->lock. */
1129 cpu_quiet_msk(mask, rsp, rnp, flags); 1179 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1130 continue; 1180 continue;
1131 } 1181 }
1132 spin_unlock_irqrestore(&rnp->lock, flags); 1182 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1144,6 +1194,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1144 long lastcomp; 1194 long lastcomp;
1145 struct rcu_node *rnp = rcu_get_root(rsp); 1195 struct rcu_node *rnp = rcu_get_root(rsp);
1146 u8 signaled; 1196 u8 signaled;
1197 u8 forcenow;
1147 1198
1148 if (!rcu_gp_in_progress(rsp)) 1199 if (!rcu_gp_in_progress(rsp))
1149 return; /* No grace period in progress, nothing to force. */ 1200 return; /* No grace period in progress, nothing to force. */
@@ -1156,10 +1207,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1156 goto unlock_ret; /* no emergency and done recently. */ 1207 goto unlock_ret; /* no emergency and done recently. */
1157 rsp->n_force_qs++; 1208 rsp->n_force_qs++;
1158 spin_lock(&rnp->lock); 1209 spin_lock(&rnp->lock);
1159 lastcomp = rsp->completed; 1210 lastcomp = rsp->gpnum - 1;
1160 signaled = rsp->signaled; 1211 signaled = rsp->signaled;
1161 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 1212 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
 1162 if (lastcomp == rsp->gpnum) { 1213 if (!rcu_gp_in_progress(rsp)) {
1163 rsp->n_force_qs_ngp++; 1214 rsp->n_force_qs_ngp++;
1164 spin_unlock(&rnp->lock); 1215 spin_unlock(&rnp->lock);
1165 goto unlock_ret; /* no GP in progress, time updated. */ 1216 goto unlock_ret; /* no GP in progress, time updated. */
@@ -1180,21 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1180 if (rcu_process_dyntick(rsp, lastcomp, 1231 if (rcu_process_dyntick(rsp, lastcomp,
1181 dyntick_save_progress_counter)) 1232 dyntick_save_progress_counter))
1182 goto unlock_ret; 1233 goto unlock_ret;
1234 /* fall into next case. */
1235
1236 case RCU_SAVE_COMPLETED:
1183 1237
1184 /* Update state, record completion counter. */ 1238 /* Update state, record completion counter. */
1239 forcenow = 0;
1185 spin_lock(&rnp->lock); 1240 spin_lock(&rnp->lock);
1186 if (lastcomp == rsp->completed && 1241 if (lastcomp + 1 == rsp->gpnum &&
1187 rsp->signaled == RCU_SAVE_DYNTICK) { 1242 lastcomp == rsp->completed &&
1243 rsp->signaled == signaled) {
1188 rsp->signaled = RCU_FORCE_QS; 1244 rsp->signaled = RCU_FORCE_QS;
1189 dyntick_record_completed(rsp, lastcomp); 1245 rsp->completed_fqs = lastcomp;
1246 forcenow = signaled == RCU_SAVE_COMPLETED;
1190 } 1247 }
1191 spin_unlock(&rnp->lock); 1248 spin_unlock(&rnp->lock);
1192 break; 1249 if (!forcenow)
1250 break;
1251 /* fall into next case. */
1193 1252
1194 case RCU_FORCE_QS: 1253 case RCU_FORCE_QS:
1195 1254
 1196 /* Check dyntick-idle state, send IPI to laggards. */ 1255 /* Check dyntick-idle state, send IPI to laggards. */
1197 if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), 1256 if (rcu_process_dyntick(rsp, rsp->completed_fqs,
1198 rcu_implicit_dynticks_qs)) 1257 rcu_implicit_dynticks_qs))
1199 goto unlock_ret; 1258 goto unlock_ret;
1200 1259
@@ -1351,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1351} 1410}
1352EXPORT_SYMBOL_GPL(call_rcu_bh); 1411EXPORT_SYMBOL_GPL(call_rcu_bh);
1353 1412
1413/**
1414 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
1415 *
1416 * Control will return to the caller some time after a full rcu-sched
1417 * grace period has elapsed, in other words after all currently executing
1418 * rcu-sched read-side critical sections have completed. These read-side
1419 * critical sections are delimited by rcu_read_lock_sched() and
1420 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
1421 * local_irq_disable(), and so on may be used in place of
1422 * rcu_read_lock_sched().
1423 *
1424 * This means that all preempt_disable code sequences, including NMI and
1425 * hardware-interrupt handlers, in progress on entry will have completed
1426 * before this primitive returns. However, this does not guarantee that
1427 * softirq handlers will have completed, since in some kernels, these
1428 * handlers can run in process context, and can block.
1429 *
1430 * This primitive provides the guarantees made by the (now removed)
1431 * synchronize_kernel() API. In contrast, synchronize_rcu() only
1432 * guarantees that rcu_read_lock() sections will have completed.
1433 * In "classic RCU", these two guarantees happen to be one and
1434 * the same, but can differ in realtime RCU implementations.
1435 */
1436void synchronize_sched(void)
1437{
1438 struct rcu_synchronize rcu;
1439
1440 if (rcu_blocking_is_gp())
1441 return;
1442
1443 init_completion(&rcu.completion);
1444 /* Will wake me after RCU finished. */
1445 call_rcu_sched(&rcu.head, wakeme_after_rcu);
1446 /* Wait for it. */
1447 wait_for_completion(&rcu.completion);
1448}
1449EXPORT_SYMBOL_GPL(synchronize_sched);
1450
1451/**
1452 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
1453 *
1454 * Control will return to the caller some time after a full rcu_bh grace
1455 * period has elapsed, in other words after all currently executing rcu_bh
1456 * read-side critical sections have completed. RCU read-side critical
1457 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
1458 * and may be nested.
1459 */
1460void synchronize_rcu_bh(void)
1461{
1462 struct rcu_synchronize rcu;
1463
1464 if (rcu_blocking_is_gp())
1465 return;
1466
1467 init_completion(&rcu.completion);
1468 /* Will wake me after RCU finished. */
1469 call_rcu_bh(&rcu.head, wakeme_after_rcu);
1470 /* Wait for it. */
1471 wait_for_completion(&rcu.completion);
1472}
1473EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
1474
1354/* 1475/*
1355 * Check to see if there is any immediate RCU-related work to be done 1476 * Check to see if there is any immediate RCU-related work to be done
1356 * by the current CPU, for the specified type of RCU, returning 1 if so. 1477 * by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -1360,6 +1481,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
1360 */ 1481 */
1361static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) 1482static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1362{ 1483{
1484 struct rcu_node *rnp = rdp->mynode;
1485
1363 rdp->n_rcu_pending++; 1486 rdp->n_rcu_pending++;
1364 1487
1365 /* Check for CPU stalls, if enabled. */ 1488 /* Check for CPU stalls, if enabled. */
@@ -1384,13 +1507,13 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1384 } 1507 }
1385 1508
1386 /* Has another RCU grace period completed? */ 1509 /* Has another RCU grace period completed? */
1387 if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */ 1510 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
1388 rdp->n_rp_gp_completed++; 1511 rdp->n_rp_gp_completed++;
1389 return 1; 1512 return 1;
1390 } 1513 }
1391 1514
1392 /* Has a new RCU grace period started? */ 1515 /* Has a new RCU grace period started? */
1393 if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */ 1516 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
1394 rdp->n_rp_gp_started++; 1517 rdp->n_rp_gp_started++;
1395 return 1; 1518 return 1;
1396 } 1519 }
@@ -1433,6 +1556,21 @@ int rcu_needs_cpu(int cpu)
1433 rcu_preempt_needs_cpu(cpu); 1556 rcu_preempt_needs_cpu(cpu);
1434} 1557}
1435 1558
1559/*
1560 * This function is invoked towards the end of the scheduler's initialization
1561 * process. Before this is called, the idle task might contain
1562 * RCU read-side critical sections (during which time, this idle
1563 * task is booting the system). After this function is called, the
1564 * idle tasks are prohibited from containing RCU read-side critical
1565 * sections.
1566 */
1567void rcu_scheduler_starting(void)
1568{
1569 WARN_ON(num_online_cpus() != 1);
1570 WARN_ON(nr_context_switches() > 0);
1571 rcu_scheduler_active = 1;
1572}
1573
1436static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; 1574static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
1437static atomic_t rcu_barrier_cpu_count; 1575static atomic_t rcu_barrier_cpu_count;
1438static DEFINE_MUTEX(rcu_barrier_mutex); 1576static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -1544,21 +1682,16 @@ static void __cpuinit
1544rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) 1682rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1545{ 1683{
1546 unsigned long flags; 1684 unsigned long flags;
1547 long lastcomp;
1548 unsigned long mask; 1685 unsigned long mask;
1549 struct rcu_data *rdp = rsp->rda[cpu]; 1686 struct rcu_data *rdp = rsp->rda[cpu];
1550 struct rcu_node *rnp = rcu_get_root(rsp); 1687 struct rcu_node *rnp = rcu_get_root(rsp);
1551 1688
1552 /* Set up local state, ensuring consistent view of global state. */ 1689 /* Set up local state, ensuring consistent view of global state. */
1553 spin_lock_irqsave(&rnp->lock, flags); 1690 spin_lock_irqsave(&rnp->lock, flags);
1554 lastcomp = rsp->completed;
1555 rdp->completed = lastcomp;
1556 rdp->gpnum = lastcomp;
1557 rdp->passed_quiesc = 0; /* We could be racing with new GP, */ 1691 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1558 rdp->qs_pending = 1; /* so set up to respond to current GP. */ 1692 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1559 rdp->beenonline = 1; /* We have now been online. */ 1693 rdp->beenonline = 1; /* We have now been online. */
1560 rdp->preemptable = preemptable; 1694 rdp->preemptable = preemptable;
1561 rdp->passed_quiesc_completed = lastcomp - 1;
1562 rdp->qlen_last_fqs_check = 0; 1695 rdp->qlen_last_fqs_check = 0;
1563 rdp->n_force_qs_snap = rsp->n_force_qs; 1696 rdp->n_force_qs_snap = rsp->n_force_qs;
1564 rdp->blimit = blimit; 1697 rdp->blimit = blimit;
@@ -1580,6 +1713,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1580 spin_lock(&rnp->lock); /* irqs already disabled. */ 1713 spin_lock(&rnp->lock); /* irqs already disabled. */
1581 rnp->qsmaskinit |= mask; 1714 rnp->qsmaskinit |= mask;
1582 mask = rnp->grpmask; 1715 mask = rnp->grpmask;
1716 if (rnp == rdp->mynode) {
1717 rdp->gpnum = rnp->completed; /* if GP in progress... */
1718 rdp->completed = rnp->completed;
1719 rdp->passed_quiesc_completed = rnp->completed - 1;
1720 }
1583 spin_unlock(&rnp->lock); /* irqs already disabled. */ 1721 spin_unlock(&rnp->lock); /* irqs already disabled. */
1584 rnp = rnp->parent; 1722 rnp = rnp->parent;
1585 } while (rnp != NULL && !(rnp->qsmaskinit & mask)); 1723 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
@@ -1597,8 +1735,8 @@ static void __cpuinit rcu_online_cpu(int cpu)
1597/* 1735/*
1598 * Handle CPU online/offline notification events. 1736 * Handle CPU online/offline notification events.
1599 */ 1737 */
1600int __cpuinit rcu_cpu_notify(struct notifier_block *self, 1738static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1601 unsigned long action, void *hcpu) 1739 unsigned long action, void *hcpu)
1602{ 1740{
1603 long cpu = (long)hcpu; 1741 long cpu = (long)hcpu;
1604 1742
@@ -1685,8 +1823,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1685 cpustride *= rsp->levelspread[i]; 1823 cpustride *= rsp->levelspread[i];
1686 rnp = rsp->level[i]; 1824 rnp = rsp->level[i];
1687 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 1825 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1688 if (rnp != rcu_get_root(rsp)) 1826 spin_lock_init(&rnp->lock);
1689 spin_lock_init(&rnp->lock); 1827 lockdep_set_class(&rnp->lock, &rcu_node_class[i]);
1690 rnp->gpnum = 0; 1828 rnp->gpnum = 0;
1691 rnp->qsmask = 0; 1829 rnp->qsmask = 0;
1692 rnp->qsmaskinit = 0; 1830 rnp->qsmaskinit = 0;
@@ -1707,9 +1845,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1707 rnp->level = i; 1845 rnp->level = i;
1708 INIT_LIST_HEAD(&rnp->blocked_tasks[0]); 1846 INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
1709 INIT_LIST_HEAD(&rnp->blocked_tasks[1]); 1847 INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
1848 INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
1849 INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
1710 } 1850 }
1711 } 1851 }
1712 spin_lock_init(&rcu_get_root(rsp)->lock);
1713} 1852}
1714 1853
1715/* 1854/*
@@ -1735,16 +1874,30 @@ do { \
1735 } \ 1874 } \
1736} while (0) 1875} while (0)
1737 1876
1738void __init __rcu_init(void) 1877void __init rcu_init(void)
1739{ 1878{
1879 int i;
1880
1740 rcu_bootup_announce(); 1881 rcu_bootup_announce();
1741#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 1882#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
1742 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); 1883 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
1743#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 1884#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
1885#if NUM_RCU_LVL_4 != 0
1886 printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n");
1887#endif /* #if NUM_RCU_LVL_4 != 0 */
1744 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); 1888 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
1745 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); 1889 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
1746 __rcu_init_preempt(); 1890 __rcu_init_preempt();
1747 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1891 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1892
1893 /*
1894 * We don't need protection against CPU-hotplug here because
1895 * this is called early in boot, before either interrupts
1896 * or the scheduler are operational.
1897 */
1898 cpu_notifier(rcu_cpu_notify, 0);
1899 for_each_online_cpu(i)
1900 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i);
1748} 1901}
1749 1902
1750#include "rcutree_plugin.h" 1903#include "rcutree_plugin.h"
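
The kernel-doc added above for synchronize_sched() states the rcu-sched guarantee: any region with preemption disabled, including hardware-interrupt and NMI handlers, counts as a read-side critical section. A minimal, hedged sketch of the usual pairing, with invented type and variable names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
        int value;
};

static struct cfg *cur_cfg;             /* illustrative shared pointer */

static int cfg_read(void)
{
        struct cfg *c;
        int v;

        rcu_read_lock_sched();          /* preempt_disable() would also do */
        c = rcu_dereference(cur_cfg);
        v = c ? c->value : 0;
        rcu_read_unlock_sched();
        return v;
}

static void cfg_replace(struct cfg *newc)       /* update-side lock assumed held */
{
        struct cfg *oldc = cur_cfg;

        rcu_assign_pointer(cur_cfg, newc);
        synchronize_sched();            /* waits out all preempt-disabled readers */
        kfree(oldc);
}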
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 1899023b0962..d2a0046f63b2 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -34,10 +34,11 @@
34 * In practice, this has not been tested, so there is probably some 34 * In practice, this has not been tested, so there is probably some
35 * bug somewhere. 35 * bug somewhere.
36 */ 36 */
37#define MAX_RCU_LVLS 3 37#define MAX_RCU_LVLS 4
38#define RCU_FANOUT (CONFIG_RCU_FANOUT) 38#define RCU_FANOUT (CONFIG_RCU_FANOUT)
39#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) 39#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
40#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) 40#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
41#define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT)
41 42
42#if NR_CPUS <= RCU_FANOUT 43#if NR_CPUS <= RCU_FANOUT
43# define NUM_RCU_LVLS 1 44# define NUM_RCU_LVLS 1
@@ -45,23 +46,33 @@
45# define NUM_RCU_LVL_1 (NR_CPUS) 46# define NUM_RCU_LVL_1 (NR_CPUS)
46# define NUM_RCU_LVL_2 0 47# define NUM_RCU_LVL_2 0
47# define NUM_RCU_LVL_3 0 48# define NUM_RCU_LVL_3 0
49# define NUM_RCU_LVL_4 0
48#elif NR_CPUS <= RCU_FANOUT_SQ 50#elif NR_CPUS <= RCU_FANOUT_SQ
49# define NUM_RCU_LVLS 2 51# define NUM_RCU_LVLS 2
50# define NUM_RCU_LVL_0 1 52# define NUM_RCU_LVL_0 1
51# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) 53# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
52# define NUM_RCU_LVL_2 (NR_CPUS) 54# define NUM_RCU_LVL_2 (NR_CPUS)
53# define NUM_RCU_LVL_3 0 55# define NUM_RCU_LVL_3 0
56# define NUM_RCU_LVL_4 0
54#elif NR_CPUS <= RCU_FANOUT_CUBE 57#elif NR_CPUS <= RCU_FANOUT_CUBE
55# define NUM_RCU_LVLS 3 58# define NUM_RCU_LVLS 3
56# define NUM_RCU_LVL_0 1 59# define NUM_RCU_LVL_0 1
57# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) 60# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
58# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) 61# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
59# define NUM_RCU_LVL_3 NR_CPUS 62# define NUM_RCU_LVL_3 NR_CPUS
63# define NUM_RCU_LVL_4 0
64#elif NR_CPUS <= RCU_FANOUT_FOURTH
65# define NUM_RCU_LVLS 4
66# define NUM_RCU_LVL_0 1
67# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE)
68# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
69# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
70# define NUM_RCU_LVL_4 NR_CPUS
60#else 71#else
61# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" 72# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
62#endif /* #if (NR_CPUS) <= RCU_FANOUT */ 73#endif /* #if (NR_CPUS) <= RCU_FANOUT */
63 74
64#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) 75#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
65#define NUM_RCU_NODES (RCU_SUM - NR_CPUS) 76#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
66 77
67/* 78/*
@@ -84,14 +95,21 @@ struct rcu_node {
84 long gpnum; /* Current grace period for this node. */ 95 long gpnum; /* Current grace period for this node. */
85 /* This will either be equal to or one */ 96 /* This will either be equal to or one */
86 /* behind the root rcu_node's gpnum. */ 97 /* behind the root rcu_node's gpnum. */
98 long completed; /* Last grace period completed for this node. */
99 /* This will either be equal to or one */
100 /* behind the root rcu_node's gpnum. */
87 unsigned long qsmask; /* CPUs or groups that need to switch in */ 101 unsigned long qsmask; /* CPUs or groups that need to switch in */
88 /* order for current grace period to proceed.*/ 102 /* order for current grace period to proceed.*/
89 /* In leaf rcu_node, each bit corresponds to */ 103 /* In leaf rcu_node, each bit corresponds to */
90 /* an rcu_data structure, otherwise, each */ 104 /* an rcu_data structure, otherwise, each */
91 /* bit corresponds to a child rcu_node */ 105 /* bit corresponds to a child rcu_node */
92 /* structure. */ 106 /* structure. */
107 unsigned long expmask; /* Groups that have ->blocked_tasks[] */
108 /* elements that need to drain to allow the */
109 /* current expedited grace period to */
110 /* complete (only for TREE_PREEMPT_RCU). */
93 unsigned long qsmaskinit; 111 unsigned long qsmaskinit;
94 /* Per-GP initialization for qsmask. */ 112 /* Per-GP initial value for qsmask & expmask. */
95 unsigned long grpmask; /* Mask to apply to parent qsmask. */ 113 unsigned long grpmask; /* Mask to apply to parent qsmask. */
96 /* Only one bit will be set in this mask. */ 114 /* Only one bit will be set in this mask. */
97 int grplo; /* lowest-numbered CPU or group here. */ 115 int grplo; /* lowest-numbered CPU or group here. */
@@ -99,7 +117,7 @@ struct rcu_node {
99 u8 grpnum; /* CPU/group number for next level up. */ 117 u8 grpnum; /* CPU/group number for next level up. */
100 u8 level; /* root is at level 0. */ 118 u8 level; /* root is at level 0. */
101 struct rcu_node *parent; 119 struct rcu_node *parent;
102 struct list_head blocked_tasks[2]; 120 struct list_head blocked_tasks[4];
103 /* Tasks blocked in RCU read-side critsect. */ 121 /* Tasks blocked in RCU read-side critsect. */
104 /* Grace period number (->gpnum) x blocked */ 122 /* Grace period number (->gpnum) x blocked */
105 /* by tasks on the (x & 0x1) element of the */ 123 /* by tasks on the (x & 0x1) element of the */
@@ -114,6 +132,21 @@ struct rcu_node {
114 for ((rnp) = &(rsp)->node[0]; \ 132 for ((rnp) = &(rsp)->node[0]; \
115 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) 133 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
116 134
135/*
136 * Do a breadth-first scan of the non-leaf rcu_node structures for the
137 * specified rcu_state structure. Note that if there is a singleton
138 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
139 */
140#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
141 for ((rnp) = &(rsp)->node[0]; \
142 (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
143
144/*
145 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
146 * structure. Note that if there is a singleton rcu_node tree with but
147 * one rcu_node structure, this loop -will- visit the rcu_node structure.
148 * It is still a leaf node, even if it is also the root node.
149 */
117#define rcu_for_each_leaf_node(rsp, rnp) \ 150#define rcu_for_each_leaf_node(rsp, rnp) \
118 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \ 151 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
119 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) 152 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
@@ -204,11 +237,12 @@ struct rcu_data {
204#define RCU_GP_IDLE 0 /* No grace period in progress. */ 237#define RCU_GP_IDLE 0 /* No grace period in progress. */
205#define RCU_GP_INIT 1 /* Grace period being initialized. */ 238#define RCU_GP_INIT 1 /* Grace period being initialized. */
206#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ 239#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
207#define RCU_FORCE_QS 3 /* Need to force quiescent state. */ 240#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */
241#define RCU_FORCE_QS 4 /* Need to force quiescent state. */
208#ifdef CONFIG_NO_HZ 242#ifdef CONFIG_NO_HZ
209#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK 243#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
210#else /* #ifdef CONFIG_NO_HZ */ 244#else /* #ifdef CONFIG_NO_HZ */
211#define RCU_SIGNAL_INIT RCU_FORCE_QS 245#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED
212#endif /* #else #ifdef CONFIG_NO_HZ */ 246#endif /* #else #ifdef CONFIG_NO_HZ */
213 247
214#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ 248#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
@@ -246,7 +280,7 @@ struct rcu_state {
246 long gpnum; /* Current gp number. */ 280 long gpnum; /* Current gp number. */
247 long completed; /* # of last completed gp. */ 281 long completed; /* # of last completed gp. */
248 282
249 /* End of fields guarded by root rcu_node's lock. */ 283 /* End of fields guarded by root rcu_node's lock. */
250 284
251 spinlock_t onofflock; /* exclude on/offline and */ 285 spinlock_t onofflock; /* exclude on/offline and */
252 /* starting new GP. Also */ 286 /* starting new GP. Also */
@@ -260,6 +294,8 @@ struct rcu_state {
260 long orphan_qlen; /* Number of orphaned cbs. */ 294 long orphan_qlen; /* Number of orphaned cbs. */
261 spinlock_t fqslock; /* Only one task forcing */ 295 spinlock_t fqslock; /* Only one task forcing */
262 /* quiescent states. */ 296 /* quiescent states. */
297 long completed_fqs; /* Value of completed @ snap. */
298 /* Protected by fqslock. */
263 unsigned long jiffies_force_qs; /* Time at which to invoke */ 299 unsigned long jiffies_force_qs; /* Time at which to invoke */
264 /* force_quiescent_state(). */ 300 /* force_quiescent_state(). */
265 unsigned long n_force_qs; /* Number of calls to */ 301 unsigned long n_force_qs; /* Number of calls to */
@@ -274,11 +310,15 @@ struct rcu_state {
274 unsigned long jiffies_stall; /* Time at which to check */ 310 unsigned long jiffies_stall; /* Time at which to check */
275 /* for CPU stalls. */ 311 /* for CPU stalls. */
276#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 312#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
277#ifdef CONFIG_NO_HZ
278 long dynticks_completed; /* Value of completed @ snap. */
279#endif /* #ifdef CONFIG_NO_HZ */
280}; 313};
281 314
315/* Return values for rcu_preempt_offline_tasks(). */
316
317#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
318 /* GP were moved to root. */
319#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
320 /* GP were moved to root. */
321
282#ifdef RCU_TREE_NONCORE 322#ifdef RCU_TREE_NONCORE
283 323
284/* 324/*
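
The two RCU_OFL_TASKS_* values are independent bits, so rcu_preempt_offline_tasks() can report both conditions in one return value and the caller can test them separately. A hedged caller-side sketch (the local variable and printk messages are hypothetical; the flag tests mirror how the later rcutree_plugin.h hunk builds retval):

	int moved = rcu_preempt_offline_tasks(rsp, rnp, rdp);

	if (moved & RCU_OFL_TASKS_NORM_GP)
		printk(KERN_DEBUG "normal-GP blockers moved to root rcu_node\n");
	if (moved & RCU_OFL_TASKS_EXP_GP)
		printk(KERN_DEBUG "expedited-GP blockers moved to root rcu_node\n");
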
@@ -298,10 +338,14 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
298#else /* #ifdef RCU_TREE_NONCORE */ 338#else /* #ifdef RCU_TREE_NONCORE */
299 339
300/* Forward declarations for rcutree_plugin.h */ 340/* Forward declarations for rcutree_plugin.h */
301static inline void rcu_bootup_announce(void); 341static void rcu_bootup_announce(void);
302long rcu_batches_completed(void); 342long rcu_batches_completed(void);
303static void rcu_preempt_note_context_switch(int cpu); 343static void rcu_preempt_note_context_switch(int cpu);
304static int rcu_preempted_readers(struct rcu_node *rnp); 344static int rcu_preempted_readers(struct rcu_node *rnp);
345#ifdef CONFIG_HOTPLUG_CPU
346static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
347 unsigned long flags);
348#endif /* #ifdef CONFIG_HOTPLUG_CPU */
305#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 349#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
306static void rcu_print_task_stall(struct rcu_node *rnp); 350static void rcu_print_task_stall(struct rcu_node *rnp);
307#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 351#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
@@ -315,6 +359,9 @@ static void rcu_preempt_offline_cpu(int cpu);
315static void rcu_preempt_check_callbacks(int cpu); 359static void rcu_preempt_check_callbacks(int cpu);
316static void rcu_preempt_process_callbacks(void); 360static void rcu_preempt_process_callbacks(void);
317void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 361void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
362#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
363static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
364#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
318static int rcu_preempt_pending(int cpu); 365static int rcu_preempt_pending(int cpu);
319static int rcu_preempt_needs_cpu(int cpu); 366static int rcu_preempt_needs_cpu(int cpu);
320static void __cpuinit rcu_preempt_init_percpu_data(int cpu); 367static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ef2a58c2b9d5..37fbccdf41d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -24,16 +24,19 @@
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> 24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25 */ 25 */
26 26
27#include <linux/delay.h>
27 28
28#ifdef CONFIG_TREE_PREEMPT_RCU 29#ifdef CONFIG_TREE_PREEMPT_RCU
29 30
30struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); 31struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
31DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); 32DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
32 33
34static int rcu_preempted_readers_exp(struct rcu_node *rnp);
35
33/* 36/*
34 * Tell them what RCU they are running. 37 * Tell them what RCU they are running.
35 */ 38 */
36static inline void rcu_bootup_announce(void) 39static void __init rcu_bootup_announce(void)
37{ 40{
38 printk(KERN_INFO 41 printk(KERN_INFO
39 "Experimental preemptable hierarchical RCU implementation.\n"); 42 "Experimental preemptable hierarchical RCU implementation.\n");
@@ -67,7 +70,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
67static void rcu_preempt_qs(int cpu) 70static void rcu_preempt_qs(int cpu)
68{ 71{
69 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); 72 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
70 rdp->passed_quiesc_completed = rdp->completed; 73 rdp->passed_quiesc_completed = rdp->gpnum - 1;
71 barrier(); 74 barrier();
72 rdp->passed_quiesc = 1; 75 rdp->passed_quiesc = 1;
73} 76}
@@ -157,14 +160,58 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock);
157 */ 160 */
158static int rcu_preempted_readers(struct rcu_node *rnp) 161static int rcu_preempted_readers(struct rcu_node *rnp)
159{ 162{
160 return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); 163 int phase = rnp->gpnum & 0x1;
164
165 return !list_empty(&rnp->blocked_tasks[phase]) ||
166 !list_empty(&rnp->blocked_tasks[phase + 2]);
167}
168
169/*
170 * Record a quiescent state for all tasks that were previously queued
171 * on the specified rcu_node structure and that were blocking the current
172 * RCU grace period. The caller must hold the specified rnp->lock with
173 * irqs disabled, and this lock is released upon return, but irqs remain
174 * disabled.
175 */
176static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
177 __releases(rnp->lock)
178{
179 unsigned long mask;
180 struct rcu_node *rnp_p;
181
182 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
183 spin_unlock_irqrestore(&rnp->lock, flags);
184 return; /* Still need more quiescent states! */
185 }
186
187 rnp_p = rnp->parent;
188 if (rnp_p == NULL) {
189 /*
190 * Either there is only one rcu_node in the tree,
191 * or tasks were kicked up to root rcu_node due to
192 * CPUs going offline.
193 */
194 rcu_report_qs_rsp(&rcu_preempt_state, flags);
195 return;
196 }
197
198 /* Report up the rest of the hierarchy. */
199 mask = rnp->grpmask;
200 spin_unlock(&rnp->lock); /* irqs remain disabled. */
201 spin_lock(&rnp_p->lock); /* irqs already disabled. */
202 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
161} 203}
162 204
205/*
206 * Handle special cases during rcu_read_unlock(), such as needing to
207 * notify RCU core processing or task having blocked during the RCU
208 * read-side critical section.
209 */
163static void rcu_read_unlock_special(struct task_struct *t) 210static void rcu_read_unlock_special(struct task_struct *t)
164{ 211{
165 int empty; 212 int empty;
213 int empty_exp;
166 unsigned long flags; 214 unsigned long flags;
167 unsigned long mask;
168 struct rcu_node *rnp; 215 struct rcu_node *rnp;
169 int special; 216 int special;
170 217
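
With the expedited machinery, each rcu_node now carries four blocked_tasks[] lists: index (gpnum & 1) holds readers blocking the current normal grace period, and index (gpnum & 1) + 2 holds readers blocking the current expedited one, which is exactly what the new rcu_preempted_readers() above tests. A stand-alone toy model (plain user-space C with invented toy_* names; counters stand in for the kernel's list_heads):

#include <stdio.h>

struct toy_rnp {
	unsigned long gpnum;
	int blocked[4];			/* element counts stand in for list_heads */
};

/* Mirrors the phase-indexed test in rcu_preempted_readers() above. */
static int toy_preempted_readers(struct toy_rnp *rnp)
{
	int phase = rnp->gpnum & 0x1;

	return rnp->blocked[phase] != 0 || rnp->blocked[phase + 2] != 0;
}

int main(void)
{
	struct toy_rnp rnp = { .gpnum = 7, .blocked = { 0, 1, 0, 0 } };

	/* gpnum is odd, so indices 1 (normal) and 3 (expedited) are current. */
	printf("readers blocking current GP: %d\n", toy_preempted_readers(&rnp));
	return 0;
}
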
@@ -207,36 +254,30 @@ static void rcu_read_unlock_special(struct task_struct *t)
207 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 254 spin_unlock(&rnp->lock); /* irqs remain disabled. */
208 } 255 }
209 empty = !rcu_preempted_readers(rnp); 256 empty = !rcu_preempted_readers(rnp);
257 empty_exp = !rcu_preempted_readers_exp(rnp);
258 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
210 list_del_init(&t->rcu_node_entry); 259 list_del_init(&t->rcu_node_entry);
211 t->rcu_blocked_node = NULL; 260 t->rcu_blocked_node = NULL;
212 261
213 /* 262 /*
214 * If this was the last task on the current list, and if 263 * If this was the last task on the current list, and if
215 * we aren't waiting on any CPUs, report the quiescent state. 264 * we aren't waiting on any CPUs, report the quiescent state.
216 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk() 265 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
217 * drop rnp->lock and restore irq.
218 */ 266 */
219 if (!empty && rnp->qsmask == 0 && 267 if (empty)
220 !rcu_preempted_readers(rnp)) {
221 struct rcu_node *rnp_p;
222
223 if (rnp->parent == NULL) {
224 /* Only one rcu_node in the tree. */
225 cpu_quiet_msk_finish(&rcu_preempt_state, flags);
226 return;
227 }
228 /* Report up the rest of the hierarchy. */
229 mask = rnp->grpmask;
230 spin_unlock_irqrestore(&rnp->lock, flags); 268 spin_unlock_irqrestore(&rnp->lock, flags);
231 rnp_p = rnp->parent; 269 else
232 spin_lock_irqsave(&rnp_p->lock, flags); 270 rcu_report_unblock_qs_rnp(rnp, flags);
233 WARN_ON_ONCE(rnp->qsmask); 271
234 cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); 272 /*
235 return; 273 * If this was the last task on the expedited lists,
236 } 274 * then we need to report up the rcu_node hierarchy.
237 spin_unlock(&rnp->lock); 275 */
276 if (!empty_exp && !rcu_preempted_readers_exp(rnp))
277 rcu_report_exp_rnp(&rcu_preempt_state, rnp);
278 } else {
279 local_irq_restore(flags);
238 } 280 }
239 local_irq_restore(flags);
240} 281}
241 282
242/* 283/*
@@ -303,6 +344,8 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
303 * rcu_node. The reason for not just moving them to the immediate 344 * rcu_node. The reason for not just moving them to the immediate
304 * parent is to remove the need for rcu_read_unlock_special() to 345 * parent is to remove the need for rcu_read_unlock_special() to
305 * make more than two attempts to acquire the target rcu_node's lock. 346 * make more than two attempts to acquire the target rcu_node's lock.
347 * Returns true if there were tasks blocking the current RCU grace
348 * period.
306 * 349 *
307 * Returns 1 if there was previously a task blocking the current grace 350 * Returns 1 if there was previously a task blocking the current grace
308 * period on the specified rcu_node structure. 351 * period on the specified rcu_node structure.
@@ -316,7 +359,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
316 int i; 359 int i;
317 struct list_head *lp; 360 struct list_head *lp;
318 struct list_head *lp_root; 361 struct list_head *lp_root;
319 int retval = rcu_preempted_readers(rnp); 362 int retval = 0;
320 struct rcu_node *rnp_root = rcu_get_root(rsp); 363 struct rcu_node *rnp_root = rcu_get_root(rsp);
321 struct task_struct *tp; 364 struct task_struct *tp;
322 365
@@ -326,7 +369,9 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
326 } 369 }
327 WARN_ON_ONCE(rnp != rdp->mynode && 370 WARN_ON_ONCE(rnp != rdp->mynode &&
328 (!list_empty(&rnp->blocked_tasks[0]) || 371 (!list_empty(&rnp->blocked_tasks[0]) ||
329 !list_empty(&rnp->blocked_tasks[1]))); 372 !list_empty(&rnp->blocked_tasks[1]) ||
373 !list_empty(&rnp->blocked_tasks[2]) ||
374 !list_empty(&rnp->blocked_tasks[3])));
330 375
331 /* 376 /*
332 * Move tasks up to root rcu_node. Rely on the fact that the 377 * Move tasks up to root rcu_node. Rely on the fact that the
@@ -334,7 +379,11 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
334 * rcu_nodes in terms of gp_num value. This fact allows us to 379 * rcu_nodes in terms of gp_num value. This fact allows us to
335 * move the blocked_tasks[] array directly, element by element. 380 * move the blocked_tasks[] array directly, element by element.
336 */ 381 */
337 for (i = 0; i < 2; i++) { 382 if (rcu_preempted_readers(rnp))
383 retval |= RCU_OFL_TASKS_NORM_GP;
384 if (rcu_preempted_readers_exp(rnp))
385 retval |= RCU_OFL_TASKS_EXP_GP;
386 for (i = 0; i < 4; i++) {
338 lp = &rnp->blocked_tasks[i]; 387 lp = &rnp->blocked_tasks[i];
339 lp_root = &rnp_root->blocked_tasks[i]; 388 lp_root = &rnp_root->blocked_tasks[i];
340 while (!list_empty(lp)) { 389 while (!list_empty(lp)) {
@@ -346,7 +395,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
346 spin_unlock(&rnp_root->lock); /* irqs remain disabled */ 395 spin_unlock(&rnp_root->lock); /* irqs remain disabled */
347 } 396 }
348 } 397 }
349
350 return retval; 398 return retval;
351} 399}
352 400
@@ -398,14 +446,183 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
398} 446}
399EXPORT_SYMBOL_GPL(call_rcu); 447EXPORT_SYMBOL_GPL(call_rcu);
400 448
449/**
450 * synchronize_rcu - wait until a grace period has elapsed.
451 *
452 * Control will return to the caller some time after a full grace
453 * period has elapsed, in other words after all currently executing RCU
454 * read-side critical sections have completed. RCU read-side critical
455 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
456 * and may be nested.
457 */
458void synchronize_rcu(void)
459{
460 struct rcu_synchronize rcu;
461
462 if (!rcu_scheduler_active)
463 return;
464
465 init_completion(&rcu.completion);
466 /* Will wake me after RCU finished. */
467 call_rcu(&rcu.head, wakeme_after_rcu);
468 /* Wait for it. */
469 wait_for_completion(&rcu.completion);
470}
471EXPORT_SYMBOL_GPL(synchronize_rcu);
472
473static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
474static long sync_rcu_preempt_exp_count;
475static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
476
401/* 477/*
402 * Wait for an rcu-preempt grace period. We are supposed to expedite the 478 * Return non-zero if there are any tasks in RCU read-side critical
403 * grace period, but this is the crude slow compatability hack, so just 479 * sections blocking the current preemptible-RCU expedited grace period.
404 * invoke synchronize_rcu(). 480 * If there is no preemptible-RCU expedited grace period currently in
481 * progress, returns zero unconditionally.
482 */
483static int rcu_preempted_readers_exp(struct rcu_node *rnp)
484{
485 return !list_empty(&rnp->blocked_tasks[2]) ||
486 !list_empty(&rnp->blocked_tasks[3]);
487}
488
489/*
 490 * Return non-zero if there is no RCU expedited grace period in progress
491 * for the specified rcu_node structure, in other words, if all CPUs and
492 * tasks covered by the specified rcu_node structure have done their bit
493 * for the current expedited grace period. Works only for preemptible
 494 * RCU -- other RCU implementations use other means.
495 *
496 * Caller must hold sync_rcu_preempt_exp_mutex.
497 */
498static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
499{
500 return !rcu_preempted_readers_exp(rnp) &&
501 ACCESS_ONCE(rnp->expmask) == 0;
502}
503
504/*
505 * Report the exit from RCU read-side critical section for the last task
506 * that queued itself during or before the current expedited preemptible-RCU
507 * grace period. This event is reported either to the rcu_node structure on
508 * which the task was queued or to one of that rcu_node structure's ancestors,
509 * recursively up the tree. (Calm down, calm down, we do the recursion
510 * iteratively!)
511 *
512 * Caller must hold sync_rcu_preempt_exp_mutex.
513 */
514static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
515{
516 unsigned long flags;
517 unsigned long mask;
518
519 spin_lock_irqsave(&rnp->lock, flags);
520 for (;;) {
521 if (!sync_rcu_preempt_exp_done(rnp))
522 break;
523 if (rnp->parent == NULL) {
524 wake_up(&sync_rcu_preempt_exp_wq);
525 break;
526 }
527 mask = rnp->grpmask;
528 spin_unlock(&rnp->lock); /* irqs remain disabled */
529 rnp = rnp->parent;
530 spin_lock(&rnp->lock); /* irqs already disabled */
531 rnp->expmask &= ~mask;
532 }
533 spin_unlock_irqrestore(&rnp->lock, flags);
534}
535
536/*
537 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
538 * grace period for the specified rcu_node structure. If there are no such
539 * tasks, report it up the rcu_node hierarchy.
540 *
541 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
542 */
543static void
544sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
545{
546 int must_wait;
547
548 spin_lock(&rnp->lock); /* irqs already disabled */
549 list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
550 list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
551 must_wait = rcu_preempted_readers_exp(rnp);
552 spin_unlock(&rnp->lock); /* irqs remain disabled */
553 if (!must_wait)
554 rcu_report_exp_rnp(rsp, rnp);
555}
556
557/*
558 * Wait for an rcu-preempt grace period, but expedite it. The basic idea
559 * is to invoke synchronize_sched_expedited() to push all the tasks to
560 * the ->blocked_tasks[] lists, move all entries from the first set of
561 * ->blocked_tasks[] lists to the second set, and finally wait for this
562 * second set to drain.
405 */ 563 */
406void synchronize_rcu_expedited(void) 564void synchronize_rcu_expedited(void)
407{ 565{
408 synchronize_rcu(); 566 unsigned long flags;
567 struct rcu_node *rnp;
568 struct rcu_state *rsp = &rcu_preempt_state;
569 long snap;
570 int trycount = 0;
571
572 smp_mb(); /* Caller's modifications seen first by other CPUs. */
573 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
574 smp_mb(); /* Above access cannot bleed into critical section. */
575
576 /*
577 * Acquire lock, falling back to synchronize_rcu() if too many
578 * lock-acquisition failures. Of course, if someone does the
579 * expedited grace period for us, just leave.
580 */
581 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
582 if (trycount++ < 10)
583 udelay(trycount * num_online_cpus());
584 else {
585 synchronize_rcu();
586 return;
587 }
588 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
589 goto mb_ret; /* Others did our work for us. */
590 }
591 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
592 goto unlock_mb_ret; /* Others did our work for us. */
593
594 /* force all RCU readers onto blocked_tasks[]. */
595 synchronize_sched_expedited();
596
597 spin_lock_irqsave(&rsp->onofflock, flags);
598
599 /* Initialize ->expmask for all non-leaf rcu_node structures. */
600 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
601 spin_lock(&rnp->lock); /* irqs already disabled. */
602 rnp->expmask = rnp->qsmaskinit;
603 spin_unlock(&rnp->lock); /* irqs remain disabled. */
604 }
605
606 /* Snapshot current state of ->blocked_tasks[] lists. */
607 rcu_for_each_leaf_node(rsp, rnp)
608 sync_rcu_preempt_exp_init(rsp, rnp);
609 if (NUM_RCU_NODES > 1)
610 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
611
612 spin_unlock_irqrestore(&rsp->onofflock, flags);
613
614 /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
615 rnp = rcu_get_root(rsp);
616 wait_event(sync_rcu_preempt_exp_wq,
617 sync_rcu_preempt_exp_done(rnp));
618
619 /* Clean up and exit. */
620 smp_mb(); /* ensure expedited GP seen before counter increment. */
621 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
622unlock_mb_ret:
623 mutex_unlock(&sync_rcu_preempt_exp_mutex);
624mb_ret:
625 smp_mb(); /* ensure subsequent action seen after grace period. */
409} 626}
410EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 627EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
411 628
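
For context, both synchronize_rcu() and the new synchronize_rcu_expedited() serve the usual publish-then-wait update pattern; only the grace-period wait differs. A hedged usage sketch -- struct foo, gp, reader() and updater() are hypothetical, a single updater is assumed, and only the RCU primitives themselves come from the code above:

struct foo {
	int a;
};
static struct foo *gp;			/* RCU-protected pointer */

static int reader(void)
{
	struct foo *p;
	int a = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* pick up the current version */
	if (p)
		a = p->a;
	rcu_read_unlock();
	return a;
}

static void updater(struct foo *newp)
{
	struct foo *oldp = gp;

	rcu_assign_pointer(gp, newp);	/* publish the new version */
	synchronize_rcu_expedited();	/* or synchronize_rcu() when latency is not critical */
	kfree(oldp);			/* no reader can still hold the old pointer */
}
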
@@ -481,7 +698,7 @@ void exit_rcu(void)
481/* 698/*
482 * Tell them what RCU they are running. 699 * Tell them what RCU they are running.
483 */ 700 */
484static inline void rcu_bootup_announce(void) 701static void __init rcu_bootup_announce(void)
485{ 702{
486 printk(KERN_INFO "Hierarchical RCU implementation.\n"); 703 printk(KERN_INFO "Hierarchical RCU implementation.\n");
487} 704}
@@ -512,6 +729,16 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
512 return 0; 729 return 0;
513} 730}
514 731
732#ifdef CONFIG_HOTPLUG_CPU
733
734/* Because preemptible RCU does not exist, no quieting of tasks. */
735static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
736{
737 spin_unlock_irqrestore(&rnp->lock, flags);
738}
739
740#endif /* #ifdef CONFIG_HOTPLUG_CPU */
741
515#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 742#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
516 743
517/* 744/*
@@ -594,6 +821,20 @@ void synchronize_rcu_expedited(void)
594} 821}
595EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 822EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
596 823
824#ifdef CONFIG_HOTPLUG_CPU
825
826/*
827 * Because preemptable RCU does not exist, there is never any need to
828 * report on tasks preempted in RCU read-side critical sections during
829 * expedited RCU grace periods.
830 */
831static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
832{
833 return;
834}
835
836#endif /* #ifdef CONFIG_HOTPLUG_CPU */
837
597/* 838/*
598 * Because preemptable RCU does not exist, it never has any work to do. 839 * Because preemptable RCU does not exist, it never has any work to do.
599 */ 840 */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4b31c779e62e..9d2c88423b31 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -155,12 +155,15 @@ static const struct file_operations rcudata_csv_fops = {
155 155
156static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) 156static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
157{ 157{
158 long gpnum;
158 int level = 0; 159 int level = 0;
160 int phase;
159 struct rcu_node *rnp; 161 struct rcu_node *rnp;
160 162
163 gpnum = rsp->gpnum;
161 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " 164 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
162 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", 165 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n",
163 rsp->completed, rsp->gpnum, rsp->signaled, 166 rsp->completed, gpnum, rsp->signaled,
164 (long)(rsp->jiffies_force_qs - jiffies), 167 (long)(rsp->jiffies_force_qs - jiffies),
165 (int)(jiffies & 0xffff), 168 (int)(jiffies & 0xffff),
166 rsp->n_force_qs, rsp->n_force_qs_ngp, 169 rsp->n_force_qs, rsp->n_force_qs_ngp,
@@ -171,8 +174,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
171 seq_puts(m, "\n"); 174 seq_puts(m, "\n");
172 level = rnp->level; 175 level = rnp->level;
173 } 176 }
174 seq_printf(m, "%lx/%lx %d:%d ^%d ", 177 phase = gpnum & 0x1;
178 seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ",
175 rnp->qsmask, rnp->qsmaskinit, 179 rnp->qsmask, rnp->qsmaskinit,
180 "T."[list_empty(&rnp->blocked_tasks[phase])],
181 "E."[list_empty(&rnp->blocked_tasks[phase + 2])],
182 "T."[list_empty(&rnp->blocked_tasks[!phase])],
183 "E."[list_empty(&rnp->blocked_tasks[!phase + 2])],
176 rnp->grplo, rnp->grphi, rnp->grpnum); 184 rnp->grplo, rnp->grphi, rnp->grpnum);
177 } 185 }
178 seq_puts(m, "\n"); 186 seq_puts(m, "\n");
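
The seq_printf() above relies on the "T."[expr] idiom: indexing a two-character string literal with the 0-or-1 result of list_empty() picks 'T' (tasks queued) or '.' (list empty) without a conditional. A stand-alone demonstration (plain C, nothing kernel-specific):

#include <stdio.h>

int main(void)
{
	int list_is_empty = 1;

	/* 'T' when tasks are queued (list not empty), '.' otherwise. */
	printf("%c\n", "T."[list_is_empty]);	/* prints '.' */
	printf("%c\n", "T."[!list_is_empty]);	/* prints 'T' */
	return 0;
}
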
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c11ae0a948d..6ae2739b8f19 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5481,7 +5481,7 @@ need_resched_nonpreemptible:
5481} 5481}
5482EXPORT_SYMBOL(schedule); 5482EXPORT_SYMBOL(schedule);
5483 5483
5484#ifdef CONFIG_SMP 5484#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
5485/* 5485/*
5486 * Look out! "owner" is an entirely speculative pointer 5486 * Look out! "owner" is an entirely speculative pointer
5487 * access and not reliable. 5487 * access and not reliable.
@@ -10901,6 +10901,7 @@ void synchronize_sched_expedited(void)
10901 spin_unlock_irqrestore(&rq->lock, flags); 10901 spin_unlock_irqrestore(&rq->lock, flags);
10902 } 10902 }
10903 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 10903 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10904 synchronize_sched_expedited_count++;
10904 mutex_unlock(&rcu_sched_expedited_mutex); 10905 mutex_unlock(&rcu_sched_expedited_mutex);
10905 put_online_cpus(); 10906 put_online_cpus();
10906 if (need_full_sync) 10907 if (need_full_sync)
diff --git a/kernel/signal.c b/kernel/signal.c
index 6705320784fd..fe08008133da 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/signalfd.h> 24#include <linux/signalfd.h>
25#include <linux/ratelimit.h>
25#include <linux/tracehook.h> 26#include <linux/tracehook.h>
26#include <linux/capability.h> 27#include <linux/capability.h>
27#include <linux/freezer.h> 28#include <linux/freezer.h>
@@ -41,6 +42,8 @@
41 42
42static struct kmem_cache *sigqueue_cachep; 43static struct kmem_cache *sigqueue_cachep;
43 44
45int print_fatal_signals __read_mostly;
46
44static void __user *sig_handler(struct task_struct *t, int sig) 47static void __user *sig_handler(struct task_struct *t, int sig)
45{ 48{
46 return t->sighand->action[sig - 1].sa.sa_handler; 49 return t->sighand->action[sig - 1].sa.sa_handler;
@@ -159,7 +162,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
159{ 162{
160 unsigned long i, *s, *m, x; 163 unsigned long i, *s, *m, x;
161 int sig = 0; 164 int sig = 0;
162 165
163 s = pending->signal.sig; 166 s = pending->signal.sig;
164 m = mask->sig; 167 m = mask->sig;
165 switch (_NSIG_WORDS) { 168 switch (_NSIG_WORDS) {
@@ -184,17 +187,31 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
184 sig = ffz(~x) + 1; 187 sig = ffz(~x) + 1;
185 break; 188 break;
186 } 189 }
187 190
188 return sig; 191 return sig;
189} 192}
190 193
194static inline void print_dropped_signal(int sig)
195{
196 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
197
198 if (!print_fatal_signals)
199 return;
200
201 if (!__ratelimit(&ratelimit_state))
202 return;
203
204 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
205 current->comm, current->pid, sig);
206}
207
191/* 208/*
192 * allocate a new signal queue record 209 * allocate a new signal queue record
193 * - this may be called without locks if and only if t == current, otherwise an 210 * - this may be called without locks if and only if t == current, otherwise an
 194 * appropriate lock must be held to stop the target task from exiting 211 * appropriate lock must be held to stop the target task from exiting
195 */ 212 */
196static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, 213static struct sigqueue *
197 int override_rlimit) 214__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
198{ 215{
199 struct sigqueue *q = NULL; 216 struct sigqueue *q = NULL;
200 struct user_struct *user; 217 struct user_struct *user;
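
The DEFINE_RATELIMIT_STATE()/__ratelimit() pair used by print_dropped_signal() is the generic <linux/ratelimit.h> interface and can throttle any noisy diagnostic the same way. A hedged sketch reusing it -- my_warn_dropped_packet() is hypothetical; the 5 * HZ / 10 parameters simply mirror the hunk above (interval, then burst):

static void my_warn_dropped_packet(int queue)
{
	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

	if (!__ratelimit(&rs))
		return;			/* over budget for this interval: stay quiet */

	printk(KERN_INFO "dropping packet on queue %d\n", queue);
}
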
@@ -207,10 +224,15 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
207 */ 224 */
208 user = get_uid(__task_cred(t)->user); 225 user = get_uid(__task_cred(t)->user);
209 atomic_inc(&user->sigpending); 226 atomic_inc(&user->sigpending);
227
210 if (override_rlimit || 228 if (override_rlimit ||
211 atomic_read(&user->sigpending) <= 229 atomic_read(&user->sigpending) <=
212 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) 230 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
213 q = kmem_cache_alloc(sigqueue_cachep, flags); 231 q = kmem_cache_alloc(sigqueue_cachep, flags);
232 } else {
233 print_dropped_signal(sig);
234 }
235
214 if (unlikely(q == NULL)) { 236 if (unlikely(q == NULL)) {
215 atomic_dec(&user->sigpending); 237 atomic_dec(&user->sigpending);
216 free_uid(user); 238 free_uid(user);
@@ -869,7 +891,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
869 else 891 else
870 override_rlimit = 0; 892 override_rlimit = 0;
871 893
872 q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, 894 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
873 override_rlimit); 895 override_rlimit);
874 if (q) { 896 if (q) {
875 list_add_tail(&q->list, &pending->list); 897 list_add_tail(&q->list, &pending->list);
@@ -925,8 +947,6 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
925 return __send_signal(sig, info, t, group, from_ancestor_ns); 947 return __send_signal(sig, info, t, group, from_ancestor_ns);
926} 948}
927 949
928int print_fatal_signals;
929
930static void print_fatal_signal(struct pt_regs *regs, int signr) 950static void print_fatal_signal(struct pt_regs *regs, int signr)
931{ 951{
932 printk("%s/%d: potentially unexpected fatal signal %d.\n", 952 printk("%s/%d: potentially unexpected fatal signal %d.\n",
@@ -1293,19 +1313,19 @@ EXPORT_SYMBOL(kill_pid);
1293 * These functions support sending signals using preallocated sigqueue 1313 * These functions support sending signals using preallocated sigqueue
1294 * structures. This is needed "because realtime applications cannot 1314 * structures. This is needed "because realtime applications cannot
1295 * afford to lose notifications of asynchronous events, like timer 1315 * afford to lose notifications of asynchronous events, like timer
1296 * expirations or I/O completions". In the case of Posix Timers 1316 * expirations or I/O completions". In the case of Posix Timers
1297 * we allocate the sigqueue structure from the timer_create. If this 1317 * we allocate the sigqueue structure from the timer_create. If this
1298 * allocation fails we are able to report the failure to the application 1318 * allocation fails we are able to report the failure to the application
1299 * with an EAGAIN error. 1319 * with an EAGAIN error.
1300 */ 1320 */
1301
1302struct sigqueue *sigqueue_alloc(void) 1321struct sigqueue *sigqueue_alloc(void)
1303{ 1322{
1304 struct sigqueue *q; 1323 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1305 1324
1306 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) 1325 if (q)
1307 q->flags |= SIGQUEUE_PREALLOC; 1326 q->flags |= SIGQUEUE_PREALLOC;
1308 return(q); 1327
1328 return q;
1309} 1329}
1310 1330
1311void sigqueue_free(struct sigqueue *q) 1331void sigqueue_free(struct sigqueue *q)
diff --git a/kernel/smp.c b/kernel/smp.c
index c9d1c7835c2f..a8c76069cf50 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -265,9 +265,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
265 * @info: An arbitrary pointer to pass to the function. 265 * @info: An arbitrary pointer to pass to the function.
266 * @wait: If true, wait until function has completed on other CPUs. 266 * @wait: If true, wait until function has completed on other CPUs.
267 * 267 *
268 * Returns 0 on success, else a negative status code. Note that @wait 268 * Returns 0 on success, else a negative status code.
269 * will be implicitly turned on in case of allocation failures, since
270 * we fall back to on-stack allocation.
271 */ 269 */
272int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 270int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
273 int wait) 271 int wait)
@@ -321,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
321} 319}
322EXPORT_SYMBOL(smp_call_function_single); 320EXPORT_SYMBOL(smp_call_function_single);
323 321
322/*
323 * smp_call_function_any - Run a function on any of the given cpus
324 * @mask: The mask of cpus it can run on.
325 * @func: The function to run. This must be fast and non-blocking.
326 * @info: An arbitrary pointer to pass to the function.
327 * @wait: If true, wait until function has completed.
328 *
329 * Returns 0 on success, else a negative status code (if no cpus were online).
330 * Note that @wait will be implicitly turned on in case of allocation failures,
331 * since we fall back to on-stack allocation.
332 *
333 * Selection preference:
334 * 1) current cpu if in @mask
335 * 2) any cpu of current node if in @mask
336 * 3) any other online cpu in @mask
337 */
338int smp_call_function_any(const struct cpumask *mask,
339 void (*func)(void *info), void *info, int wait)
340{
341 unsigned int cpu;
342 const struct cpumask *nodemask;
343 int ret;
344
345 /* Try for same CPU (cheapest) */
346 cpu = get_cpu();
347 if (cpumask_test_cpu(cpu, mask))
348 goto call;
349
350 /* Try for same node. */
351 nodemask = cpumask_of_node(cpu);
352 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
353 cpu = cpumask_next_and(cpu, nodemask, mask)) {
354 if (cpu_online(cpu))
355 goto call;
356 }
357
358 /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
359 cpu = cpumask_any_and(mask, cpu_online_mask);
360call:
361 ret = smp_call_function_single(cpu, func, info, wait);
362 put_cpu();
363 return ret;
364}
365EXPORT_SYMBOL_GPL(smp_call_function_any);
366
324/** 367/**
325 * __smp_call_function_single(): Run a function on another CPU 368 * __smp_call_function_single(): Run a function on another CPU
326 * @cpu: The CPU to run on. 369 * @cpu: The CPU to run on.
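
A usage sketch for the new smp_call_function_any() -- flush_local_cache() and flush_on_any_cpu() are hypothetical; the call follows the signature and selection rules documented in the hunk above:

static void flush_local_cache(void *info)
{
	/* runs on whichever CPU was selected; must be fast and non-blocking */
	(void)info;
}

static int flush_on_any_cpu(const struct cpumask *my_mask)
{
	/* wait == 1: return only after flush_local_cache() has completed */
	return smp_call_function_any(my_mask, flush_local_cache, NULL, 1);
}
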
@@ -355,9 +398,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
355 * @wait: If true, wait (atomically) until function has completed 398 * @wait: If true, wait (atomically) until function has completed
356 * on other CPUs. 399 * on other CPUs.
357 * 400 *
358 * If @wait is true, then returns once @func has returned. Note that @wait 401 * If @wait is true, then returns once @func has returned.
359 * will be implicitly turned on in case of allocation failures, since
360 * we fall back to on-stack allocation.
361 * 402 *
362 * You must not call this function with disabled interrupts or from a 403 * You must not call this function with disabled interrupts or from a
363 * hardware interrupt handler or from a bottom half handler. Preemption 404 * hardware interrupt handler or from a bottom half handler. Preemption
@@ -443,8 +484,7 @@ EXPORT_SYMBOL(smp_call_function_many);
443 * Returns 0. 484 * Returns 0.
444 * 485 *
445 * If @wait is true, then returns once @func has returned; otherwise 486 * If @wait is true, then returns once @func has returned; otherwise
446 * it returns just before the target cpu calls @func. In case of allocation 487 * it returns just before the target cpu calls @func.
447 * failure, @wait will be implicitly turned on.
448 * 488 *
449 * You must not call this function with disabled interrupts or from a 489 * You must not call this function with disabled interrupts or from a
450 * hardware interrupt handler or from a bottom half handler. 490 * hardware interrupt handler or from a bottom half handler.
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f8749e5216e0..21939d9e830e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -302,9 +302,9 @@ void irq_exit(void)
302 if (!in_interrupt() && local_softirq_pending()) 302 if (!in_interrupt() && local_softirq_pending())
303 invoke_softirq(); 303 invoke_softirq();
304 304
305 rcu_irq_exit();
305#ifdef CONFIG_NO_HZ 306#ifdef CONFIG_NO_HZ
306 /* Make sure that timer wheel updates are propagated */ 307 /* Make sure that timer wheel updates are propagated */
307 rcu_irq_exit();
308 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) 308 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
309 tick_nohz_stop_sched_tick(0); 309 tick_nohz_stop_sched_tick(0);
310#endif 310#endif
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..41e042219ff6 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,145 +21,28 @@
21#include <linux/debug_locks.h> 21#include <linux/debug_locks.h>
22#include <linux/module.h> 22#include <linux/module.h>
23 23
24#ifndef _spin_trylock
25int __lockfunc _spin_trylock(spinlock_t *lock)
26{
27 return __spin_trylock(lock);
28}
29EXPORT_SYMBOL(_spin_trylock);
30#endif
31
32#ifndef _read_trylock
33int __lockfunc _read_trylock(rwlock_t *lock)
34{
35 return __read_trylock(lock);
36}
37EXPORT_SYMBOL(_read_trylock);
38#endif
39
40#ifndef _write_trylock
41int __lockfunc _write_trylock(rwlock_t *lock)
42{
43 return __write_trylock(lock);
44}
45EXPORT_SYMBOL(_write_trylock);
46#endif
47
48/* 24/*
49 * If lockdep is enabled then we use the non-preemption spin-ops 25 * If lockdep is enabled then we use the non-preemption spin-ops
50 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are 26 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
51 * not re-enabled during lock-acquire (which the preempt-spin-ops do): 27 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
52 */ 28 */
53#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 29#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
54
55#ifndef _read_lock
56void __lockfunc _read_lock(rwlock_t *lock)
57{
58 __read_lock(lock);
59}
60EXPORT_SYMBOL(_read_lock);
61#endif
62
63#ifndef _spin_lock_irqsave
64unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
65{
66 return __spin_lock_irqsave(lock);
67}
68EXPORT_SYMBOL(_spin_lock_irqsave);
69#endif
70
71#ifndef _spin_lock_irq
72void __lockfunc _spin_lock_irq(spinlock_t *lock)
73{
74 __spin_lock_irq(lock);
75}
76EXPORT_SYMBOL(_spin_lock_irq);
77#endif
78
79#ifndef _spin_lock_bh
80void __lockfunc _spin_lock_bh(spinlock_t *lock)
81{
82 __spin_lock_bh(lock);
83}
84EXPORT_SYMBOL(_spin_lock_bh);
85#endif
86
87#ifndef _read_lock_irqsave
88unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
89{
90 return __read_lock_irqsave(lock);
91}
92EXPORT_SYMBOL(_read_lock_irqsave);
93#endif
94
95#ifndef _read_lock_irq
96void __lockfunc _read_lock_irq(rwlock_t *lock)
97{
98 __read_lock_irq(lock);
99}
100EXPORT_SYMBOL(_read_lock_irq);
101#endif
102
103#ifndef _read_lock_bh
104void __lockfunc _read_lock_bh(rwlock_t *lock)
105{
106 __read_lock_bh(lock);
107}
108EXPORT_SYMBOL(_read_lock_bh);
109#endif
110
111#ifndef _write_lock_irqsave
112unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
113{
114 return __write_lock_irqsave(lock);
115}
116EXPORT_SYMBOL(_write_lock_irqsave);
117#endif
118
119#ifndef _write_lock_irq
120void __lockfunc _write_lock_irq(rwlock_t *lock)
121{
122 __write_lock_irq(lock);
123}
124EXPORT_SYMBOL(_write_lock_irq);
125#endif
126
127#ifndef _write_lock_bh
128void __lockfunc _write_lock_bh(rwlock_t *lock)
129{
130 __write_lock_bh(lock);
131}
132EXPORT_SYMBOL(_write_lock_bh);
133#endif
134
135#ifndef _spin_lock
136void __lockfunc _spin_lock(spinlock_t *lock)
137{
138 __spin_lock(lock);
139}
140EXPORT_SYMBOL(_spin_lock);
141#endif
142
143#ifndef _write_lock
144void __lockfunc _write_lock(rwlock_t *lock)
145{
146 __write_lock(lock);
147}
148EXPORT_SYMBOL(_write_lock);
149#endif
150
151#else /* CONFIG_PREEMPT: */
152
153/* 30/*
31 * The __lock_function inlines are taken from
32 * include/linux/spinlock_api_smp.h
33 */
34#else
35/*
36 * We build the __lock_function inlines here. They are too large for
37 * inlining all over the place, but here is only one user per function
 38 * which embeds them into the calling _lock_function below.
39 *
154 * This could be a long-held lock. We both prepare to spin for a long 40 * This could be a long-held lock. We both prepare to spin for a long
155 * time (making _this_ CPU preemptable if possible), and we also signal 41 * time (making _this_ CPU preemptable if possible), and we also signal
156 * towards that other CPU that it should break the lock ASAP. 42 * towards that other CPU that it should break the lock ASAP.
157 *
158 * (We do this in a function because inlining it would be excessive.)
159 */ 43 */
160
161#define BUILD_LOCK_OPS(op, locktype) \ 44#define BUILD_LOCK_OPS(op, locktype) \
162void __lockfunc _##op##_lock(locktype##_t *lock) \ 45void __lockfunc __##op##_lock(locktype##_t *lock) \
163{ \ 46{ \
164 for (;;) { \ 47 for (;;) { \
165 preempt_disable(); \ 48 preempt_disable(); \
@@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
175 (lock)->break_lock = 0; \ 58 (lock)->break_lock = 0; \
176} \ 59} \
177 \ 60 \
178EXPORT_SYMBOL(_##op##_lock); \ 61unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
179 \
180unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
181{ \ 62{ \
182 unsigned long flags; \ 63 unsigned long flags; \
183 \ 64 \
@@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
198 return flags; \ 79 return flags; \
199} \ 80} \
200 \ 81 \
201EXPORT_SYMBOL(_##op##_lock_irqsave); \ 82void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
202 \
203void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
204{ \ 83{ \
205 _##op##_lock_irqsave(lock); \ 84 _##op##_lock_irqsave(lock); \
206} \ 85} \
207 \ 86 \
208EXPORT_SYMBOL(_##op##_lock_irq); \ 87void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
209 \
210void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
211{ \ 88{ \
212 unsigned long flags; \ 89 unsigned long flags; \
213 \ 90 \
@@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
220 local_bh_disable(); \ 97 local_bh_disable(); \
221 local_irq_restore(flags); \ 98 local_irq_restore(flags); \
222} \ 99} \
223 \
224EXPORT_SYMBOL(_##op##_lock_bh)
225 100
226/* 101/*
227 * Build preemption-friendly versions of the following 102 * Build preemption-friendly versions of the following
228 * lock-spinning functions: 103 * lock-spinning functions:
229 * 104 *
230 * _[spin|read|write]_lock() 105 * __[spin|read|write]_lock()
231 * _[spin|read|write]_lock_irq() 106 * __[spin|read|write]_lock_irq()
232 * _[spin|read|write]_lock_irqsave() 107 * __[spin|read|write]_lock_irqsave()
233 * _[spin|read|write]_lock_bh() 108 * __[spin|read|write]_lock_bh()
234 */ 109 */
235BUILD_LOCK_OPS(spin, spinlock); 110BUILD_LOCK_OPS(spin, spinlock);
236BUILD_LOCK_OPS(read, rwlock); 111BUILD_LOCK_OPS(read, rwlock);
237BUILD_LOCK_OPS(write, rwlock); 112BUILD_LOCK_OPS(write, rwlock);
238 113
239#endif /* CONFIG_PREEMPT */ 114#endif
240 115
241#ifdef CONFIG_DEBUG_LOCK_ALLOC 116#ifdef CONFIG_DEBUG_LOCK_ALLOC
242 117
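
For reference, here is roughly the __spin_lock() that BUILD_LOCK_OPS(spin, spinlock) generates under CONFIG_PREEMPT, reconstructed from the fragments visible in this hunk rather than copied verbatim from the tree: try the lock with preemption disabled, otherwise re-enable preemption, signal break_lock, and spin politely before retrying.

void __lockfunc __spin_lock(spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (_raw_spin_trylock(lock))
			break;			/* got it: return with preemption off */
		preempt_enable();

		if (!(lock)->break_lock)
			(lock)->break_lock = 1;	/* ask the holder to drop the lock ASAP */
		while (spin_is_locked(lock) && (lock)->break_lock)
			cpu_relax();		/* busy-wait with preemption enabled */
	}
	(lock)->break_lock = 0;
}
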
@@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
248} 123}
249EXPORT_SYMBOL(_spin_lock_nested); 124EXPORT_SYMBOL(_spin_lock_nested);
250 125
251unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) 126unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
127 int subclass)
252{ 128{
253 unsigned long flags; 129 unsigned long flags;
254 130
@@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
272 148
273#endif 149#endif
274 150
275#ifndef _spin_unlock 151#ifndef CONFIG_INLINE_SPIN_TRYLOCK
152int __lockfunc _spin_trylock(spinlock_t *lock)
153{
154 return __spin_trylock(lock);
155}
156EXPORT_SYMBOL(_spin_trylock);
157#endif
158
159#ifndef CONFIG_INLINE_READ_TRYLOCK
160int __lockfunc _read_trylock(rwlock_t *lock)
161{
162 return __read_trylock(lock);
163}
164EXPORT_SYMBOL(_read_trylock);
165#endif
166
167#ifndef CONFIG_INLINE_WRITE_TRYLOCK
168int __lockfunc _write_trylock(rwlock_t *lock)
169{
170 return __write_trylock(lock);
171}
172EXPORT_SYMBOL(_write_trylock);
173#endif
174
175#ifndef CONFIG_INLINE_READ_LOCK
176void __lockfunc _read_lock(rwlock_t *lock)
177{
178 __read_lock(lock);
179}
180EXPORT_SYMBOL(_read_lock);
181#endif
182
183#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
184unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
185{
186 return __spin_lock_irqsave(lock);
187}
188EXPORT_SYMBOL(_spin_lock_irqsave);
189#endif
190
191#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
192void __lockfunc _spin_lock_irq(spinlock_t *lock)
193{
194 __spin_lock_irq(lock);
195}
196EXPORT_SYMBOL(_spin_lock_irq);
197#endif
198
199#ifndef CONFIG_INLINE_SPIN_LOCK_BH
200void __lockfunc _spin_lock_bh(spinlock_t *lock)
201{
202 __spin_lock_bh(lock);
203}
204EXPORT_SYMBOL(_spin_lock_bh);
205#endif
206
207#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
208unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
209{
210 return __read_lock_irqsave(lock);
211}
212EXPORT_SYMBOL(_read_lock_irqsave);
213#endif
214
215#ifndef CONFIG_INLINE_READ_LOCK_IRQ
216void __lockfunc _read_lock_irq(rwlock_t *lock)
217{
218 __read_lock_irq(lock);
219}
220EXPORT_SYMBOL(_read_lock_irq);
221#endif
222
223#ifndef CONFIG_INLINE_READ_LOCK_BH
224void __lockfunc _read_lock_bh(rwlock_t *lock)
225{
226 __read_lock_bh(lock);
227}
228EXPORT_SYMBOL(_read_lock_bh);
229#endif
230
231#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
232unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
233{
234 return __write_lock_irqsave(lock);
235}
236EXPORT_SYMBOL(_write_lock_irqsave);
237#endif
238
239#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
240void __lockfunc _write_lock_irq(rwlock_t *lock)
241{
242 __write_lock_irq(lock);
243}
244EXPORT_SYMBOL(_write_lock_irq);
245#endif
246
247#ifndef CONFIG_INLINE_WRITE_LOCK_BH
248void __lockfunc _write_lock_bh(rwlock_t *lock)
249{
250 __write_lock_bh(lock);
251}
252EXPORT_SYMBOL(_write_lock_bh);
253#endif
254
255#ifndef CONFIG_INLINE_SPIN_LOCK
256void __lockfunc _spin_lock(spinlock_t *lock)
257{
258 __spin_lock(lock);
259}
260EXPORT_SYMBOL(_spin_lock);
261#endif
262
263#ifndef CONFIG_INLINE_WRITE_LOCK
264void __lockfunc _write_lock(rwlock_t *lock)
265{
266 __write_lock(lock);
267}
268EXPORT_SYMBOL(_write_lock);
269#endif
270
271#ifndef CONFIG_INLINE_SPIN_UNLOCK
276void __lockfunc _spin_unlock(spinlock_t *lock) 272void __lockfunc _spin_unlock(spinlock_t *lock)
277{ 273{
278 __spin_unlock(lock); 274 __spin_unlock(lock);
@@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock)
280EXPORT_SYMBOL(_spin_unlock); 276EXPORT_SYMBOL(_spin_unlock);
281#endif 277#endif
282 278
283#ifndef _write_unlock 279#ifndef CONFIG_INLINE_WRITE_UNLOCK
284void __lockfunc _write_unlock(rwlock_t *lock) 280void __lockfunc _write_unlock(rwlock_t *lock)
285{ 281{
286 __write_unlock(lock); 282 __write_unlock(lock);
@@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock)
288EXPORT_SYMBOL(_write_unlock); 284EXPORT_SYMBOL(_write_unlock);
289#endif 285#endif
290 286
291#ifndef _read_unlock 287#ifndef CONFIG_INLINE_READ_UNLOCK
292void __lockfunc _read_unlock(rwlock_t *lock) 288void __lockfunc _read_unlock(rwlock_t *lock)
293{ 289{
294 __read_unlock(lock); 290 __read_unlock(lock);
@@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
296EXPORT_SYMBOL(_read_unlock); 292EXPORT_SYMBOL(_read_unlock);
297#endif 293#endif
298 294
299#ifndef _spin_unlock_irqrestore 295#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
300void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 296void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
301{ 297{
302 __spin_unlock_irqrestore(lock, flags); 298 __spin_unlock_irqrestore(lock, flags);
@@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
304EXPORT_SYMBOL(_spin_unlock_irqrestore); 300EXPORT_SYMBOL(_spin_unlock_irqrestore);
305#endif 301#endif
306 302
307#ifndef _spin_unlock_irq 303#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
308void __lockfunc _spin_unlock_irq(spinlock_t *lock) 304void __lockfunc _spin_unlock_irq(spinlock_t *lock)
309{ 305{
310 __spin_unlock_irq(lock); 306 __spin_unlock_irq(lock);
@@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock)
312EXPORT_SYMBOL(_spin_unlock_irq); 308EXPORT_SYMBOL(_spin_unlock_irq);
313#endif 309#endif
314 310
315#ifndef _spin_unlock_bh 311#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
316void __lockfunc _spin_unlock_bh(spinlock_t *lock) 312void __lockfunc _spin_unlock_bh(spinlock_t *lock)
317{ 313{
318 __spin_unlock_bh(lock); 314 __spin_unlock_bh(lock);
@@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
320EXPORT_SYMBOL(_spin_unlock_bh); 316EXPORT_SYMBOL(_spin_unlock_bh);
321#endif 317#endif
322 318
323#ifndef _read_unlock_irqrestore 319#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
324void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 320void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
325{ 321{
326 __read_unlock_irqrestore(lock, flags); 322 __read_unlock_irqrestore(lock, flags);
@@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
328EXPORT_SYMBOL(_read_unlock_irqrestore); 324EXPORT_SYMBOL(_read_unlock_irqrestore);
329#endif 325#endif
330 326
331#ifndef _read_unlock_irq 327#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
332void __lockfunc _read_unlock_irq(rwlock_t *lock) 328void __lockfunc _read_unlock_irq(rwlock_t *lock)
333{ 329{
334 __read_unlock_irq(lock); 330 __read_unlock_irq(lock);
@@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock)
336EXPORT_SYMBOL(_read_unlock_irq); 332EXPORT_SYMBOL(_read_unlock_irq);
337#endif 333#endif
338 334
339#ifndef _read_unlock_bh 335#ifndef CONFIG_INLINE_READ_UNLOCK_BH
340void __lockfunc _read_unlock_bh(rwlock_t *lock) 336void __lockfunc _read_unlock_bh(rwlock_t *lock)
341{ 337{
342 __read_unlock_bh(lock); 338 __read_unlock_bh(lock);
@@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
344EXPORT_SYMBOL(_read_unlock_bh); 340EXPORT_SYMBOL(_read_unlock_bh);
345#endif 341#endif
346 342
347#ifndef _write_unlock_irqrestore 343#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
348void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 344void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
349{ 345{
350 __write_unlock_irqrestore(lock, flags); 346 __write_unlock_irqrestore(lock, flags);
@@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
352EXPORT_SYMBOL(_write_unlock_irqrestore); 348EXPORT_SYMBOL(_write_unlock_irqrestore);
353#endif 349#endif
354 350
355#ifndef _write_unlock_irq 351#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
356void __lockfunc _write_unlock_irq(rwlock_t *lock) 352void __lockfunc _write_unlock_irq(rwlock_t *lock)
357{ 353{
358 __write_unlock_irq(lock); 354 __write_unlock_irq(lock);
@@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock)
360EXPORT_SYMBOL(_write_unlock_irq); 356EXPORT_SYMBOL(_write_unlock_irq);
361#endif 357#endif
362 358
363#ifndef _write_unlock_bh 359#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
364void __lockfunc _write_unlock_bh(rwlock_t *lock) 360void __lockfunc _write_unlock_bh(rwlock_t *lock)
365{ 361{
366 __write_unlock_bh(lock); 362 __write_unlock_bh(lock);
@@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
368EXPORT_SYMBOL(_write_unlock_bh); 364EXPORT_SYMBOL(_write_unlock_bh);
369#endif 365#endif
370 366
371#ifndef _spin_trylock_bh 367#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
372int __lockfunc _spin_trylock_bh(spinlock_t *lock) 368int __lockfunc _spin_trylock_bh(spinlock_t *lock)
373{ 369{
374 return __spin_trylock_bh(lock); 370 return __spin_trylock_bh(lock);
diff --git a/kernel/srcu.c b/kernel/srcu.c
index b0aeeaf22ce4..818d7d9aa03c 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -49,6 +49,7 @@ int init_srcu_struct(struct srcu_struct *sp)
49 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); 49 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
50 return (sp->per_cpu_ref ? 0 : -ENOMEM); 50 return (sp->per_cpu_ref ? 0 : -ENOMEM);
51} 51}
52EXPORT_SYMBOL_GPL(init_srcu_struct);
52 53
53/* 54/*
54 * srcu_readers_active_idx -- returns approximate number of readers 55 * srcu_readers_active_idx -- returns approximate number of readers
@@ -97,6 +98,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
97 free_percpu(sp->per_cpu_ref); 98 free_percpu(sp->per_cpu_ref);
98 sp->per_cpu_ref = NULL; 99 sp->per_cpu_ref = NULL;
99} 100}
101EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
100 102
101/** 103/**
102 * srcu_read_lock - register a new reader for an SRCU-protected structure. 104 * srcu_read_lock - register a new reader for an SRCU-protected structure.
@@ -118,6 +120,7 @@ int srcu_read_lock(struct srcu_struct *sp)
118 preempt_enable(); 120 preempt_enable();
119 return idx; 121 return idx;
120} 122}
123EXPORT_SYMBOL_GPL(srcu_read_lock);
121 124
122/** 125/**
123 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. 126 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
@@ -136,22 +139,12 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx)
136 per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; 139 per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
137 preempt_enable(); 140 preempt_enable();
138} 141}
142EXPORT_SYMBOL_GPL(srcu_read_unlock);
139 143
140/** 144/*
141 * synchronize_srcu - wait for prior SRCU read-side critical-section completion 145 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
142 * @sp: srcu_struct with which to synchronize.
143 *
144 * Flip the completed counter, and wait for the old count to drain to zero.
145 * As with classic RCU, the updater must use some separate means of
146 * synchronizing concurrent updates. Can block; must be called from
147 * process context.
148 *
149 * Note that it is illegal to call synchornize_srcu() from the corresponding
150 * SRCU read-side critical section; doing so will result in deadlock.
151 * However, it is perfectly legal to call synchronize_srcu() on one
152 * srcu_struct from some other srcu_struct's read-side critical section.
153 */ 146 */
154void synchronize_srcu(struct srcu_struct *sp) 147void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
155{ 148{
156 int idx; 149 int idx;
157 150
@@ -173,7 +166,7 @@ void synchronize_srcu(struct srcu_struct *sp)
173 return; 166 return;
174 } 167 }
175 168
176 synchronize_sched(); /* Force memory barrier on all CPUs. */ 169 sync_func(); /* Force memory barrier on all CPUs. */
177 170
178 /* 171 /*
179 * The preceding synchronize_sched() ensures that any CPU that 172 * The preceding synchronize_sched() ensures that any CPU that
@@ -190,7 +183,7 @@ void synchronize_srcu(struct srcu_struct *sp)
190 idx = sp->completed & 0x1; 183 idx = sp->completed & 0x1;
191 sp->completed++; 184 sp->completed++;
192 185
193 synchronize_sched(); /* Force memory barrier on all CPUs. */ 186 sync_func(); /* Force memory barrier on all CPUs. */
194 187
195 /* 188 /*
196 * At this point, because of the preceding synchronize_sched(), 189 * At this point, because of the preceding synchronize_sched(),
@@ -203,7 +196,7 @@ void synchronize_srcu(struct srcu_struct *sp)
203 while (srcu_readers_active_idx(sp, idx)) 196 while (srcu_readers_active_idx(sp, idx))
204 schedule_timeout_interruptible(1); 197 schedule_timeout_interruptible(1);
205 198
206 synchronize_sched(); /* Force memory barrier on all CPUs. */ 199 sync_func(); /* Force memory barrier on all CPUs. */
207 200
208 /* 201 /*
209 * The preceding synchronize_sched() forces all srcu_read_unlock() 202 * The preceding synchronize_sched() forces all srcu_read_unlock()
@@ -237,6 +230,47 @@ void synchronize_srcu(struct srcu_struct *sp)
237} 230}
238 231
239/** 232/**
233 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
234 * @sp: srcu_struct with which to synchronize.
235 *
236 * Flip the completed counter, and wait for the old count to drain to zero.
237 * As with classic RCU, the updater must use some separate means of
238 * synchronizing concurrent updates. Can block; must be called from
239 * process context.
240 *
241 * Note that it is illegal to call synchronize_srcu() from the corresponding
242 * SRCU read-side critical section; doing so will result in deadlock.
243 * However, it is perfectly legal to call synchronize_srcu() on one
244 * srcu_struct from some other srcu_struct's read-side critical section.
245 */
246void synchronize_srcu(struct srcu_struct *sp)
247{
248 __synchronize_srcu(sp, synchronize_sched);
249}
250EXPORT_SYMBOL_GPL(synchronize_srcu);
251
252/**
253 * synchronize_srcu_expedited - like synchronize_srcu, but less patient
254 * @sp: srcu_struct with which to synchronize.
255 *
256 * Flip the completed counter, and wait for the old count to drain to zero.
257 * As with classic RCU, the updater must use some separate means of
258 * synchronizing concurrent updates. Can block; must be called from
259 * process context.
260 *
261 * Note that it is illegal to call synchronize_srcu_expedited()
262 * from the corresponding SRCU read-side critical section; doing so
263 * will result in deadlock. However, it is perfectly legal to call
264 * synchronize_srcu_expedited() on one srcu_struct from some other
265 * srcu_struct's read-side critical section.
266 */
267void synchronize_srcu_expedited(struct srcu_struct *sp)
268{
269 __synchronize_srcu(sp, synchronize_sched_expedited);
270}
271EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
272
273/**
240 * srcu_batches_completed - return batches completed. 274 * srcu_batches_completed - return batches completed.
241 * @sp: srcu_struct on which to report batch completion. 275 * @sp: srcu_struct on which to report batch completion.
242 * 276 *
@@ -248,10 +282,4 @@ long srcu_batches_completed(struct srcu_struct *sp)
248{ 282{
249 return sp->completed; 283 return sp->completed;
250} 284}
251
252EXPORT_SYMBOL_GPL(init_srcu_struct);
253EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
254EXPORT_SYMBOL_GPL(srcu_read_lock);
255EXPORT_SYMBOL_GPL(srcu_read_unlock);
256EXPORT_SYMBOL_GPL(synchronize_srcu);
257EXPORT_SYMBOL_GPL(srcu_batches_completed); 285EXPORT_SYMBOL_GPL(srcu_batches_completed);
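
A usage sketch for the SRCU interfaces whose exports are added above -- my_srcu, my_data, my_reader() and my_updater() are hypothetical, and a real updater would also serialize concurrent updates and publish the pointer with rcu_assign_pointer():

static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at setup,
					 * cleanup_srcu_struct(&my_srcu) at teardown */
static int *my_data;

static int my_reader(void)
{
	int idx, val = 0;

	idx = srcu_read_lock(&my_srcu);		/* may be held across sleeping */
	if (my_data)
		val = *my_data;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}

static void my_updater(int *newp)
{
	int *oldp = my_data;

	my_data = newp;
	synchronize_srcu_expedited(&my_srcu);	/* or synchronize_srcu() */
	kfree(oldp);				/* no SRCU reader still references oldp */
}
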
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0d949c517412..4dbf93a52ee9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -36,6 +36,7 @@
36#include <linux/sysrq.h> 36#include <linux/sysrq.h>
37#include <linux/highuid.h> 37#include <linux/highuid.h>
38#include <linux/writeback.h> 38#include <linux/writeback.h>
39#include <linux/ratelimit.h>
39#include <linux/hugetlb.h> 40#include <linux/hugetlb.h>
40#include <linux/initrd.h> 41#include <linux/initrd.h>
41#include <linux/key.h> 42#include <linux/key.h>
@@ -158,6 +159,8 @@ extern int no_unaligned_warning;
158extern int unaligned_dump_stack; 159extern int unaligned_dump_stack;
159#endif 160#endif
160 161
162extern struct ratelimit_state printk_ratelimit_state;
163
161#ifdef CONFIG_RT_MUTEXES 164#ifdef CONFIG_RT_MUTEXES
162extern int max_lock_depth; 165extern int max_lock_depth;
163#endif 166#endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6dc4e5ef7a01..e51a1bcb7bed 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -60,6 +60,13 @@ static int last_ftrace_enabled;
60/* Quick disabling of function tracer. */ 60/* Quick disabling of function tracer. */
61int function_trace_stop; 61int function_trace_stop;
62 62
63/* List for set_ftrace_pid's pids. */
64LIST_HEAD(ftrace_pids);
65struct ftrace_pid {
66 struct list_head list;
67 struct pid *pid;
68};
69
63/* 70/*
64 * ftrace_disabled is set when an anomaly is discovered. 71 * ftrace_disabled is set when an anomaly is discovered.
65 * ftrace_disabled is much stronger than ftrace_enabled. 72 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -78,6 +85,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
78ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; 85ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
79ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; 86ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
80 87
88#ifdef CONFIG_FUNCTION_GRAPH_TRACER
89static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
90#endif
91
81static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) 92static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
82{ 93{
83 struct ftrace_ops *op = ftrace_list; 94 struct ftrace_ops *op = ftrace_list;
@@ -155,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
155 else 166 else
156 func = ftrace_list_func; 167 func = ftrace_list_func;
157 168
158 if (ftrace_pid_trace) { 169 if (!list_empty(&ftrace_pids)) {
159 set_ftrace_pid_function(func); 170 set_ftrace_pid_function(func);
160 func = ftrace_pid_func; 171 func = ftrace_pid_func;
161 } 172 }
@@ -203,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
203 if (ftrace_list->next == &ftrace_list_end) { 214 if (ftrace_list->next == &ftrace_list_end) {
204 ftrace_func_t func = ftrace_list->func; 215 ftrace_func_t func = ftrace_list->func;
205 216
206 if (ftrace_pid_trace) { 217 if (!list_empty(&ftrace_pids)) {
207 set_ftrace_pid_function(func); 218 set_ftrace_pid_function(func);
208 func = ftrace_pid_func; 219 func = ftrace_pid_func;
209 } 220 }
@@ -231,7 +242,7 @@ static void ftrace_update_pid_func(void)
231 func = __ftrace_trace_function; 242 func = __ftrace_trace_function;
232#endif 243#endif
233 244
234 if (ftrace_pid_trace) { 245 if (!list_empty(&ftrace_pids)) {
235 set_ftrace_pid_function(func); 246 set_ftrace_pid_function(func);
236 func = ftrace_pid_func; 247 func = ftrace_pid_func;
237 } else { 248 } else {
@@ -821,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
821} 832}
822#endif /* CONFIG_FUNCTION_PROFILER */ 833#endif /* CONFIG_FUNCTION_PROFILER */
823 834
824/* set when tracing only a pid */
825struct pid *ftrace_pid_trace;
826static struct pid * const ftrace_swapper_pid = &init_struct_pid; 835static struct pid * const ftrace_swapper_pid = &init_struct_pid;
827 836
828#ifdef CONFIG_DYNAMIC_FTRACE 837#ifdef CONFIG_DYNAMIC_FTRACE
@@ -1261,12 +1270,34 @@ static int ftrace_update_code(struct module *mod)
1261 ftrace_new_addrs = p->newlist; 1270 ftrace_new_addrs = p->newlist;
1262 p->flags = 0L; 1271 p->flags = 0L;
1263 1272
1264 /* convert record (i.e, patch mcount-call with NOP) */ 1273 /*
1265 if (ftrace_code_disable(mod, p)) { 1274 * Do the initial record conversion from mcount jump
1266 p->flags |= FTRACE_FL_CONVERTED; 1275 * to the NOP instructions.
1267 ftrace_update_cnt++; 1276 */
1268 } else 1277 if (!ftrace_code_disable(mod, p)) {
1269 ftrace_free_rec(p); 1278 ftrace_free_rec(p);
1279 continue;
1280 }
1281
1282 p->flags |= FTRACE_FL_CONVERTED;
1283 ftrace_update_cnt++;
1284
1285 /*
1286 * If the tracing is enabled, go ahead and enable the record.
1287 *
1288 * The reason not to enable the record immediately is the
1289 * inherent check of ftrace_make_nop/ftrace_make_call for
1290 * correct previous instructions. Doing the NOP conversion
1291 * first puts the module into the correct state, thus
1292 * passing the ftrace_make_call check.
1293 */
1294 if (ftrace_start_up) {
1295 int failed = __ftrace_replace_code(p, 1);
1296 if (failed) {
1297 ftrace_bug(failed, p->ip);
1298 ftrace_free_rec(p);
1299 }
1300 }
1270 } 1301 }
1271 1302
1272 stop = ftrace_now(raw_smp_processor_id()); 1303 stop = ftrace_now(raw_smp_processor_id());
@@ -1656,60 +1687,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1656 return ret; 1687 return ret;
1657} 1688}
1658 1689
1659enum {
1660 MATCH_FULL,
1661 MATCH_FRONT_ONLY,
1662 MATCH_MIDDLE_ONLY,
1663 MATCH_END_ONLY,
1664};
1665
1666/*
1667 * (static function - no need for kernel doc)
1668 *
1669 * Pass in a buffer containing a glob and this function will
1670 * set search to point to the search part of the buffer and
1671 * return the type of search it is (see enum above).
1672 * This does modify buff.
1673 *
1674 * Returns enum type.
1675 * search returns the pointer to use for comparison.
1676 * not returns 1 if buff started with a '!'
1677 * 0 otherwise.
1678 */
1679static int
1680ftrace_setup_glob(char *buff, int len, char **search, int *not)
1681{
1682 int type = MATCH_FULL;
1683 int i;
1684
1685 if (buff[0] == '!') {
1686 *not = 1;
1687 buff++;
1688 len--;
1689 } else
1690 *not = 0;
1691
1692 *search = buff;
1693
1694 for (i = 0; i < len; i++) {
1695 if (buff[i] == '*') {
1696 if (!i) {
1697 *search = buff + 1;
1698 type = MATCH_END_ONLY;
1699 } else {
1700 if (type == MATCH_END_ONLY)
1701 type = MATCH_MIDDLE_ONLY;
1702 else
1703 type = MATCH_FRONT_ONLY;
1704 buff[i] = 0;
1705 break;
1706 }
1707 }
1708 }
1709
1710 return type;
1711}
1712
1713static int ftrace_match(char *str, char *regex, int len, int type) 1690static int ftrace_match(char *str, char *regex, int len, int type)
1714{ 1691{
1715 int matched = 0; 1692 int matched = 0;
@@ -1758,7 +1735,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
1758 int not; 1735 int not;
1759 1736
1760 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1737 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1761 type = ftrace_setup_glob(buff, len, &search, &not); 1738 type = filter_parse_regex(buff, len, &search, &not);
1762 1739
1763 search_len = strlen(search); 1740 search_len = strlen(search);
1764 1741
@@ -1826,7 +1803,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
1826 } 1803 }
1827 1804
1828 if (strlen(buff)) { 1805 if (strlen(buff)) {
1829 type = ftrace_setup_glob(buff, strlen(buff), &search, &not); 1806 type = filter_parse_regex(buff, strlen(buff), &search, &not);
1830 search_len = strlen(search); 1807 search_len = strlen(search);
1831 } 1808 }
1832 1809
@@ -1991,7 +1968,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1991 int count = 0; 1968 int count = 0;
1992 char *search; 1969 char *search;
1993 1970
1994 type = ftrace_setup_glob(glob, strlen(glob), &search, &not); 1971 type = filter_parse_regex(glob, strlen(glob), &search, &not);
1995 len = strlen(search); 1972 len = strlen(search);
1996 1973
1997 /* we do not support '!' for function probes */ 1974 /* we do not support '!' for function probes */
@@ -2068,7 +2045,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2068 else if (glob) { 2045 else if (glob) {
2069 int not; 2046 int not;
2070 2047
2071 type = ftrace_setup_glob(glob, strlen(glob), &search, &not); 2048 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2072 len = strlen(search); 2049 len = strlen(search);
2073 2050
2074 /* we do not support '!' for function probes */ 2051 /* we do not support '!' for function probes */
@@ -2312,6 +2289,32 @@ static int __init set_ftrace_filter(char *str)
2312} 2289}
2313__setup("ftrace_filter=", set_ftrace_filter); 2290__setup("ftrace_filter=", set_ftrace_filter);
2314 2291
2292#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2293static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2294static int __init set_graph_function(char *str)
2295{
2296 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2297 return 1;
2298}
2299__setup("ftrace_graph_filter=", set_graph_function);
2300
2301static void __init set_ftrace_early_graph(char *buf)
2302{
2303 int ret;
2304 char *func;
2305
2306 while (buf) {
2307 func = strsep(&buf, ",");
2308 /* we allow only one expression at a time */
2309 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2310 func);
2311 if (ret)
2312 printk(KERN_DEBUG "ftrace: function %s not "
2313 "traceable\n", func);
2314 }
2315}
2316#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2317
2315static void __init set_ftrace_early_filter(char *buf, int enable) 2318static void __init set_ftrace_early_filter(char *buf, int enable)
2316{ 2319{
2317 char *func; 2320 char *func;
@@ -2328,6 +2331,10 @@ static void __init set_ftrace_early_filters(void)
2328 set_ftrace_early_filter(ftrace_filter_buf, 1); 2331 set_ftrace_early_filter(ftrace_filter_buf, 1);
2329 if (ftrace_notrace_buf[0]) 2332 if (ftrace_notrace_buf[0])
2330 set_ftrace_early_filter(ftrace_notrace_buf, 0); 2333 set_ftrace_early_filter(ftrace_notrace_buf, 0);
2334#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2335 if (ftrace_graph_buf[0])
2336 set_ftrace_early_graph(ftrace_graph_buf);
2337#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2331} 2338}
2332 2339
2333static int 2340static int
@@ -2513,7 +2520,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2513 return -ENODEV; 2520 return -ENODEV;
2514 2521
2515 /* decode regex */ 2522 /* decode regex */
2516 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not); 2523 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
2517 if (not) 2524 if (not)
2518 return -EINVAL; 2525 return -EINVAL;
2519 2526
@@ -2624,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2624 return 0; 2631 return 0;
2625} 2632}
2626 2633
2627static int ftrace_convert_nops(struct module *mod, 2634static int ftrace_process_locs(struct module *mod,
2628 unsigned long *start, 2635 unsigned long *start,
2629 unsigned long *end) 2636 unsigned long *end)
2630{ 2637{
@@ -2684,7 +2691,7 @@ static void ftrace_init_module(struct module *mod,
2684{ 2691{
2685 if (ftrace_disabled || start == end) 2692 if (ftrace_disabled || start == end)
2686 return; 2693 return;
2687 ftrace_convert_nops(mod, start, end); 2694 ftrace_process_locs(mod, start, end);
2688} 2695}
2689 2696
2690static int ftrace_module_notify(struct notifier_block *self, 2697static int ftrace_module_notify(struct notifier_block *self,
@@ -2745,7 +2752,7 @@ void __init ftrace_init(void)
2745 2752
2746 last_ftrace_enabled = ftrace_enabled = 1; 2753 last_ftrace_enabled = ftrace_enabled = 1;
2747 2754
2748 ret = ftrace_convert_nops(NULL, 2755 ret = ftrace_process_locs(NULL,
2749 __start_mcount_loc, 2756 __start_mcount_loc,
2750 __stop_mcount_loc); 2757 __stop_mcount_loc);
2751 2758
@@ -2778,23 +2785,6 @@ static inline void ftrace_startup_enable(int command) { }
2778# define ftrace_shutdown_sysctl() do { } while (0) 2785# define ftrace_shutdown_sysctl() do { } while (0)
2779#endif /* CONFIG_DYNAMIC_FTRACE */ 2786#endif /* CONFIG_DYNAMIC_FTRACE */
2780 2787
2781static ssize_t
2782ftrace_pid_read(struct file *file, char __user *ubuf,
2783 size_t cnt, loff_t *ppos)
2784{
2785 char buf[64];
2786 int r;
2787
2788 if (ftrace_pid_trace == ftrace_swapper_pid)
2789 r = sprintf(buf, "swapper tasks\n");
2790 else if (ftrace_pid_trace)
2791 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2792 else
2793 r = sprintf(buf, "no pid\n");
2794
2795 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2796}
2797
2798static void clear_ftrace_swapper(void) 2788static void clear_ftrace_swapper(void)
2799{ 2789{
2800 struct task_struct *p; 2790 struct task_struct *p;
@@ -2845,14 +2835,12 @@ static void set_ftrace_pid(struct pid *pid)
2845 rcu_read_unlock(); 2835 rcu_read_unlock();
2846} 2836}
2847 2837
2848static void clear_ftrace_pid_task(struct pid **pid) 2838static void clear_ftrace_pid_task(struct pid *pid)
2849{ 2839{
2850 if (*pid == ftrace_swapper_pid) 2840 if (pid == ftrace_swapper_pid)
2851 clear_ftrace_swapper(); 2841 clear_ftrace_swapper();
2852 else 2842 else
2853 clear_ftrace_pid(*pid); 2843 clear_ftrace_pid(pid);
2854
2855 *pid = NULL;
2856} 2844}
2857 2845
2858static void set_ftrace_pid_task(struct pid *pid) 2846static void set_ftrace_pid_task(struct pid *pid)
@@ -2863,74 +2851,184 @@ static void set_ftrace_pid_task(struct pid *pid)
2863 set_ftrace_pid(pid); 2851 set_ftrace_pid(pid);
2864} 2852}
2865 2853
2866static ssize_t 2854static int ftrace_pid_add(int p)
2867ftrace_pid_write(struct file *filp, const char __user *ubuf,
2868 size_t cnt, loff_t *ppos)
2869{ 2855{
2870 struct pid *pid; 2856 struct pid *pid;
2871 char buf[64]; 2857 struct ftrace_pid *fpid;
2872 long val; 2858 int ret = -EINVAL;
2873 int ret;
2874 2859
2875 if (cnt >= sizeof(buf)) 2860 mutex_lock(&ftrace_lock);
2876 return -EINVAL;
2877 2861
2878 if (copy_from_user(&buf, ubuf, cnt)) 2862 if (!p)
2879 return -EFAULT; 2863 pid = ftrace_swapper_pid;
2864 else
2865 pid = find_get_pid(p);
2880 2866
2881 buf[cnt] = 0; 2867 if (!pid)
2868 goto out;
2882 2869
2883 ret = strict_strtol(buf, 10, &val); 2870 ret = 0;
2884 if (ret < 0)
2885 return ret;
2886 2871
2887 mutex_lock(&ftrace_lock); 2872 list_for_each_entry(fpid, &ftrace_pids, list)
2888 if (val < 0) { 2873 if (fpid->pid == pid)
2889 /* disable pid tracing */ 2874 goto out_put;
2890 if (!ftrace_pid_trace)
2891 goto out;
2892 2875
2893 clear_ftrace_pid_task(&ftrace_pid_trace); 2876 ret = -ENOMEM;
2894 2877
2895 } else { 2878 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2896 /* swapper task is special */ 2879 if (!fpid)
2897 if (!val) { 2880 goto out_put;
2898 pid = ftrace_swapper_pid;
2899 if (pid == ftrace_pid_trace)
2900 goto out;
2901 } else {
2902 pid = find_get_pid(val);
2903 2881
2904 if (pid == ftrace_pid_trace) { 2882 list_add(&fpid->list, &ftrace_pids);
2905 put_pid(pid); 2883 fpid->pid = pid;
2906 goto out;
2907 }
2908 }
2909 2884
2910 if (ftrace_pid_trace) 2885 set_ftrace_pid_task(pid);
2911 clear_ftrace_pid_task(&ftrace_pid_trace);
2912 2886
2913 if (!pid) 2887 ftrace_update_pid_func();
2914 goto out; 2888 ftrace_startup_enable(0);
2889
2890 mutex_unlock(&ftrace_lock);
2891 return 0;
2892
2893out_put:
2894 if (pid != ftrace_swapper_pid)
2895 put_pid(pid);
2915 2896
2916 ftrace_pid_trace = pid; 2897out:
2898 mutex_unlock(&ftrace_lock);
2899 return ret;
2900}
2901
2902static void ftrace_pid_reset(void)
2903{
2904 struct ftrace_pid *fpid, *safe;
2917 2905
2918 set_ftrace_pid_task(ftrace_pid_trace); 2906 mutex_lock(&ftrace_lock);
2907 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2908 struct pid *pid = fpid->pid;
2909
2910 clear_ftrace_pid_task(pid);
2911
2912 list_del(&fpid->list);
2913 kfree(fpid);
2919 } 2914 }
2920 2915
2921 /* update the function call */
2922 ftrace_update_pid_func(); 2916 ftrace_update_pid_func();
2923 ftrace_startup_enable(0); 2917 ftrace_startup_enable(0);
2924 2918
2925 out:
2926 mutex_unlock(&ftrace_lock); 2919 mutex_unlock(&ftrace_lock);
2920}
2927 2921
2928 return cnt; 2922static void *fpid_start(struct seq_file *m, loff_t *pos)
2923{
2924 mutex_lock(&ftrace_lock);
2925
2926 if (list_empty(&ftrace_pids) && (!*pos))
2927 return (void *) 1;
2928
2929 return seq_list_start(&ftrace_pids, *pos);
2930}
2931
2932static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2933{
2934 if (v == (void *)1)
2935 return NULL;
2936
2937 return seq_list_next(v, &ftrace_pids, pos);
2938}
2939
2940static void fpid_stop(struct seq_file *m, void *p)
2941{
2942 mutex_unlock(&ftrace_lock);
2943}
2944
2945static int fpid_show(struct seq_file *m, void *v)
2946{
2947 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
2948
2949 if (v == (void *)1) {
2950 seq_printf(m, "no pid\n");
2951 return 0;
2952 }
2953
2954 if (fpid->pid == ftrace_swapper_pid)
2955 seq_printf(m, "swapper tasks\n");
2956 else
2957 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
2958
2959 return 0;
2960}
2961
2962static const struct seq_operations ftrace_pid_sops = {
2963 .start = fpid_start,
2964 .next = fpid_next,
2965 .stop = fpid_stop,
2966 .show = fpid_show,
2967};
2968
2969static int
2970ftrace_pid_open(struct inode *inode, struct file *file)
2971{
2972 int ret = 0;
2973
2974 if ((file->f_mode & FMODE_WRITE) &&
2975 (file->f_flags & O_TRUNC))
2976 ftrace_pid_reset();
2977
2978 if (file->f_mode & FMODE_READ)
2979 ret = seq_open(file, &ftrace_pid_sops);
2980
2981 return ret;
2982}
2983
2984static ssize_t
2985ftrace_pid_write(struct file *filp, const char __user *ubuf,
2986 size_t cnt, loff_t *ppos)
2987{
2988 char buf[64], *tmp;
2989 long val;
2990 int ret;
2991
2992 if (cnt >= sizeof(buf))
2993 return -EINVAL;
2994
2995 if (copy_from_user(&buf, ubuf, cnt))
2996 return -EFAULT;
2997
2998 buf[cnt] = 0;
2999
3000 /*
3001 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3002 * to clear the filter quietly.
3003 */
3004 tmp = strstrip(buf);
3005 if (strlen(tmp) == 0)
3006 return 1;
3007
3008 ret = strict_strtol(tmp, 10, &val);
3009 if (ret < 0)
3010 return ret;
3011
3012 ret = ftrace_pid_add(val);
3013
3014 return ret ? ret : cnt;
3015}
3016
3017static int
3018ftrace_pid_release(struct inode *inode, struct file *file)
3019{
3020 if (file->f_mode & FMODE_READ)
3021 seq_release(inode, file);
3022
3023 return 0;
2929} 3024}
2930 3025
2931static const struct file_operations ftrace_pid_fops = { 3026static const struct file_operations ftrace_pid_fops = {
2932 .read = ftrace_pid_read, 3027 .open = ftrace_pid_open,
2933 .write = ftrace_pid_write, 3028 .write = ftrace_pid_write,
3029 .read = seq_read,
3030 .llseek = seq_lseek,
3031 .release = ftrace_pid_release,
2934}; 3032};
2935 3033
2936static __init int ftrace_init_debugfs(void) 3034static __init int ftrace_init_debugfs(void)
@@ -3293,4 +3391,3 @@ void ftrace_graph_stop(void)
3293 ftrace_stop(); 3391 ftrace_stop();
3294} 3392}
3295#endif 3393#endif
3296
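
The set_ftrace_pid rework above replaces the single ftrace_pid_trace pointer with the ftrace_pids list: each write to the file adds one pid (0 selects the swapper tasks), reads go through seq_file, and opening the file for writing with O_TRUNC calls ftrace_pid_reset() to clear the list. A hedged userspace sketch of driving that interface; the debugfs mount point is an assumption, the rest follows the file operations above.

    #include <sys/types.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define FTRACE_PID "/sys/kernel/debug/tracing/set_ftrace_pid"

    /* Add one pid to the trace filter; repeated calls accumulate entries
     * on the kernel's ftrace_pids list. */
    static int add_traced_pid(pid_t pid)
    {
            char buf[32];
            int fd = open(FTRACE_PID, O_WRONLY);

            if (fd < 0)
                    return -1;
            snprintf(buf, sizeof(buf), "%d", (int)pid);
            write(fd, buf, strlen(buf));
            close(fd);
            return 0;
    }

    /* O_TRUNC (what "echo > set_ftrace_pid" does) triggers
     * ftrace_pid_reset() in ftrace_pid_open(). */
    static void clear_traced_pids(void)
    {
            int fd = open(FTRACE_PID, O_WRONLY | O_TRUNC);

            if (fd >= 0)
                    close(fd);
    }
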
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5dd017fea6f5..a72c6e03deec 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1787,9 +1787,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1787static struct ring_buffer_event * 1787static struct ring_buffer_event *
1788rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 1788rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1789 unsigned long length, unsigned long tail, 1789 unsigned long length, unsigned long tail,
1790 struct buffer_page *commit_page,
1791 struct buffer_page *tail_page, u64 *ts) 1790 struct buffer_page *tail_page, u64 *ts)
1792{ 1791{
1792 struct buffer_page *commit_page = cpu_buffer->commit_page;
1793 struct ring_buffer *buffer = cpu_buffer->buffer; 1793 struct ring_buffer *buffer = cpu_buffer->buffer;
1794 struct buffer_page *next_page; 1794 struct buffer_page *next_page;
1795 int ret; 1795 int ret;
@@ -1892,13 +1892,10 @@ static struct ring_buffer_event *
1892__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 1892__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1893 unsigned type, unsigned long length, u64 *ts) 1893 unsigned type, unsigned long length, u64 *ts)
1894{ 1894{
1895 struct buffer_page *tail_page, *commit_page; 1895 struct buffer_page *tail_page;
1896 struct ring_buffer_event *event; 1896 struct ring_buffer_event *event;
1897 unsigned long tail, write; 1897 unsigned long tail, write;
1898 1898
1899 commit_page = cpu_buffer->commit_page;
1900 /* we just need to protect against interrupts */
1901 barrier();
1902 tail_page = cpu_buffer->tail_page; 1899 tail_page = cpu_buffer->tail_page;
1903 write = local_add_return(length, &tail_page->write); 1900 write = local_add_return(length, &tail_page->write);
1904 1901
@@ -1909,7 +1906,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1909 /* See if we shot pass the end of this buffer page */ 1906 /* See if we shot pass the end of this buffer page */
1910 if (write > BUF_PAGE_SIZE) 1907 if (write > BUF_PAGE_SIZE)
1911 return rb_move_tail(cpu_buffer, length, tail, 1908 return rb_move_tail(cpu_buffer, length, tail,
1912 commit_page, tail_page, ts); 1909 tail_page, ts);
1913 1910
1914 /* We reserved something on the buffer */ 1911 /* We reserved something on the buffer */
1915 1912
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 573d3cc762c3..b2477caf09c2 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -35,6 +35,28 @@ static int disable_reader;
35module_param(disable_reader, uint, 0644); 35module_param(disable_reader, uint, 0644);
36MODULE_PARM_DESC(disable_reader, "only run producer"); 36MODULE_PARM_DESC(disable_reader, "only run producer");
37 37
38static int write_iteration = 50;
39module_param(write_iteration, uint, 0644);
40MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
41
42static int producer_nice = 19;
43static int consumer_nice = 19;
44
45static int producer_fifo = -1;
46static int consumer_fifo = -1;
47
48module_param(producer_nice, uint, 0644);
49MODULE_PARM_DESC(producer_nice, "nice prio for producer");
50
51module_param(consumer_nice, uint, 0644);
52MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
53
54module_param(producer_fifo, uint, 0644);
55MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
56
57module_param(consumer_fifo, uint, 0644);
58MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
59
38static int read_events; 60static int read_events;
39 61
40static int kill_test; 62static int kill_test;
@@ -208,15 +230,18 @@ static void ring_buffer_producer(void)
208 do { 230 do {
209 struct ring_buffer_event *event; 231 struct ring_buffer_event *event;
210 int *entry; 232 int *entry;
211 233 int i;
212 event = ring_buffer_lock_reserve(buffer, 10); 234
213 if (!event) { 235 for (i = 0; i < write_iteration; i++) {
214 missed++; 236 event = ring_buffer_lock_reserve(buffer, 10);
215 } else { 237 if (!event) {
216 hit++; 238 missed++;
217 entry = ring_buffer_event_data(event); 239 } else {
218 *entry = smp_processor_id(); 240 hit++;
219 ring_buffer_unlock_commit(buffer, event); 241 entry = ring_buffer_event_data(event);
242 *entry = smp_processor_id();
243 ring_buffer_unlock_commit(buffer, event);
244 }
220 } 245 }
221 do_gettimeofday(&end_tv); 246 do_gettimeofday(&end_tv);
222 247
@@ -263,6 +288,27 @@ static void ring_buffer_producer(void)
263 288
264 if (kill_test) 289 if (kill_test)
265 trace_printk("ERROR!\n"); 290 trace_printk("ERROR!\n");
291
292 if (!disable_reader) {
293 if (consumer_fifo < 0)
294 trace_printk("Running Consumer at nice: %d\n",
295 consumer_nice);
296 else
297 trace_printk("Running Consumer at SCHED_FIFO %d\n",
298 consumer_fifo);
299 }
300 if (producer_fifo < 0)
301 trace_printk("Running Producer at nice: %d\n",
302 producer_nice);
303 else
304 trace_printk("Running Producer at SCHED_FIFO %d\n",
305 producer_fifo);
306
307 /* Let the user know that the test is running at low priority */
308 if (producer_fifo < 0 && consumer_fifo < 0 &&
309 producer_nice == 19 && consumer_nice == 19)
310 trace_printk("WARNING!!! This test is running at lowest priority.\n");
311
266 trace_printk("Time: %lld (usecs)\n", time); 312 trace_printk("Time: %lld (usecs)\n", time);
267 trace_printk("Overruns: %lld\n", overruns); 313 trace_printk("Overruns: %lld\n", overruns);
268 if (disable_reader) 314 if (disable_reader)
@@ -392,6 +438,27 @@ static int __init ring_buffer_benchmark_init(void)
392 if (IS_ERR(producer)) 438 if (IS_ERR(producer))
393 goto out_kill; 439 goto out_kill;
394 440
441 /*
442 * Run them as low-prio background tasks by default:
443 */
444 if (!disable_reader) {
445 if (consumer_fifo >= 0) {
446 struct sched_param param = {
447 .sched_priority = consumer_fifo
448 };
449 sched_setscheduler(consumer, SCHED_FIFO, &param);
450 } else
451 set_user_nice(consumer, consumer_nice);
452 }
453
454 if (producer_fifo >= 0) {
455 struct sched_param param = {
456 .sched_priority = producer_fifo
457 };
458 sched_setscheduler(producer, SCHED_FIFO, &param);
459 } else
460 set_user_nice(producer, producer_nice);
461
395 return 0; 462 return 0;
396 463
397 out_kill: 464 out_kill:
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b20d3ec75de9..874f2893cff0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; 129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130static char *default_bootup_tracer; 130static char *default_bootup_tracer;
131 131
132static int __init set_ftrace(char *str) 132static int __init set_cmdline_ftrace(char *str)
133{ 133{
134 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); 134 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
135 default_bootup_tracer = bootup_tracer_buf; 135 default_bootup_tracer = bootup_tracer_buf;
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
137 ring_buffer_expanded = 1; 137 ring_buffer_expanded = 1;
138 return 1; 138 return 1;
139} 139}
140__setup("ftrace=", set_ftrace); 140__setup("ftrace=", set_cmdline_ftrace);
141 141
142static int __init set_ftrace_dump_on_oops(char *str) 142static int __init set_ftrace_dump_on_oops(char *str)
143{ 143{
@@ -1361,10 +1361,11 @@ int trace_array_vprintk(struct trace_array *tr,
1361 pause_graph_tracing(); 1361 pause_graph_tracing();
1362 raw_local_irq_save(irq_flags); 1362 raw_local_irq_save(irq_flags);
1363 __raw_spin_lock(&trace_buf_lock); 1363 __raw_spin_lock(&trace_buf_lock);
1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1364 if (args == NULL) {
1365 1365 strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
1366 len = min(len, TRACE_BUF_SIZE-1); 1366 len = strlen(trace_buf);
1367 trace_buf[len] = 0; 1367 } else
1368 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1368 1369
1369 size = sizeof(*entry) + len + 1; 1370 size = sizeof(*entry) + len + 1;
1370 buffer = tr->buffer; 1371 buffer = tr->buffer;
@@ -1373,10 +1374,10 @@ int trace_array_vprintk(struct trace_array *tr,
1373 if (!event) 1374 if (!event)
1374 goto out_unlock; 1375 goto out_unlock;
1375 entry = ring_buffer_event_data(event); 1376 entry = ring_buffer_event_data(event);
1376 entry->ip = ip; 1377 entry->ip = ip;
1377 1378
1378 memcpy(&entry->buf, trace_buf, len); 1379 memcpy(&entry->buf, trace_buf, len);
1379 entry->buf[len] = 0; 1380 entry->buf[len] = '\0';
1380 if (!filter_check_discard(call, entry, buffer, event)) 1381 if (!filter_check_discard(call, entry, buffer, event))
1381 ring_buffer_unlock_commit(buffer, event); 1382 ring_buffer_unlock_commit(buffer, event);
1382 1383
@@ -3319,22 +3320,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3319 return cnt; 3320 return cnt;
3320} 3321}
3321 3322
3322static int mark_printk(const char *fmt, ...)
3323{
3324 int ret;
3325 va_list args;
3326 va_start(args, fmt);
3327 ret = trace_vprintk(0, fmt, args);
3328 va_end(args);
3329 return ret;
3330}
3331
3332static ssize_t 3323static ssize_t
3333tracing_mark_write(struct file *filp, const char __user *ubuf, 3324tracing_mark_write(struct file *filp, const char __user *ubuf,
3334 size_t cnt, loff_t *fpos) 3325 size_t cnt, loff_t *fpos)
3335{ 3326{
3336 char *buf; 3327 char *buf;
3337 char *end;
3338 3328
3339 if (tracing_disabled) 3329 if (tracing_disabled)
3340 return -EINVAL; 3330 return -EINVAL;
@@ -3342,7 +3332,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3342 if (cnt > TRACE_BUF_SIZE) 3332 if (cnt > TRACE_BUF_SIZE)
3343 cnt = TRACE_BUF_SIZE; 3333 cnt = TRACE_BUF_SIZE;
3344 3334
3345 buf = kmalloc(cnt + 1, GFP_KERNEL); 3335 buf = kmalloc(cnt + 2, GFP_KERNEL);
3346 if (buf == NULL) 3336 if (buf == NULL)
3347 return -ENOMEM; 3337 return -ENOMEM;
3348 3338
@@ -3350,14 +3340,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3350 kfree(buf); 3340 kfree(buf);
3351 return -EFAULT; 3341 return -EFAULT;
3352 } 3342 }
3343 if (buf[cnt-1] != '\n') {
3344 buf[cnt] = '\n';
3345 buf[cnt+1] = '\0';
3346 } else
3347 buf[cnt] = '\0';
3353 3348
3354 /* Cut from the first nil or newline. */ 3349 cnt = trace_vprintk(0, buf, NULL);
3355 buf[cnt] = '\0';
3356 end = strchr(buf, '\n');
3357 if (end)
3358 *end = '\0';
3359
3360 cnt = mark_printk("%s\n", buf);
3361 kfree(buf); 3350 kfree(buf);
3362 *fpos += cnt; 3351 *fpos += cnt;
3363 3352
@@ -3730,7 +3719,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
3730 3719
3731 s = kmalloc(sizeof(*s), GFP_KERNEL); 3720 s = kmalloc(sizeof(*s), GFP_KERNEL);
3732 if (!s) 3721 if (!s)
3733 return ENOMEM; 3722 return -ENOMEM;
3734 3723
3735 trace_seq_init(s); 3724 trace_seq_init(s);
3736 3725
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 405cb850b75d..acef8b4636f0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -483,10 +483,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
483 return 0; 483 return 0;
484} 484}
485#else 485#else
486static inline int ftrace_trace_addr(unsigned long addr)
487{
488 return 1;
489}
490static inline int ftrace_graph_addr(unsigned long addr) 486static inline int ftrace_graph_addr(unsigned long addr)
491{ 487{
492 return 1; 488 return 1;
@@ -500,12 +496,12 @@ print_graph_function(struct trace_iterator *iter)
500} 496}
501#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 497#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
502 498
503extern struct pid *ftrace_pid_trace; 499extern struct list_head ftrace_pids;
504 500
505#ifdef CONFIG_FUNCTION_TRACER 501#ifdef CONFIG_FUNCTION_TRACER
506static inline int ftrace_trace_task(struct task_struct *task) 502static inline int ftrace_trace_task(struct task_struct *task)
507{ 503{
508 if (!ftrace_pid_trace) 504 if (list_empty(&ftrace_pids))
509 return 1; 505 return 1;
510 506
511 return test_tsk_trace_trace(task); 507 return test_tsk_trace_trace(task);
@@ -699,22 +695,40 @@ struct event_subsystem {
699}; 695};
700 696
701struct filter_pred; 697struct filter_pred;
698struct regex;
702 699
703typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, 700typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
704 int val1, int val2); 701 int val1, int val2);
705 702
703typedef int (*regex_match_func)(char *str, struct regex *r, int len);
704
705enum regex_type {
706 MATCH_FULL,
707 MATCH_FRONT_ONLY,
708 MATCH_MIDDLE_ONLY,
709 MATCH_END_ONLY,
710};
711
712struct regex {
713 char pattern[MAX_FILTER_STR_VAL];
714 int len;
715 int field_len;
716 regex_match_func match;
717};
718
706struct filter_pred { 719struct filter_pred {
707 filter_pred_fn_t fn; 720 filter_pred_fn_t fn;
708 u64 val; 721 u64 val;
709 char str_val[MAX_FILTER_STR_VAL]; 722 struct regex regex;
710 int str_len; 723 char *field_name;
711 char *field_name; 724 int offset;
712 int offset; 725 int not;
713 int not; 726 int op;
714 int op; 727 int pop_n;
715 int pop_n;
716}; 728};
717 729
730extern enum regex_type
731filter_parse_regex(char *buff, int len, char **search, int *not);
718extern void print_event_filter(struct ftrace_event_call *call, 732extern void print_event_filter(struct ftrace_event_call *call,
719 struct trace_seq *s); 733 struct trace_seq *s);
720extern int apply_event_filter(struct ftrace_event_call *call, 734extern int apply_event_filter(struct ftrace_event_call *call,
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 20c5f92e28a8..878c03f386ba 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -20,6 +20,8 @@
20#include <linux/ktime.h> 20#include <linux/ktime.h>
21#include <linux/trace_clock.h> 21#include <linux/trace_clock.h>
22 22
23#include "trace.h"
24
23/* 25/*
24 * trace_clock_local(): the simplest and least coherent tracing clock. 26 * trace_clock_local(): the simplest and least coherent tracing clock.
25 * 27 *
@@ -28,17 +30,17 @@
28 */ 30 */
29u64 notrace trace_clock_local(void) 31u64 notrace trace_clock_local(void)
30{ 32{
31 unsigned long flags;
32 u64 clock; 33 u64 clock;
34 int resched;
33 35
34 /* 36 /*
35 * sched_clock() is an architecture implemented, fast, scalable, 37 * sched_clock() is an architecture implemented, fast, scalable,
36 * lockless clock. It is not guaranteed to be coherent across 38 * lockless clock. It is not guaranteed to be coherent across
37 * CPUs, nor across CPU idle events. 39 * CPUs, nor across CPU idle events.
38 */ 40 */
39 raw_local_irq_save(flags); 41 resched = ftrace_preempt_disable();
40 clock = sched_clock(); 42 clock = sched_clock();
41 raw_local_irq_restore(flags); 43 ftrace_preempt_enable(resched);
42 44
43 return clock; 45 return clock;
44} 46}
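
The switch from raw_local_irq_save() to ftrace_preempt_disable()/ftrace_preempt_enable() above is the usual tracer-safe pattern: the disable helper returns a token recording the preemption state, and that token must be passed back unchanged on the enable side. A hedged sketch of the same pairing around another clock read; my_notrace_sample() is hypothetical and assumes it lives in kernel/trace/ with "trace.h" already included.

    static u64 notrace my_notrace_sample(void)
    {
            int resched;
            u64 clock;

            resched = ftrace_preempt_disable();   /* cheaper than irq-off */
            clock = sched_clock();
            ftrace_preempt_enable(resched);       /* hand the token back */

            return clock;
    }
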
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d128f65778e6..5e9ffc33f6db 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -878,9 +878,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
878 "'%s/filter' entry\n", name); 878 "'%s/filter' entry\n", name);
879 } 879 }
880 880
881 entry = trace_create_file("enable", 0644, system->entry, 881 trace_create_file("enable", 0644, system->entry,
882 (void *)system->name, 882 (void *)system->name,
883 &ftrace_system_enable_fops); 883 &ftrace_system_enable_fops);
884 884
885 return system->entry; 885 return system->entry;
886} 886}
@@ -892,7 +892,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
892 const struct file_operations *filter, 892 const struct file_operations *filter,
893 const struct file_operations *format) 893 const struct file_operations *format)
894{ 894{
895 struct dentry *entry;
896 int ret; 895 int ret;
897 896
898 /* 897 /*
@@ -910,12 +909,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
910 } 909 }
911 910
912 if (call->regfunc) 911 if (call->regfunc)
913 entry = trace_create_file("enable", 0644, call->dir, call, 912 trace_create_file("enable", 0644, call->dir, call,
914 enable); 913 enable);
915 914
916 if (call->id && call->profile_enable) 915 if (call->id && call->profile_enable)
917 entry = trace_create_file("id", 0444, call->dir, call, 916 trace_create_file("id", 0444, call->dir, call,
918 id); 917 id);
919 918
920 if (call->define_fields) { 919 if (call->define_fields) {
921 ret = call->define_fields(call); 920 ret = call->define_fields(call);
@@ -924,16 +923,16 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
924 " events/%s\n", call->name); 923 " events/%s\n", call->name);
925 return ret; 924 return ret;
926 } 925 }
927 entry = trace_create_file("filter", 0644, call->dir, call, 926 trace_create_file("filter", 0644, call->dir, call,
928 filter); 927 filter);
929 } 928 }
930 929
931 /* A trace may not want to export its format */ 930 /* A trace may not want to export its format */
932 if (!call->show_format) 931 if (!call->show_format)
933 return 0; 932 return 0;
934 933
935 entry = trace_create_file("format", 0444, call->dir, call, 934 trace_create_file("format", 0444, call->dir, call,
936 format); 935 format);
937 936
938 return 0; 937 return 0;
939} 938}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 98a6cc5c64ed..92672016da28 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -18,8 +18,6 @@
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> 18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */ 19 */
20 20
21#include <linux/debugfs.h>
22#include <linux/uaccess.h>
23#include <linux/module.h> 21#include <linux/module.h>
24#include <linux/ctype.h> 22#include <linux/ctype.h>
25#include <linux/mutex.h> 23#include <linux/mutex.h>
@@ -197,9 +195,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
197 char *addr = (char *)(event + pred->offset); 195 char *addr = (char *)(event + pred->offset);
198 int cmp, match; 196 int cmp, match;
199 197
200 cmp = strncmp(addr, pred->str_val, pred->str_len); 198 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
201 199
202 match = (!cmp) ^ pred->not; 200 match = cmp ^ pred->not;
203 201
204 return match; 202 return match;
205} 203}
@@ -211,9 +209,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event,
211 char **addr = (char **)(event + pred->offset); 209 char **addr = (char **)(event + pred->offset);
212 int cmp, match; 210 int cmp, match;
213 211
214 cmp = strncmp(*addr, pred->str_val, pred->str_len); 212 cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len);
215 213
216 match = (!cmp) ^ pred->not; 214 match = cmp ^ pred->not;
217 215
218 return match; 216 return match;
219} 217}
@@ -237,9 +235,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event,
237 char *addr = (char *)(event + str_loc); 235 char *addr = (char *)(event + str_loc);
238 int cmp, match; 236 int cmp, match;
239 237
240 cmp = strncmp(addr, pred->str_val, str_len); 238 cmp = pred->regex.match(addr, &pred->regex, str_len);
241 239
242 match = (!cmp) ^ pred->not; 240 match = cmp ^ pred->not;
243 241
244 return match; 242 return match;
245} 243}
@@ -250,6 +248,124 @@ static int filter_pred_none(struct filter_pred *pred, void *event,
250 return 0; 248 return 0;
251} 249}
252 250
251/* Basic regex callbacks */
252static int regex_match_full(char *str, struct regex *r, int len)
253{
254 if (strncmp(str, r->pattern, len) == 0)
255 return 1;
256 return 0;
257}
258
259static int regex_match_front(char *str, struct regex *r, int len)
260{
261 if (strncmp(str, r->pattern, len) == 0)
262 return 1;
263 return 0;
264}
265
266static int regex_match_middle(char *str, struct regex *r, int len)
267{
268 if (strstr(str, r->pattern))
269 return 1;
270 return 0;
271}
272
273static int regex_match_end(char *str, struct regex *r, int len)
274{
275 char *ptr = strstr(str, r->pattern);
276
277 if (ptr && (ptr[r->len] == 0))
278 return 1;
279 return 0;
280}
281
282/**
283 * filter_parse_regex - parse a basic regex
284 * @buff: the raw regex
285 * @len: length of the regex
286 * @search: will point to the beginning of the string to compare
287 * @not: tell whether the match will have to be inverted
288 *
289 * This passes in a buffer containing a regex and this function will
290 * set search to point to the search part of the buffer and
291 * return the type of search it is (see enum above).
292 * This does modify buff.
293 *
294 * Returns enum type.
295 * search returns the pointer to use for comparison.
296 * not returns 1 if buff started with a '!'
297 * 0 otherwise.
298 */
299enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
300{
301 int type = MATCH_FULL;
302 int i;
303
304 if (buff[0] == '!') {
305 *not = 1;
306 buff++;
307 len--;
308 } else
309 *not = 0;
310
311 *search = buff;
312
313 for (i = 0; i < len; i++) {
314 if (buff[i] == '*') {
315 if (!i) {
316 *search = buff + 1;
317 type = MATCH_END_ONLY;
318 } else {
319 if (type == MATCH_END_ONLY)
320 type = MATCH_MIDDLE_ONLY;
321 else
322 type = MATCH_FRONT_ONLY;
323 buff[i] = 0;
324 break;
325 }
326 }
327 }
328
329 return type;
330}
331
332static int filter_build_regex(struct filter_pred *pred)
333{
334 struct regex *r = &pred->regex;
335 char *search, *dup;
336 enum regex_type type;
337 int not;
338
339 type = filter_parse_regex(r->pattern, r->len, &search, &not);
340 dup = kstrdup(search, GFP_KERNEL);
341 if (!dup)
342 return -ENOMEM;
343
344 strcpy(r->pattern, dup);
345 kfree(dup);
346
347 r->len = strlen(r->pattern);
348
349 switch (type) {
350 case MATCH_FULL:
351 r->match = regex_match_full;
352 break;
353 case MATCH_FRONT_ONLY:
354 r->match = regex_match_front;
355 break;
356 case MATCH_MIDDLE_ONLY:
357 r->match = regex_match_middle;
358 break;
359 case MATCH_END_ONLY:
360 r->match = regex_match_end;
361 break;
362 }
363
364 pred->not ^= not;
365
366 return 0;
367}
368
253/* return 1 if event matches, 0 otherwise (discard) */ 369/* return 1 if event matches, 0 otherwise (discard) */
254int filter_match_preds(struct ftrace_event_call *call, void *rec) 370int filter_match_preds(struct ftrace_event_call *call, void *rec)
255{ 371{
@@ -396,7 +512,7 @@ static void filter_clear_pred(struct filter_pred *pred)
396{ 512{
397 kfree(pred->field_name); 513 kfree(pred->field_name);
398 pred->field_name = NULL; 514 pred->field_name = NULL;
399 pred->str_len = 0; 515 pred->regex.len = 0;
400} 516}
401 517
402static int filter_set_pred(struct filter_pred *dest, 518static int filter_set_pred(struct filter_pred *dest,
@@ -660,21 +776,24 @@ static int filter_add_pred(struct filter_parse_state *ps,
660 } 776 }
661 777
662 if (is_string_field(field)) { 778 if (is_string_field(field)) {
663 pred->str_len = field->size; 779 ret = filter_build_regex(pred);
780 if (ret)
781 return ret;
664 782
665 if (field->filter_type == FILTER_STATIC_STRING) 783 if (field->filter_type == FILTER_STATIC_STRING) {
666 fn = filter_pred_string; 784 fn = filter_pred_string;
667 else if (field->filter_type == FILTER_DYN_STRING) 785 pred->regex.field_len = field->size;
668 fn = filter_pred_strloc; 786 } else if (field->filter_type == FILTER_DYN_STRING)
787 fn = filter_pred_strloc;
669 else { 788 else {
670 fn = filter_pred_pchar; 789 fn = filter_pred_pchar;
671 pred->str_len = strlen(pred->str_val); 790 pred->regex.field_len = strlen(pred->regex.pattern);
672 } 791 }
673 } else { 792 } else {
674 if (field->is_signed) 793 if (field->is_signed)
675 ret = strict_strtoll(pred->str_val, 0, &val); 794 ret = strict_strtoll(pred->regex.pattern, 0, &val);
676 else 795 else
677 ret = strict_strtoull(pred->str_val, 0, &val); 796 ret = strict_strtoull(pred->regex.pattern, 0, &val);
678 if (ret) { 797 if (ret) {
679 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); 798 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
680 return -EINVAL; 799 return -EINVAL;
@@ -1045,8 +1164,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
1045 return NULL; 1164 return NULL;
1046 } 1165 }
1047 1166
1048 strcpy(pred->str_val, operand2); 1167 strcpy(pred->regex.pattern, operand2);
1049 pred->str_len = strlen(operand2); 1168 pred->regex.len = strlen(pred->regex.pattern);
1050 1169
1051 pred->op = op; 1170 pred->op = op;
1052 1171
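
As a quick check of the MATCH_* classification performed by filter_parse_regex() above, here is a hedged, userspace-only restatement of the same rules. It is not the kernel function (it stops at the NUL terminator instead of taking a length), just a standalone sketch that should classify "abc" as full, "abc*" as front, "*abc" as end, "*abc*" as middle, and treat a leading '!' as negation.

    #include <stdio.h>

    enum { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

    /* Standalone restatement of the classification in filter_parse_regex(). */
    static int classify(char *buff, char **search, int *not)
    {
            int type = MATCH_FULL, i;

            *not = (buff[0] == '!');
            if (*not)
                    buff++;
            *search = buff;

            for (i = 0; buff[i]; i++) {
                    if (buff[i] != '*')
                            continue;
                    if (!i) {
                            *search = buff + 1;      /* "*abc"  -> end-only  */
                            type = MATCH_END_ONLY;
                    } else {
                            type = (type == MATCH_END_ONLY) ?
                                   MATCH_MIDDLE_ONLY : MATCH_FRONT_ONLY;
                            buff[i] = '\0';          /* cut at the '*' */
                            break;
                    }
            }
            return type;
    }

    int main(void)
    {
            char p1[] = "sys_*", p2[] = "*_read", p3[] = "*page*", p4[] = "!do_fork";
            char *s;
            int not;

            printf("%d\n", classify(p1, &s, &not));  /* 1: MATCH_FRONT_ONLY  */
            printf("%d\n", classify(p2, &s, &not));  /* 3: MATCH_END_ONLY    */
            printf("%d\n", classify(p3, &s, &not));  /* 2: MATCH_MIDDLE_ONLY */
            printf("%d\n", classify(p4, &s, &not));  /* 0: MATCH_FULL, not=1 */
            return 0;
    }
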
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 9753fcc61bc5..c74848ddb85a 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -48,11 +48,11 @@
48struct ____ftrace_##name { \ 48struct ____ftrace_##name { \
49 tstruct \ 49 tstruct \
50}; \ 50}; \
51static void __used ____ftrace_check_##name(void) \ 51static void __always_unused ____ftrace_check_##name(void) \
52{ \ 52{ \
53 struct ____ftrace_##name *__entry = NULL; \ 53 struct ____ftrace_##name *__entry = NULL; \
54 \ 54 \
55 /* force cmpile-time check on F_printk() */ \ 55 /* force compile-time check on F_printk() */ \
56 printk(print); \ 56 printk(print); \
57} 57}
58 58
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 527e17eae575..ddee9c593732 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -14,6 +14,69 @@ static int sys_refcount_exit;
14static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); 14static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
15static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); 15static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
16 16
17extern unsigned long __start_syscalls_metadata[];
18extern unsigned long __stop_syscalls_metadata[];
19
20static struct syscall_metadata **syscalls_metadata;
21
22static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
23{
24 struct syscall_metadata *start;
25 struct syscall_metadata *stop;
26 char str[KSYM_SYMBOL_LEN];
27
28
29 start = (struct syscall_metadata *)__start_syscalls_metadata;
30 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
31 kallsyms_lookup(syscall, NULL, NULL, NULL, str);
32
33 for ( ; start < stop; start++) {
34 /*
35 * Only compare after the "sys" prefix. Archs that use
36 * syscall wrappers may have syscalls symbols aliases prefixed
37 * with "SyS" instead of "sys", leading to an unwanted
38 * mismatch.
39 */
40 if (start->name && !strcmp(start->name + 3, str + 3))
41 return start;
42 }
43 return NULL;
44}
45
46static struct syscall_metadata *syscall_nr_to_meta(int nr)
47{
48 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
49 return NULL;
50
51 return syscalls_metadata[nr];
52}
53
54int syscall_name_to_nr(char *name)
55{
56 int i;
57
58 if (!syscalls_metadata)
59 return -1;
60
61 for (i = 0; i < NR_syscalls; i++) {
62 if (syscalls_metadata[i]) {
63 if (!strcmp(syscalls_metadata[i]->name, name))
64 return i;
65 }
66 }
67 return -1;
68}
69
70void set_syscall_enter_id(int num, int id)
71{
72 syscalls_metadata[num]->enter_id = id;
73}
74
75void set_syscall_exit_id(int num, int id)
76{
77 syscalls_metadata[num]->exit_id = id;
78}
79
17enum print_line_t 80enum print_line_t
18print_syscall_enter(struct trace_iterator *iter, int flags) 81print_syscall_enter(struct trace_iterator *iter, int flags)
19{ 82{
@@ -375,6 +438,29 @@ struct trace_event event_syscall_exit = {
375 .trace = print_syscall_exit, 438 .trace = print_syscall_exit,
376}; 439};
377 440
441int __init init_ftrace_syscalls(void)
442{
443 struct syscall_metadata *meta;
444 unsigned long addr;
445 int i;
446
447 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
448 NR_syscalls, GFP_KERNEL);
449 if (!syscalls_metadata) {
450 WARN_ON(1);
451 return -ENOMEM;
452 }
453
454 for (i = 0; i < NR_syscalls; i++) {
455 addr = arch_syscall_addr(i);
456 meta = find_syscall_meta(addr);
457 syscalls_metadata[i] = meta;
458 }
459
460 return 0;
461}
462core_initcall(init_ftrace_syscalls);
463
378#ifdef CONFIG_EVENT_PROFILE 464#ifdef CONFIG_EVENT_PROFILE
379 465
380static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); 466static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
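
init_ftrace_syscalls() above depends on each architecture providing arch_syscall_addr() to turn a syscall number into the address of its handler, which find_syscall_meta() then resolves through kallsyms. The per-arch side is not part of this hunk; a hedged sketch of what such a helper typically looks like, assuming the architecture exposes its sys_call_table as an array of handler pointers:

    #include <linux/init.h>

    extern const void *sys_call_table[];     /* arch-provided; layout is an assumption */

    unsigned long __init arch_syscall_addr(int nr)
    {
            return (unsigned long)sys_call_table[nr];
    }
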
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 234ceb10861f..a79c4d0407ab 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -750,7 +750,7 @@ config RCU_TORTURE_TEST_RUNNABLE
750config RCU_CPU_STALL_DETECTOR 750config RCU_CPU_STALL_DETECTOR
751 bool "Check for stalled CPUs delaying RCU grace periods" 751 bool "Check for stalled CPUs delaying RCU grace periods"
752 depends on TREE_RCU || TREE_PREEMPT_RCU 752 depends on TREE_RCU || TREE_PREEMPT_RCU
753 default n 753 default y
754 help 754 help
755 This option causes RCU to printk information on which 755 This option causes RCU to printk information on which
756 CPUs are delaying the current grace period, but only when 756 CPUs are delaying the current grace period, but only when
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..4ebfa5a164d7 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
5 * relegated to obsolescence, but used by various less 5 * relegated to obsolescence, but used by various less
6 * important (or lazy) subsystems. 6 * important (or lazy) subsystems.
7 */ 7 */
8#include <linux/smp_lock.h>
9#include <linux/module.h> 8#include <linux/module.h>
10#include <linux/kallsyms.h> 9#include <linux/kallsyms.h>
11#include <linux/semaphore.h> 10#include <linux/semaphore.h>
11#include <linux/smp_lock.h>
12
13#define CREATE_TRACE_POINTS
14#include <trace/events/bkl.h>
12 15
13/* 16/*
14 * The 'big kernel lock' 17 * The 'big kernel lock'
@@ -113,21 +116,26 @@ static inline void __unlock_kernel(void)
113 * This cannot happen asynchronously, so we only need to 116 * This cannot happen asynchronously, so we only need to
114 * worry about other CPU's. 117 * worry about other CPU's.
115 */ 118 */
116void __lockfunc lock_kernel(void) 119void __lockfunc _lock_kernel(const char *func, const char *file, int line)
117{ 120{
118 int depth = current->lock_depth+1; 121 int depth = current->lock_depth + 1;
122
123 trace_lock_kernel(func, file, line);
124
119 if (likely(!depth)) 125 if (likely(!depth))
120 __lock_kernel(); 126 __lock_kernel();
121 current->lock_depth = depth; 127 current->lock_depth = depth;
122} 128}
123 129
124void __lockfunc unlock_kernel(void) 130void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
125{ 131{
126 BUG_ON(current->lock_depth < 0); 132 BUG_ON(current->lock_depth < 0);
127 if (likely(--current->lock_depth < 0)) 133 if (likely(--current->lock_depth < 0))
128 __unlock_kernel(); 134 __unlock_kernel();
135
136 trace_unlock_kernel(func, file, line);
129} 137}
130 138
131EXPORT_SYMBOL(lock_kernel); 139EXPORT_SYMBOL(_lock_kernel);
132EXPORT_SYMBOL(unlock_kernel); 140EXPORT_SYMBOL(_unlock_kernel);
133 141
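
The renamed _lock_kernel()/_unlock_kernel() take the caller's function, file and line so the new bkl tracepoints can report exactly who took and released the BKL. The matching include/linux/smp_lock.h change is not shown in this hunk; presumably the old entry points become wrapper macros along these lines (a hedged sketch, not the actual header):

    #define lock_kernel()   _lock_kernel(__func__, __FILE__, __LINE__)
    #define unlock_kernel() _unlock_kernel(__func__, __FILE__, __LINE__)
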
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 26187edcc7ea..09f5ce1810dc 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -7,15 +7,12 @@
7 * parameter. Now every user can use their own standalone ratelimit_state. 7 * parameter. Now every user can use their own standalone ratelimit_state.
8 * 8 *
9 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
10 *
11 */ 10 */
12 11
13#include <linux/kernel.h> 12#include <linux/ratelimit.h>
14#include <linux/jiffies.h> 13#include <linux/jiffies.h>
15#include <linux/module.h> 14#include <linux/module.h>
16 15
17static DEFINE_SPINLOCK(ratelimit_lock);
18
19/* 16/*
20 * __ratelimit - rate limiting 17 * __ratelimit - rate limiting
21 * @rs: ratelimit_state data 18 * @rs: ratelimit_state data
@@ -23,35 +20,43 @@ static DEFINE_SPINLOCK(ratelimit_lock);
23 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks 20 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
24 * in every @rs->ratelimit_jiffies 21 * in every @rs->ratelimit_jiffies
25 */ 22 */
26int __ratelimit(struct ratelimit_state *rs) 23int ___ratelimit(struct ratelimit_state *rs, const char *func)
27{ 24{
28 unsigned long flags; 25 unsigned long flags;
26 int ret;
29 27
30 if (!rs->interval) 28 if (!rs->interval)
31 return 1; 29 return 1;
32 30
33 spin_lock_irqsave(&ratelimit_lock, flags); 31 /*
32 * If we contend on this state's lock then almost
33 * by definition we are too busy to print a message,
34 * in addition to the one that will be printed by
35 * the entity that is holding the lock already:
36 */
37 if (!spin_trylock_irqsave(&rs->lock, flags))
38 return 1;
39
34 if (!rs->begin) 40 if (!rs->begin)
35 rs->begin = jiffies; 41 rs->begin = jiffies;
36 42
37 if (time_is_before_jiffies(rs->begin + rs->interval)) { 43 if (time_is_before_jiffies(rs->begin + rs->interval)) {
38 if (rs->missed) 44 if (rs->missed)
39 printk(KERN_WARNING "%s: %d callbacks suppressed\n", 45 printk(KERN_WARNING "%s: %d callbacks suppressed\n",
40 __func__, rs->missed); 46 func, rs->missed);
41 rs->begin = 0; 47 rs->begin = 0;
42 rs->printed = 0; 48 rs->printed = 0;
43 rs->missed = 0; 49 rs->missed = 0;
44 } 50 }
45 if (rs->burst && rs->burst > rs->printed) 51 if (rs->burst && rs->burst > rs->printed) {
46 goto print; 52 rs->printed++;
47 53 ret = 1;
48 rs->missed++; 54 } else {
49 spin_unlock_irqrestore(&ratelimit_lock, flags); 55 rs->missed++;
50 return 0; 56 ret = 0;
57 }
58 spin_unlock_irqrestore(&rs->lock, flags);
51 59
52print: 60 return ret;
53 rs->printed++;
54 spin_unlock_irqrestore(&ratelimit_lock, flags);
55 return 1;
56} 61}
57EXPORT_SYMBOL(__ratelimit); 62EXPORT_SYMBOL(___ratelimit);
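
With the lock now living inside each ratelimit_state (and only trylock'ed, so a contended call site simply stays quiet), a caller just declares its own state and tests it before printing. A hedged sketch of a typical call site: the mydrv names are invented, and DEFINE_RATELIMIT_STATE() plus the __ratelimit() wrapper (now expected to forward to ___ratelimit(rs, __func__)) are assumed to keep their declarations in <linux/ratelimit.h>.

    #include <linux/kernel.h>
    #include <linux/jiffies.h>
    #include <linux/ratelimit.h>

    /* Allow at most 10 messages from this call site every 5 seconds. */
    static DEFINE_RATELIMIT_STATE(mydrv_rs, 5 * HZ, 10);

    static void mydrv_report(int err)
    {
            if (__ratelimit(&mydrv_rs))
                    printk(KERN_WARNING "mydrv: I/O error %d\n", err);
            /* suppressed calls are counted and reported once the window ends */
    }
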
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ac25cd28e807..795472d8ae24 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr;
97 */ 97 */
98static DEFINE_SPINLOCK(io_tlb_lock); 98static DEFINE_SPINLOCK(io_tlb_lock);
99 99
100static int late_alloc;
101
100static int __init 102static int __init
101setup_io_tlb_npages(char *str) 103setup_io_tlb_npages(char *str)
102{ 104{
@@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str)
109 ++str; 111 ++str;
110 if (!strcmp(str, "force")) 112 if (!strcmp(str, "force"))
111 swiotlb_force = 1; 113 swiotlb_force = 1;
114
112 return 1; 115 return 1;
113} 116}
114__setup("swiotlb=", setup_io_tlb_npages); 117__setup("swiotlb=", setup_io_tlb_npages);
@@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
121 return phys_to_dma(hwdev, virt_to_phys(address)); 124 return phys_to_dma(hwdev, virt_to_phys(address));
122} 125}
123 126
124static void swiotlb_print_info(unsigned long bytes) 127void swiotlb_print_info(void)
125{ 128{
129 unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
126 phys_addr_t pstart, pend; 130 phys_addr_t pstart, pend;
127 131
128 pstart = virt_to_phys(io_tlb_start); 132 pstart = virt_to_phys(io_tlb_start);
@@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes)
140 * structures for the software IO TLB used to implement the DMA API. 144 * structures for the software IO TLB used to implement the DMA API.
141 */ 145 */
142void __init 146void __init
143swiotlb_init_with_default_size(size_t default_size) 147swiotlb_init_with_default_size(size_t default_size, int verbose)
144{ 148{
145 unsigned long i, bytes; 149 unsigned long i, bytes;
146 150
@@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size)
176 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 180 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
177 if (!io_tlb_overflow_buffer) 181 if (!io_tlb_overflow_buffer)
178 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 182 panic("Cannot allocate SWIOTLB overflow buffer!\n");
179 183 if (verbose)
180 swiotlb_print_info(bytes); 184 swiotlb_print_info();
181} 185}
182 186
183void __init 187void __init
184swiotlb_init(void) 188swiotlb_init(int verbose)
185{ 189{
186 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 190 swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */
187} 191}
188 192
189/* 193/*
@@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
260 if (!io_tlb_overflow_buffer) 264 if (!io_tlb_overflow_buffer)
261 goto cleanup4; 265 goto cleanup4;
262 266
263 swiotlb_print_info(bytes); 267 swiotlb_print_info();
268
269 late_alloc = 1;
264 270
265 return 0; 271 return 0;
266 272
@@ -281,6 +287,32 @@ cleanup1:
281 return -ENOMEM; 287 return -ENOMEM;
282} 288}
283 289
290void __init swiotlb_free(void)
291{
292 if (!io_tlb_overflow_buffer)
293 return;
294
295 if (late_alloc) {
296 free_pages((unsigned long)io_tlb_overflow_buffer,
297 get_order(io_tlb_overflow));
298 free_pages((unsigned long)io_tlb_orig_addr,
299 get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
300 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
301 sizeof(int)));
302 free_pages((unsigned long)io_tlb_start,
303 get_order(io_tlb_nslabs << IO_TLB_SHIFT));
304 } else {
305 free_bootmem_late(__pa(io_tlb_overflow_buffer),
306 io_tlb_overflow);
307 free_bootmem_late(__pa(io_tlb_orig_addr),
308 io_tlb_nslabs * sizeof(phys_addr_t));
309 free_bootmem_late(__pa(io_tlb_list),
310 io_tlb_nslabs * sizeof(int));
311 free_bootmem_late(__pa(io_tlb_start),
312 io_tlb_nslabs << IO_TLB_SHIFT);
313 }
314}
315
284static int is_swiotlb_buffer(phys_addr_t paddr) 316static int is_swiotlb_buffer(phys_addr_t paddr)
285{ 317{
286 return paddr >= virt_to_phys(io_tlb_start) && 318 return paddr >= virt_to_phys(io_tlb_start) &&
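
The new verbose flag and swiotlb_free() above are aimed at architectures that set up bounce buffers early but may hand DMA off to a hardware IOMMU later, at which point the bootmem- or page-allocated buffers can be returned. A hedged sketch of how arch code might use the pair; the arch_* function names are invented, while swiotlb_init(int) and swiotlb_free() are the interfaces added above.

    #include <linux/init.h>
    #include <linux/swiotlb.h>

    void __init arch_setup_bounce_buffers(void)
    {
            swiotlb_init(1);        /* 1 = print the SWIOTLB banner */
    }

    /* Called once a hardware IOMMU is known to cover all DMA, making the
     * software bounce buffers unnecessary. */
    void __init arch_drop_swiotlb(void)
    {
            swiotlb_free();         /* free_bootmem_late() or free_pages() under the hood */
    }
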
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 555d5d2731c6..d1dc23cc7f10 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -143,6 +143,30 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
143 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); 143 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
144} 144}
145 145
146/*
147 * free_bootmem_late - free bootmem pages directly to page allocator
148 * @addr: starting address of the range
149 * @size: size of the range in bytes
150 *
151 * This is only useful when the bootmem allocator has already been torn
152 * down, but we are still initializing the system. Pages are given directly
153 * to the page allocator, no bootmem metadata is updated because it is gone.
154 */
155void __init free_bootmem_late(unsigned long addr, unsigned long size)
156{
157 unsigned long cursor, end;
158
159 kmemleak_free_part(__va(addr), size);
160
161 cursor = PFN_UP(addr);
162 end = PFN_DOWN(addr + size);
163
164 for (; cursor < end; cursor++) {
165 __free_pages_bootmem(pfn_to_page(cursor), 0);
166 totalram_pages++;
167 }
168}
169
146static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) 170static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
147{ 171{
148 int aligned; 172 int aligned;
diff --git a/mm/mmap.c b/mm/mmap.c
index 73f5e4b64010..292ddc3cef9c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,7 +20,6 @@
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/personality.h> 21#include <linux/personality.h>
22#include <linux/security.h> 22#include <linux/security.h>
23#include <linux/ima.h>
24#include <linux/hugetlb.h> 23#include <linux/hugetlb.h>
25#include <linux/profile.h> 24#include <linux/profile.h>
26#include <linux/module.h> 25#include <linux/module.h>
@@ -1061,9 +1060,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1061 error = security_file_mmap(file, reqprot, prot, flags, addr, 0); 1060 error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1062 if (error) 1061 if (error)
1063 return error; 1062 return error;
1064 error = ima_file_mmap(file, prot);
1065 if (error)
1066 return error;
1067 1063
1068 return mmap_region(file, addr, len, flags, vm_flags, pgoff); 1064 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1069} 1065}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index fcfc5458c399..8938fa79124d 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -10,7 +10,9 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/socket.h> 11#include <linux/socket.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/ratelimit.h>
13#include <linux/init.h> 14#include <linux/init.h>
15
14#include <net/ip.h> 16#include <net/ip.h>
15#include <net/sock.h> 17#include <net/sock.h>
16 18
diff --git a/net/core/utils.c b/net/core/utils.c
index 83221aee7084..838250241d26 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -24,6 +24,8 @@
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/ratelimit.h>
28
27#include <net/sock.h> 29#include <net/sock.h>
28 30
29#include <asm/byteorder.h> 31#include <asm/byteorder.h>
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 090d300d7394..f0d14452632b 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -6,77 +6,93 @@
6# all the offsets to the calls to mcount. 6# all the offsets to the calls to mcount.
7# 7#
8# 8#
9# What we want to end up with is a section in vmlinux called 9# What we want to end up with is that each object file will have a
10# __mcount_loc that contains a list of pointers to all the 10# section called __mcount_loc that will hold the list of pointers to mcount
11# call sites in the kernel that call mcount. Later on boot up, the kernel 11# callers. After final linking, the vmlinux will have within .init.data the
12# will read this list, save the locations and turn them into nops. 12# list of all callers to mcount between __start_mcount_loc and __stop_mcount_loc.
13# When tracing or profiling is later enabled, these locations will then 13# Later on boot up, the kernel will read this list, save the locations and turn
14# be converted back to pointers to some function. 14# them into nops. When tracing or profiling is later enabled, these locations
15# will then be converted back to pointers to some function.
15# 16#
16# This is no easy feat. This script is called just after the original 17# This is no easy feat. This script is called just after the original
17# object is compiled and before it is linked. 18# object is compiled and before it is linked.
18# 19#
19# The references to the call sites are offsets from the section of text 20# When parsing this object file using 'objdump', the references to the call
20# that the call site is in. Hence, all functions in a section that 21# sites are offsets from the section that the call site is in. Hence, all
21# has a call site to mcount, will have the offset from the beginning of 22# functions in a section that has a call site to mcount, will have the
22# the section and not the beginning of the function. 23# offset from the beginning of the section and not the beginning of the
24# function.
25#
26# But where this section will reside finally in vmlinux is undetermined at
27# this point. So we can't use such offsets to record the final
28# address of this call site.
29#
30# The trick is to change the call offset to refer to a function symbol in
31# this section instead of the section start. During the link step, 'ld' will
32# compute the final address according to the information we record.
23# 33#
24# The trick is to find a way to record the beginning of the section.
25# The way we do this is to look at the first function in the section
26# which will also be the location of that section after final link.
27# e.g. 34# e.g.
28# 35#
29# .section ".sched.text", "ax" 36# .section ".sched.text", "ax"
30# .globl my_func
31# my_func:
32# [...] 37# [...]
33# call mcount (offset: 0x5) 38# func1:
39# [...]
40# call mcount (offset: 0x10)
34# [...] 41# [...]
35# ret 42# ret
36# other_func: 43# .globl func2
44# func2: (offset: 0x20)
37# [...] 45# [...]
38# call mcount (offset: 0x1b) 46# [...]
47# ret
48# func3:
49# [...]
50# call mcount (offset: 0x30)
39# [...] 51# [...]
40# 52#
41# Both relocation offsets for the mcounts in the above example will be 53# Both relocation offsets for the mcounts in the above example will be
42# offset from .sched.text. If we make another file called tmp.s with: 54# offset from .sched.text. If we choose global symbol func2 as a reference and
55# make another file called tmp.s with the new offsets:
43# 56#
44# .section __mcount_loc 57# .section __mcount_loc
45# .quad my_func + 0x5 58# .quad func2 - 0x10
46# .quad my_func + 0x1b 59# .quad func2 + 0x10
47# 60#
48# We can then compile this tmp.s into tmp.o, and link it to the original 61# We can then compile this tmp.s into tmp.o, and link it back to the original
49# object. 62# object.
50# 63#
51# But this gets hard if my_func is not globl (a static function). 64# In our algorithm, we will choose the first global function we meet in this
52# In such a case we have: 65# section as the reference. But this gets hard if there are no global functions
66# in this section. In such a case we have to select a local one. E.g. func1:
53# 67#
54# .section ".sched.text", "ax" 68# .section ".sched.text", "ax"
55# my_func: 69# func1:
56# [...] 70# [...]
57# call mcount (offset: 0x5) 71# call mcount (offset: 0x10)
58# [...] 72# [...]
59# ret 73# ret
60# other_func: 74# func2:
61# [...] 75# [...]
62# call mcount (offset: 0x1b) 76# call mcount (offset: 0x20)
63# [...] 77# [...]
78# .section "other.section"
64# 79#
65# If we make the tmp.s the same as above, when we link together with 80# If we make the tmp.s the same as above, when we link together with
66# the original object, we will end up with two symbols for my_func: 81# the original object, we will end up with two symbols for func1:
67# one local, one global. After final compile, we will end up with 82# one local, one global. After final compile, we will end up with
68# an undefined reference to my_func. 83# an undefined reference to func1 or a wrong reference to another global
84# func1 in other files.
69# 85#
70# Since local objects can reference local variables, we need to find 86# Since local objects can reference local variables, we need to find
71# a way to make tmp.o reference the local objects of the original object 87# a way to make tmp.o reference the local objects of the original object
72# file after it is linked together. To do this, we convert the my_func 88# file after it is linked together. To do this, we convert func1
73# into a global symbol before linking tmp.o. Then after we link tmp.o 89# into a global symbol before linking tmp.o. Then after we link tmp.o
74# we will only have a single symbol for my_func that is global. 90# we will only have a single symbol for func1 that is global.
75# We can convert my_func back into a local symbol and we are done. 91# We can convert func1 back into a local symbol and we are done.
76# 92#
77# Here are the steps we take: 93# Here are the steps we take:
78# 94#
79# 1) Record all the local symbols by using 'nm' 95# 1) Record all the local and weak symbols by using 'nm'
80# 2) Use objdump to find all the call site offsets and sections for 96# 2) Use objdump to find all the call site offsets and sections for
81# mcount. 97# mcount.
82# 3) Compile the list into its own object. 98# 3) Compile the list into its own object.
@@ -86,10 +102,8 @@
86# 6) Link together this new object with the list object. 102# 6) Link together this new object with the list object.
87# 7) Convert the local functions back to local symbols and rename 103# 7) Convert the local functions back to local symbols and rename
88# the result as the original object. 104# the result as the original object.
89# End.
90# 8) Link the object with the list object. 105# 8) Link the object with the list object.
91# 9) Move the result back to the original object. 106# 9) Move the result back to the original object.
92# End.
93# 107#
94 108
95use strict; 109use strict;
@@ -99,7 +113,7 @@ $P =~ s@.*/@@g;
99 113
100my $V = '0.1'; 114my $V = '0.1';
101 115
102if ($#ARGV < 7) { 116if ($#ARGV != 10) {
103 print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n"; 117 print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
104 print "version: $V\n"; 118 print "version: $V\n";
105 exit(1); 119 exit(1);
@@ -109,7 +123,7 @@ my ($arch, $bits, $objdump, $objcopy, $cc,
109 $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV; 123 $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
110 124
111# This file refers to mcount and shouldn't be ftraced, so let's ignore it 125# This file refers to mcount and shouldn't be ftraced, so let's ignore it
112if ($inputfile eq "kernel/trace/ftrace.o") { 126if ($inputfile =~ m,kernel/trace/ftrace\.o$,) {
113 exit(0); 127 exit(0);
114} 128}
115 129
@@ -119,6 +133,7 @@ my %text_sections = (
119 ".sched.text" => 1, 133 ".sched.text" => 1,
120 ".spinlock.text" => 1, 134 ".spinlock.text" => 1,
121 ".irqentry.text" => 1, 135 ".irqentry.text" => 1,
136 ".text.unlikely" => 1,
122); 137);
123 138
124$objdump = "objdump" if ((length $objdump) == 0); 139$objdump = "objdump" if ((length $objdump) == 0);
@@ -137,13 +152,47 @@ my %weak; # List of weak functions
137my %convert; # List of local functions used that needs conversion 152my %convert; # List of local functions used that needs conversion
138 153
139my $type; 154my $type;
140my $nm_regex; # Find the local functions (return function) 155my $local_regex; # Match a local function (return function)
156my $weak_regex; # Match a weak function (return function)
141my $section_regex; # Find the start of a section 157my $section_regex; # Find the start of a section
142my $function_regex; # Find the name of a function 158my $function_regex; # Find the name of a function
143 # (return offset and func name) 159 # (return offset and func name)
144my $mcount_regex; # Find the call site to mcount (return offset) 160my $mcount_regex; # Find the call site to mcount (return offset)
145my $alignment; # The .align value to use for $mcount_section 161my $alignment; # The .align value to use for $mcount_section
146my $section_type; # Section header plus possible alignment command 162my $section_type; # Section header plus possible alignment command
163my $can_use_local = 0; # If we can use local function references
164
165# Shut up recordmcount if user has older objcopy
166my $quiet_recordmcount = ".tmp_quiet_recordmcount";
167my $print_warning = 1;
168$print_warning = 0 if ( -f $quiet_recordmcount);
169
170##
171# check_objcopy - whether objcopy supports --globalize-symbols
172#
173# --globalize-symbols came out in 2.17, we must test the version
174# of objcopy, and if it is less than 2.17, then we can not
175# record local functions.
176sub check_objcopy
177{
178 open (IN, "$objcopy --version |") or die "error running $objcopy";
179 while (<IN>) {
180 if (/objcopy.*\s(\d+)\.(\d+)/) {
181 $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17));
182 last;
183 }
184 }
185 close (IN);
186
187 if (!$can_use_local && $print_warning) {
188 print STDERR "WARNING: could not find objcopy version or version " .
189 "is less than 2.17.\n" .
190 "\tLocal function references are disabled.\n";
191 open (QUIET, ">$quiet_recordmcount");
192 printf QUIET "Disables the warning from recordmcount.pl\n";
193 close QUIET;
194 }
195}
147 196
148if ($arch eq "x86") { 197if ($arch eq "x86") {
149 if ($bits == 64) { 198 if ($bits == 64) {
@@ -157,7 +206,8 @@ if ($arch eq "x86") {
157# We base the defaults off of i386, the other archs may 206# We base the defaults off of i386, the other archs may
158# feel free to change them in the below if statements. 207# feel free to change them in the below if statements.
159# 208#
160$nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)"; 209$local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
210$weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
161$section_regex = "Disassembly of section\\s+(\\S+):"; 211$section_regex = "Disassembly of section\\s+(\\S+):";
162$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 212$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
163$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; 213$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
@@ -206,7 +256,7 @@ if ($arch eq "x86_64") {
206 $cc .= " -m32"; 256 $cc .= " -m32";
207 257
208} elsif ($arch eq "powerpc") { 258} elsif ($arch eq "powerpc") {
209 $nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)"; 259 $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
210 $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:"; 260 $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
211 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$"; 261 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
212 262
@@ -278,44 +328,17 @@ if ($filename =~ m,^(.*)(\.\S),) {
278my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s"; 328my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s";
279my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o"; 329my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o";
280 330
281# 331check_objcopy();
282# --globalize-symbols came out in 2.17, we must test the version
283# of objcopy, and if it is less than 2.17, then we can not
284# record local functions.
285my $use_locals = 01;
286my $local_warn_once = 0;
287my $found_version = 0;
288
289open (IN, "$objcopy --version |") || die "error running $objcopy";
290while (<IN>) {
291 if (/objcopy.*\s(\d+)\.(\d+)/) {
292 my $major = $1;
293 my $minor = $2;
294
295 $found_version = 1;
296 if ($major < 2 ||
297 ($major == 2 && $minor < 17)) {
298 $use_locals = 0;
299 }
300 last;
301 }
302}
303close (IN);
304
305if (!$found_version) {
306 print STDERR "WARNING: could not find objcopy version.\n" .
307 "\tDisabling local function references.\n";
308}
309 332
310# 333#
311# Step 1: find all the local (static functions) and weak symbols. 334# Step 1: find all the local (static functions) and weak symbols.
312# 't' is local, 'w/W' is weak (we never use a weak function) 335# 't' is local, 'w/W' is weak
313# 336#
314open (IN, "$nm $inputfile|") || die "error running $nm"; 337open (IN, "$nm $inputfile|") || die "error running $nm";
315while (<IN>) { 338while (<IN>) {
316 if (/$nm_regex/) { 339 if (/$local_regex/) {
317 $locals{$1} = 1; 340 $locals{$1} = 1;
318 } elsif (/^[0-9a-fA-F]+\s+([wW])\s+(\S+)/) { 341 } elsif (/$weak_regex/) {
319 $weak{$2} = $1; 342 $weak{$2} = $1;
320 } 343 }
321} 344}
@@ -333,26 +356,20 @@ my $offset = 0; # offset of ref_func to section beginning
333# 356#
334sub update_funcs 357sub update_funcs
335{ 358{
336 return if ($#offsets < 0); 359 return unless ($ref_func and @offsets);
337
338 defined($ref_func) || die "No function to reference";
339 360
340 # A section only had a weak function, to represent it. 361 # Sanity check on weak function. A weak function may be overwritten by
341 # Unfortunately, a weak function may be overwritten by another 362 # another function of the same name, making all these offsets incorrect.
342 # function of the same name, making all these offsets incorrect.
343 # To be safe, we simply print a warning and bail.
344 if (defined $weak{$ref_func}) { 363 if (defined $weak{$ref_func}) {
345 print STDERR 364 die "$inputfile: ERROR: referencing weak function" .
346 "$inputfile: WARNING: referencing weak function" .
347 " $ref_func for mcount\n"; 365 " $ref_func for mcount\n";
348 return;
349 } 366 }
350 367
351 # is this function static? If so, note this fact. 368 # is this function static? If so, note this fact.
352 if (defined $locals{$ref_func}) { 369 if (defined $locals{$ref_func}) {
353 370
354 # only use locals if objcopy supports globalize-symbols 371 # only use locals if objcopy supports globalize-symbols
355 if (!$use_locals) { 372 if (!$can_use_local) {
356 return; 373 return;
357 } 374 }
358 $convert{$ref_func} = 1; 375 $convert{$ref_func} = 1;
@@ -378,9 +395,27 @@ open(IN, "$objdump -hdr $inputfile|") || die "error running $objdump";
378 395
379my $text; 396my $text;
380 397
398
399# read headers first
381my $read_headers = 1; 400my $read_headers = 1;
382 401
383while (<IN>) { 402while (<IN>) {
403
404 if ($read_headers && /$mcount_section/) {
405 #
406 # Somehow the make process can execute this script on an
407 # object twice. If it does, we would duplicate the mcount
408 # section and it will cause the function tracer self test
409 # to fail. Check if the mcount section exists, and if it does,
410 # warn and exit.
411 #
412 print STDERR "ERROR: $mcount_section already in $inputfile\n" .
413 "\tThis may be an indication that your build is corrupted.\n" .
414 "\tDelete $inputfile and try again. If the same object file\n" .
415 "\tstill causes an issue, then disable CONFIG_DYNAMIC_FTRACE.\n";
416 exit(-1);
417 }
418
384 # is it a section? 419 # is it a section?
385 if (/$section_regex/) { 420 if (/$section_regex/) {
386 $read_headers = 0; 421 $read_headers = 0;
@@ -392,7 +427,7 @@ while (<IN>) {
392 $read_function = 0; 427 $read_function = 0;
393 } 428 }
394 # print out any recorded offsets 429 # print out any recorded offsets
395 update_funcs() if (defined($ref_func)); 430 update_funcs();
396 431
397 # reset all markers and arrays 432 # reset all markers and arrays
398 $text_found = 0; 433 $text_found = 0;
@@ -421,21 +456,7 @@ while (<IN>) {
421 $offset = hex $1; 456 $offset = hex $1;
422 } 457 }
423 } 458 }
424 } elsif ($read_headers && /$mcount_section/) {
425 #
426 # Somehow the make process can execute this script on an
427 # object twice. If it does, we would duplicate the mcount
428 # section and it will cause the function tracer self test
429 # to fail. Check if the mcount section exists, and if it does,
430 # warn and exit.
431 #
432 print STDERR "ERROR: $mcount_section already in $inputfile\n" .
433 "\tThis may be an indication that your build is corrupted.\n" .
434 "\tDelete $inputfile and try again. If the same object file\n" .
435 "\tstill causes an issue, then disable CONFIG_DYNAMIC_FTRACE.\n";
436 exit(-1);
437 } 459 }
438
439 # is this a call site to mcount? If so, record it to print later 460 # is this a call site to mcount? If so, record it to print later
440 if ($text_found && /$mcount_regex/) { 461 if ($text_found && /$mcount_regex/) {
441 $offsets[$#offsets + 1] = hex $1; 462 $offsets[$#offsets + 1] = hex $1;
@@ -443,7 +464,7 @@ while (<IN>) {
443} 464}
444 465
445# dump out any more offsets that may have been found 466# dump out any more offsets that may have been found
446update_funcs() if (defined($ref_func)); 467update_funcs();
447 468
448# If we did not find any mcount callers, we are done (do nothing). 469# If we did not find any mcount callers, we are done (do nothing).
449if (!$opened) { 470if (!$opened) {
diff --git a/scripts/selinux/Makefile b/scripts/selinux/Makefile
index ca4b1ec01822..e8049da1831f 100644
--- a/scripts/selinux/Makefile
+++ b/scripts/selinux/Makefile
@@ -1,2 +1,2 @@
1subdir-y := mdp 1subdir-y := mdp genheaders
2subdir- += mdp 2subdir- += mdp genheaders
diff --git a/scripts/selinux/genheaders/.gitignore b/scripts/selinux/genheaders/.gitignore
new file mode 100644
index 000000000000..4c0b646ff8d5
--- /dev/null
+++ b/scripts/selinux/genheaders/.gitignore
@@ -0,0 +1 @@
1genheaders
diff --git a/scripts/selinux/genheaders/Makefile b/scripts/selinux/genheaders/Makefile
new file mode 100644
index 000000000000..417b165008ee
--- /dev/null
+++ b/scripts/selinux/genheaders/Makefile
@@ -0,0 +1,5 @@
1hostprogs-y := genheaders
2HOST_EXTRACFLAGS += -Isecurity/selinux/include
3
4always := $(hostprogs-y)
5clean-files := $(hostprogs-y)
diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c
new file mode 100644
index 000000000000..24626968055d
--- /dev/null
+++ b/scripts/selinux/genheaders/genheaders.c
@@ -0,0 +1,118 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <string.h>
5#include <errno.h>
6#include <ctype.h>
7
8struct security_class_mapping {
9 const char *name;
10 const char *perms[sizeof(unsigned) * 8 + 1];
11};
12
13#include "classmap.h"
14#include "initial_sid_to_string.h"
15
16#define max(x, y) (((int)(x) > (int)(y)) ? x : y)
17
18const char *progname;
19
20static void usage(void)
21{
22 printf("usage: %s flask.h av_permissions.h\n", progname);
23 exit(1);
24}
25
26static char *stoupperx(const char *s)
27{
28 char *s2 = strdup(s);
29 char *p;
30
31 if (!s2) {
32 fprintf(stderr, "%s: out of memory\n", progname);
33 exit(3);
34 }
35
36 for (p = s2; *p; p++)
37 *p = toupper(*p);
38 return s2;
39}
40
41int main(int argc, char *argv[])
42{
43 int i, j, k;
44 int isids_len;
45 FILE *fout;
46
47 progname = argv[0];
48
49 if (argc < 3)
50 usage();
51
52 fout = fopen(argv[1], "w");
53 if (!fout) {
54 fprintf(stderr, "Could not open %s for writing: %s\n",
55 argv[1], strerror(errno));
56 exit(2);
57 }
58
59 for (i = 0; secclass_map[i].name; i++) {
60 struct security_class_mapping *map = &secclass_map[i];
61 map->name = stoupperx(map->name);
62 for (j = 0; map->perms[j]; j++)
63 map->perms[j] = stoupperx(map->perms[j]);
64 }
65
66 isids_len = sizeof(initial_sid_to_string) / sizeof (char *);
67 for (i = 1; i < isids_len; i++)
68 initial_sid_to_string[i] = stoupperx(initial_sid_to_string[i]);
69
70 fprintf(fout, "/* This file is automatically generated. Do not edit. */\n");
71 fprintf(fout, "#ifndef _SELINUX_FLASK_H_\n#define _SELINUX_FLASK_H_\n\n");
72
73 for (i = 0; secclass_map[i].name; i++) {
74 struct security_class_mapping *map = &secclass_map[i];
75 fprintf(fout, "#define SECCLASS_%s", map->name);
76 for (j = 0; j < max(1, 40 - strlen(map->name)); j++)
77 fprintf(fout, " ");
78 fprintf(fout, "%2d\n", i+1);
79 }
80
81 fprintf(fout, "\n");
82
83 for (i = 1; i < isids_len; i++) {
84 char *s = initial_sid_to_string[i];
85 fprintf(fout, "#define SECINITSID_%s", s);
86 for (j = 0; j < max(1, 40 - strlen(s)); j++)
87 fprintf(fout, " ");
88 fprintf(fout, "%2d\n", i);
89 }
90 fprintf(fout, "\n#define SECINITSID_NUM %d\n", i-1);
91 fprintf(fout, "\n#endif\n");
92 fclose(fout);
93
94 fout = fopen(argv[2], "w");
95 if (!fout) {
96 fprintf(stderr, "Could not open %s for writing: %s\n",
97 argv[2], strerror(errno));
98 exit(4);
99 }
100
101 fprintf(fout, "/* This file is automatically generated. Do not edit. */\n");
102 fprintf(fout, "#ifndef _SELINUX_AV_PERMISSIONS_H_\n#define _SELINUX_AV_PERMISSIONS_H_\n\n");
103
104 for (i = 0; secclass_map[i].name; i++) {
105 struct security_class_mapping *map = &secclass_map[i];
106 for (j = 0; map->perms[j]; j++) {
107 fprintf(fout, "#define %s__%s", map->name,
108 map->perms[j]);
109 for (k = 0; k < max(1, 40 - strlen(map->name) - strlen(map->perms[j])); k++)
110 fprintf(fout, " ");
111 fprintf(fout, "0x%08xUL\n", (1<<j));
112 }
113 }
114
115 fprintf(fout, "\n#endif\n");
116 fclose(fout);
117 exit(0);
118}
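For reference, genheaders.c writes one SECCLASS_ define per secclass_map[] entry (numbered from 1) into the first output file, and one permission bit per perms[] entry (1 << j, printed as 0x%08xUL) into the second. Illustrative output only; the real names and counts come from classmap.h:

/* flask.h (illustrative) */
#define SECCLASS_SECURITY                                1
#define SECCLASS_PROCESS                                 2

/* av_permissions.h (illustrative) */
#define SECURITY__COMPUTE_AV                             0x00000001UL
#define SECURITY__COMPUTE_CREATE                         0x00000002UL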
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index b4ced8562587..62b34ce1f50d 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -29,86 +29,27 @@
29#include <unistd.h> 29#include <unistd.h>
30#include <string.h> 30#include <string.h>
31 31
32#include "flask.h"
33
34static void usage(char *name) 32static void usage(char *name)
35{ 33{
36 printf("usage: %s [-m] policy_file context_file\n", name); 34 printf("usage: %s [-m] policy_file context_file\n", name);
37 exit(1); 35 exit(1);
38} 36}
39 37
40static void find_common_name(char *cname, char *dest, int len) 38/* Class/perm mapping support */
41{ 39struct security_class_mapping {
42 char *start, *end; 40 const char *name;
43 41 const char *perms[sizeof(unsigned) * 8 + 1];
44 start = strchr(cname, '_')+1;
45 end = strchr(start, '_');
46 if (!start || !end || start-cname > len || end-start > len) {
47 printf("Error with commons defines\n");
48 exit(1);
49 }
50 strncpy(dest, start, end-start);
51 dest[end-start] = '\0';
52}
53
54#define S_(x) x,
55static char *classlist[] = {
56#include "class_to_string.h"
57 NULL
58}; 42};
59#undef S_
60 43
44#include "classmap.h"
61#include "initial_sid_to_string.h" 45#include "initial_sid_to_string.h"
62 46
63#define TB_(x) char *x[] = {
64#define TE_(x) NULL };
65#define S_(x) x,
66#include "common_perm_to_string.h"
67#undef TB_
68#undef TE_
69#undef S_
70
71struct common {
72 char *cname;
73 char **perms;
74};
75struct common common[] = {
76#define TB_(x) { #x, x },
77#define S_(x)
78#define TE_(x)
79#include "common_perm_to_string.h"
80#undef TB_
81#undef TE_
82#undef S_
83};
84
85#define S_(x, y, z) {x, #y},
86struct av_inherit {
87 int class;
88 char *common;
89};
90struct av_inherit av_inherit[] = {
91#include "av_inherit.h"
92};
93#undef S_
94
95#include "av_permissions.h"
96#define S_(x, y, z) {x, y, z},
97struct av_perms {
98 int class;
99 int perm_i;
100 char *perm_s;
101};
102struct av_perms av_perms[] = {
103#include "av_perm_to_string.h"
104};
105#undef S_
106
107int main(int argc, char *argv[]) 47int main(int argc, char *argv[])
108{ 48{
109 int i, j, mls = 0; 49 int i, j, mls = 0;
50 int initial_sid_to_string_len;
110 char **arg, *polout, *ctxout; 51 char **arg, *polout, *ctxout;
111 int classlist_len, initial_sid_to_string_len; 52
112 FILE *fout; 53 FILE *fout;
113 54
114 if (argc < 3) 55 if (argc < 3)
@@ -127,64 +68,25 @@ int main(int argc, char *argv[])
127 usage(argv[0]); 68 usage(argv[0]);
128 } 69 }
129 70
130 classlist_len = sizeof(classlist) / sizeof(char *);
131 /* print out the classes */ 71 /* print out the classes */
132 for (i=1; i < classlist_len; i++) { 72 for (i = 0; secclass_map[i].name; i++)
133 if(classlist[i]) 73 fprintf(fout, "class %s\n", secclass_map[i].name);
134 fprintf(fout, "class %s\n", classlist[i]);
135 else
136 fprintf(fout, "class user%d\n", i);
137 }
138 fprintf(fout, "\n"); 74 fprintf(fout, "\n");
139 75
140 initial_sid_to_string_len = sizeof(initial_sid_to_string) / sizeof (char *); 76 initial_sid_to_string_len = sizeof(initial_sid_to_string) / sizeof (char *);
141 /* print out the sids */ 77 /* print out the sids */
142 for (i=1; i < initial_sid_to_string_len; i++) 78 for (i = 1; i < initial_sid_to_string_len; i++)
143 fprintf(fout, "sid %s\n", initial_sid_to_string[i]); 79 fprintf(fout, "sid %s\n", initial_sid_to_string[i]);
144 fprintf(fout, "\n"); 80 fprintf(fout, "\n");
145 81
146 /* print out the commons */
147 for (i=0; i< sizeof(common)/sizeof(struct common); i++) {
148 char cname[101];
149 find_common_name(common[i].cname, cname, 100);
150 cname[100] = '\0';
151 fprintf(fout, "common %s\n{\n", cname);
152 for (j=0; common[i].perms[j]; j++)
153 fprintf(fout, "\t%s\n", common[i].perms[j]);
154 fprintf(fout, "}\n\n");
155 }
156 fprintf(fout, "\n");
157
158 /* print out the class permissions */ 82 /* print out the class permissions */
159 for (i=1; i < classlist_len; i++) { 83 for (i = 0; secclass_map[i].name; i++) {
160 if (classlist[i]) { 84 struct security_class_mapping *map = &secclass_map[i];
161 int firstperm = -1, numperms = 0; 85 fprintf(fout, "class %s\n", map->name);
162 86 fprintf(fout, "{\n");
163 fprintf(fout, "class %s\n", classlist[i]); 87 for (j = 0; map->perms[j]; j++)
164 /* does it inherit from a common? */ 88 fprintf(fout, "\t%s\n", map->perms[j]);
165 for (j=0; j < sizeof(av_inherit)/sizeof(struct av_inherit); j++) 89 fprintf(fout, "}\n\n");
166 if (av_inherit[j].class == i)
167 fprintf(fout, "inherits %s\n", av_inherit[j].common);
168
169 for (j=0; j < sizeof(av_perms)/sizeof(struct av_perms); j++) {
170 if (av_perms[j].class == i) {
171 if (firstperm == -1)
172 firstperm = j;
173 numperms++;
174 }
175 }
176 if (!numperms) {
177 fprintf(fout, "\n");
178 continue;
179 }
180
181 fprintf(fout, "{\n");
182 /* print out the av_perms */
183 for (j=0; j < numperms; j++) {
184 fprintf(fout, "\t%s\n", av_perms[firstperm+j].perm_s);
185 }
186 fprintf(fout, "}\n\n");
187 }
188 } 90 }
189 fprintf(fout, "\n"); 91 fprintf(fout, "\n");
190 92
@@ -197,31 +99,34 @@ int main(int argc, char *argv[])
197 /* types, roles, and allows */ 99 /* types, roles, and allows */
198 fprintf(fout, "type base_t;\n"); 100 fprintf(fout, "type base_t;\n");
199 fprintf(fout, "role base_r types { base_t };\n"); 101 fprintf(fout, "role base_r types { base_t };\n");
200 for (i=1; i < classlist_len; i++) { 102 for (i = 0; secclass_map[i].name; i++)
201 if (classlist[i]) 103 fprintf(fout, "allow base_t base_t:%s *;\n",
202 fprintf(fout, "allow base_t base_t:%s *;\n", classlist[i]); 104 secclass_map[i].name);
203 else
204 fprintf(fout, "allow base_t base_t:user%d *;\n", i);
205 }
206 fprintf(fout, "user user_u roles { base_r };\n"); 105 fprintf(fout, "user user_u roles { base_r };\n");
207 fprintf(fout, "\n"); 106 fprintf(fout, "\n");
208 107
209 /* default sids */ 108 /* default sids */
210 for (i=1; i < initial_sid_to_string_len; i++) 109 for (i = 1; i < initial_sid_to_string_len; i++)
211 fprintf(fout, "sid %s user_u:base_r:base_t\n", initial_sid_to_string[i]); 110 fprintf(fout, "sid %s user_u:base_r:base_t\n", initial_sid_to_string[i]);
212 fprintf(fout, "\n"); 111 fprintf(fout, "\n");
213 112
214
215 fprintf(fout, "fs_use_xattr ext2 user_u:base_r:base_t;\n"); 113 fprintf(fout, "fs_use_xattr ext2 user_u:base_r:base_t;\n");
216 fprintf(fout, "fs_use_xattr ext3 user_u:base_r:base_t;\n"); 114 fprintf(fout, "fs_use_xattr ext3 user_u:base_r:base_t;\n");
115 fprintf(fout, "fs_use_xattr ext4 user_u:base_r:base_t;\n");
217 fprintf(fout, "fs_use_xattr jfs user_u:base_r:base_t;\n"); 116 fprintf(fout, "fs_use_xattr jfs user_u:base_r:base_t;\n");
218 fprintf(fout, "fs_use_xattr xfs user_u:base_r:base_t;\n"); 117 fprintf(fout, "fs_use_xattr xfs user_u:base_r:base_t;\n");
219 fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n"); 118 fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n");
119 fprintf(fout, "fs_use_xattr jffs2 user_u:base_r:base_t;\n");
120 fprintf(fout, "fs_use_xattr gfs2 user_u:base_r:base_t;\n");
121 fprintf(fout, "fs_use_xattr lustre user_u:base_r:base_t;\n");
220 122
123 fprintf(fout, "fs_use_task eventpollfs user_u:base_r:base_t;\n");
221 fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n"); 124 fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n");
222 fprintf(fout, "fs_use_task sockfs user_u:base_r:base_t;\n"); 125 fprintf(fout, "fs_use_task sockfs user_u:base_r:base_t;\n");
223 126
127 fprintf(fout, "fs_use_trans mqueue user_u:base_r:base_t;\n");
224 fprintf(fout, "fs_use_trans devpts user_u:base_r:base_t;\n"); 128 fprintf(fout, "fs_use_trans devpts user_u:base_r:base_t;\n");
129 fprintf(fout, "fs_use_trans hugetlbfs user_u:base_r:base_t;\n");
225 fprintf(fout, "fs_use_trans tmpfs user_u:base_r:base_t;\n"); 130 fprintf(fout, "fs_use_trans tmpfs user_u:base_r:base_t;\n");
226 fprintf(fout, "fs_use_trans shm user_u:base_r:base_t;\n"); 131 fprintf(fout, "fs_use_trans shm user_u:base_r:base_t;\n");
227 132
diff --git a/security/Kconfig b/security/Kconfig
index fb363cd81cf6..226b9556b25f 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -91,28 +91,6 @@ config SECURITY_PATH
91 implement pathname based access controls. 91 implement pathname based access controls.
92 If you are unsure how to answer this question, answer N. 92 If you are unsure how to answer this question, answer N.
93 93
94config SECURITY_FILE_CAPABILITIES
95 bool "File POSIX Capabilities"
96 default n
97 help
98 This enables filesystem capabilities, allowing you to give
99 binaries a subset of root's powers without using setuid 0.
100
101 If in doubt, answer N.
102
103config SECURITY_ROOTPLUG
104 bool "Root Plug Support"
105 depends on USB=y && SECURITY
106 help
107 This is a sample LSM module that should only be used as such.
108 It prevents any programs running with egid == 0 if a specific
109 USB device is not present in the system.
110
111 See <http://www.linuxjournal.com/article.php?sid=6279> for
112 more information about this module.
113
114 If you are unsure how to answer this question, answer N.
115
116config INTEL_TXT 94config INTEL_TXT
117 bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)" 95 bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)"
118 depends on HAVE_INTEL_TXT 96 depends on HAVE_INTEL_TXT
@@ -165,5 +143,37 @@ source security/tomoyo/Kconfig
165 143
166source security/integrity/ima/Kconfig 144source security/integrity/ima/Kconfig
167 145
146choice
147 prompt "Default security module"
148 default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
149 default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
150 default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
151 default DEFAULT_SECURITY_DAC
152
153 help
154 Select the security module that will be used by default if the
155 kernel parameter security= is not specified.
156
157 config DEFAULT_SECURITY_SELINUX
158 bool "SELinux" if SECURITY_SELINUX=y
159
160 config DEFAULT_SECURITY_SMACK
161 bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
162
163 config DEFAULT_SECURITY_TOMOYO
164 bool "TOMOYO" if SECURITY_TOMOYO=y
165
166 config DEFAULT_SECURITY_DAC
167 bool "Unix Discretionary Access Controls"
168
169endchoice
170
171config DEFAULT_SECURITY
172 string
173 default "selinux" if DEFAULT_SECURITY_SELINUX
174 default "smack" if DEFAULT_SECURITY_SMACK
175 default "tomoyo" if DEFAULT_SECURITY_TOMOYO
176 default "" if DEFAULT_SECURITY_DAC
177
168endmenu 178endmenu
169 179
diff --git a/security/Makefile b/security/Makefile
index 95ecc06392d7..bb44e350c618 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
18obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o 18obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
19obj-$(CONFIG_AUDIT) += lsm_audit.o 19obj-$(CONFIG_AUDIT) += lsm_audit.o
20obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o 20obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
21obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o
22obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o 21obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
23 22
24# Object integrity file lists 23# Object integrity file lists
diff --git a/security/capability.c b/security/capability.c
index fce07a7bc825..5c700e1a4fd3 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -308,6 +308,22 @@ static int cap_path_truncate(struct path *path, loff_t length,
308{ 308{
309 return 0; 309 return 0;
310} 310}
311
312static int cap_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
313 mode_t mode)
314{
315 return 0;
316}
317
318static int cap_path_chown(struct path *path, uid_t uid, gid_t gid)
319{
320 return 0;
321}
322
323static int cap_path_chroot(struct path *root)
324{
325 return 0;
326}
311#endif 327#endif
312 328
313static int cap_file_permission(struct file *file, int mask) 329static int cap_file_permission(struct file *file, int mask)
@@ -405,7 +421,7 @@ static int cap_kernel_create_files_as(struct cred *new, struct inode *inode)
405 return 0; 421 return 0;
406} 422}
407 423
408static int cap_kernel_module_request(void) 424static int cap_kernel_module_request(char *kmod_name)
409{ 425{
410 return 0; 426 return 0;
411} 427}
@@ -977,6 +993,9 @@ void security_fixup_ops(struct security_operations *ops)
977 set_to_cap_if_null(ops, path_link); 993 set_to_cap_if_null(ops, path_link);
978 set_to_cap_if_null(ops, path_rename); 994 set_to_cap_if_null(ops, path_rename);
979 set_to_cap_if_null(ops, path_truncate); 995 set_to_cap_if_null(ops, path_truncate);
996 set_to_cap_if_null(ops, path_chmod);
997 set_to_cap_if_null(ops, path_chown);
998 set_to_cap_if_null(ops, path_chroot);
980#endif 999#endif
981 set_to_cap_if_null(ops, file_permission); 1000 set_to_cap_if_null(ops, file_permission);
982 set_to_cap_if_null(ops, file_alloc_security); 1001 set_to_cap_if_null(ops, file_alloc_security);
diff --git a/security/commoncap.c b/security/commoncap.c
index fe30751a6cd9..f800fdb3de94 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -1,4 +1,4 @@
1/* Common capabilities, needed by capability.o and root_plug.o 1/* Common capabilities, needed by capability.o.
2 * 2 *
3 * This program is free software; you can redistribute it and/or modify 3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by 4 * it under the terms of the GNU General Public License as published by
@@ -173,7 +173,6 @@ int cap_capget(struct task_struct *target, kernel_cap_t *effective,
173 */ 173 */
174static inline int cap_inh_is_capped(void) 174static inline int cap_inh_is_capped(void)
175{ 175{
176#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
177 176
178 /* they are so limited unless the current task has the CAP_SETPCAP 177 /* they are so limited unless the current task has the CAP_SETPCAP
179 * capability 178 * capability
@@ -181,7 +180,6 @@ static inline int cap_inh_is_capped(void)
181 if (cap_capable(current, current_cred(), CAP_SETPCAP, 180 if (cap_capable(current, current_cred(), CAP_SETPCAP,
182 SECURITY_CAP_AUDIT) == 0) 181 SECURITY_CAP_AUDIT) == 0)
183 return 0; 182 return 0;
184#endif
185 return 1; 183 return 1;
186} 184}
187 185
@@ -239,8 +237,6 @@ static inline void bprm_clear_caps(struct linux_binprm *bprm)
239 bprm->cap_effective = false; 237 bprm->cap_effective = false;
240} 238}
241 239
242#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
243
244/** 240/**
245 * cap_inode_need_killpriv - Determine if inode change affects privileges 241 * cap_inode_need_killpriv - Determine if inode change affects privileges
246 * @dentry: The inode/dentry in being changed with change marked ATTR_KILL_PRIV 242 * @dentry: The inode/dentry in being changed with change marked ATTR_KILL_PRIV
@@ -421,49 +417,6 @@ out:
421 return rc; 417 return rc;
422} 418}
423 419
424#else
425int cap_inode_need_killpriv(struct dentry *dentry)
426{
427 return 0;
428}
429
430int cap_inode_killpriv(struct dentry *dentry)
431{
432 return 0;
433}
434
435int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps)
436{
437 memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
438 return -ENODATA;
439}
440
441static inline int get_file_caps(struct linux_binprm *bprm, bool *effective)
442{
443 bprm_clear_caps(bprm);
444 return 0;
445}
446#endif
447
448/*
449 * Determine whether a exec'ing process's new permitted capabilities should be
450 * limited to just what it already has.
451 *
452 * This prevents processes that are being ptraced from gaining access to
453 * CAP_SETPCAP, unless the process they're tracing already has it, and the
454 * binary they're executing has filecaps that elevate it.
455 *
456 * Returns 1 if they should be limited, 0 if they are not.
457 */
458static inline int cap_limit_ptraced_target(void)
459{
460#ifndef CONFIG_SECURITY_FILE_CAPABILITIES
461 if (capable(CAP_SETPCAP))
462 return 0;
463#endif
464 return 1;
465}
466
467/** 420/**
468 * cap_bprm_set_creds - Set up the proposed credentials for execve(). 421 * cap_bprm_set_creds - Set up the proposed credentials for execve().
469 * @bprm: The execution parameters, including the proposed creds 422 * @bprm: The execution parameters, including the proposed creds
@@ -523,9 +476,8 @@ skip:
523 new->euid = new->uid; 476 new->euid = new->uid;
524 new->egid = new->gid; 477 new->egid = new->gid;
525 } 478 }
526 if (cap_limit_ptraced_target()) 479 new->cap_permitted = cap_intersect(new->cap_permitted,
527 new->cap_permitted = cap_intersect(new->cap_permitted, 480 old->cap_permitted);
528 old->cap_permitted);
529 } 481 }
530 482
531 new->suid = new->fsuid = new->euid; 483 new->suid = new->fsuid = new->euid;
@@ -739,7 +691,6 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
739 return 0; 691 return 0;
740} 692}
741 693
742#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
743/* 694/*
744 * Rationale: code calling task_setscheduler, task_setioprio, and 695 * Rationale: code calling task_setscheduler, task_setioprio, and
745 * task_setnice, assumes that 696 * task_setnice, assumes that
@@ -820,22 +771,6 @@ static long cap_prctl_drop(struct cred *new, unsigned long cap)
820 return 0; 771 return 0;
821} 772}
822 773
823#else
824int cap_task_setscheduler (struct task_struct *p, int policy,
825 struct sched_param *lp)
826{
827 return 0;
828}
829int cap_task_setioprio (struct task_struct *p, int ioprio)
830{
831 return 0;
832}
833int cap_task_setnice (struct task_struct *p, int nice)
834{
835 return 0;
836}
837#endif
838
839/** 774/**
840 * cap_task_prctl - Implement process control functions for this security module 775 * cap_task_prctl - Implement process control functions for this security module
841 * @option: The process control function requested 776 * @option: The process control function requested
@@ -866,7 +801,6 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
866 error = !!cap_raised(new->cap_bset, arg2); 801 error = !!cap_raised(new->cap_bset, arg2);
867 goto no_change; 802 goto no_change;
868 803
869#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
870 case PR_CAPBSET_DROP: 804 case PR_CAPBSET_DROP:
871 error = cap_prctl_drop(new, arg2); 805 error = cap_prctl_drop(new, arg2);
872 if (error < 0) 806 if (error < 0)
@@ -917,8 +851,6 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
917 error = new->securebits; 851 error = new->securebits;
918 goto no_change; 852 goto no_change;
919 853
920#endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */
921
922 case PR_GET_KEEPCAPS: 854 case PR_GET_KEEPCAPS:
923 if (issecure(SECURE_KEEP_CAPS)) 855 if (issecure(SECURE_KEEP_CAPS))
924 error = 1; 856 error = 1;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 53d9764e8f09..3d7846de8069 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -3,6 +3,7 @@
3config IMA 3config IMA
4 bool "Integrity Measurement Architecture(IMA)" 4 bool "Integrity Measurement Architecture(IMA)"
5 depends on ACPI 5 depends on ACPI
6 depends on SECURITY
6 select SECURITYFS 7 select SECURITYFS
7 select CRYPTO 8 select CRYPTO
8 select CRYPTO_HMAC 9 select CRYPTO_HMAC
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index e04566a2c4e5..acba3dfc8d29 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -354,6 +354,10 @@ static void dump_common_audit_data(struct audit_buffer *ab,
354 } 354 }
355 break; 355 break;
356#endif 356#endif
357 case LSM_AUDIT_DATA_KMOD:
358 audit_log_format(ab, " kmod=");
359 audit_log_untrustedstring(ab, a->u.kmod_name);
360 break;
357 } /* switch (a->type) */ 361 } /* switch (a->type) */
358} 362}
359 363
diff --git a/security/min_addr.c b/security/min_addr.c
index c844eed7915d..fc43c9d37084 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -33,6 +33,9 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
33{ 33{
34 int ret; 34 int ret;
35 35
36 if (!capable(CAP_SYS_RAWIO))
37 return -EPERM;
38
36 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 39 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
37 40
38 update_mmap_min_addr(); 41 update_mmap_min_addr();
diff --git a/security/root_plug.c b/security/root_plug.c
deleted file mode 100644
index 2f7ffa67c4d2..000000000000
--- a/security/root_plug.c
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * Root Plug sample LSM module
3 *
4 * Originally written for a Linux Journal.
5 *
6 * Copyright (C) 2002 Greg Kroah-Hartman <greg@kroah.com>
7 *
8 * Prevents any programs running with egid == 0 if a specific USB device
9 * is not present in the system. Yes, it can be gotten around, but is a
10 * nice starting point for people to play with, and learn the LSM
11 * interface.
12 *
13 * If you want to turn this into something with a semblance of security,
14 * you need to hook the task_* functions also.
15 *
16 * See http://www.linuxjournal.com/article.php?sid=6279 for more information
17 * about this code.
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License as
21 * published by the Free Software Foundation, version 2 of the
22 * License.
23 */
24
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/security.h>
28#include <linux/usb.h>
29#include <linux/moduleparam.h>
30
31/* default is a generic type of usb to serial converter */
32static int vendor_id = 0x0557;
33static int product_id = 0x2008;
34
35module_param(vendor_id, uint, 0400);
36module_param(product_id, uint, 0400);
37
38/* should we print out debug messages */
39static int debug = 0;
40
41module_param(debug, bool, 0600);
42
43#define MY_NAME "root_plug"
44
45#define root_dbg(fmt, arg...) \
46 do { \
47 if (debug) \
48 printk(KERN_DEBUG "%s: %s: " fmt , \
49 MY_NAME , __func__ , \
50 ## arg); \
51 } while (0)
52
53static int rootplug_bprm_check_security (struct linux_binprm *bprm)
54{
55 struct usb_device *dev;
56
57 root_dbg("file %s, e_uid = %d, e_gid = %d\n",
58 bprm->filename, bprm->cred->euid, bprm->cred->egid);
59
60 if (bprm->cred->egid == 0) {
61 dev = usb_find_device(vendor_id, product_id);
62 if (!dev) {
63 root_dbg("e_gid = 0, and device not found, "
64 "task not allowed to run...\n");
65 return -EPERM;
66 }
67 usb_put_dev(dev);
68 }
69
70 return 0;
71}
72
73static struct security_operations rootplug_security_ops = {
74 .bprm_check_security = rootplug_bprm_check_security,
75};
76
77static int __init rootplug_init (void)
78{
79 /* register ourselves with the security framework */
80 if (register_security (&rootplug_security_ops)) {
81 printk (KERN_INFO
82 "Failure registering Root Plug module with the kernel\n");
83 return -EINVAL;
84 }
85 printk (KERN_INFO "Root Plug module initialized, "
86 "vendor_id = %4.4x, product id = %4.4x\n", vendor_id, product_id);
87 return 0;
88}
89
90security_initcall (rootplug_init);
diff --git a/security/security.c b/security/security.c
index c4c673240c1c..24e060be9fa5 100644
--- a/security/security.c
+++ b/security/security.c
@@ -16,9 +16,11 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/security.h> 18#include <linux/security.h>
19#include <linux/ima.h>
19 20
20/* Boot-time LSM user choice */ 21/* Boot-time LSM user choice */
21static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1]; 22static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
23 CONFIG_DEFAULT_SECURITY;
22 24
23/* things that live in capability.c */ 25/* things that live in capability.c */
24extern struct security_operations default_security_ops; 26extern struct security_operations default_security_ops;
@@ -79,8 +81,10 @@ __setup("security=", choose_lsm);
79 * 81 *
80 * Return true if: 82 * Return true if:
81 * -The passed LSM is the one chosen by user at boot time, 83 * -The passed LSM is the one chosen by user at boot time,
82 * -or user didn't specify a specific LSM and we're the first to ask 84 * -or the passed LSM is configured as the default and the user did not
83 * for registration permission, 85 * choose an alternate LSM at boot time,
86 * -or there is no default LSM set and the user didn't specify a
87 * specific LSM and we're the first to ask for registration permission,
84 * -or the passed LSM is currently loaded. 88 * -or the passed LSM is currently loaded.
85 * Otherwise, return false. 89 * Otherwise, return false.
86 */ 90 */
@@ -235,7 +239,12 @@ int security_bprm_set_creds(struct linux_binprm *bprm)
235 239
236int security_bprm_check(struct linux_binprm *bprm) 240int security_bprm_check(struct linux_binprm *bprm)
237{ 241{
238 return security_ops->bprm_check_security(bprm); 242 int ret;
243
244 ret = security_ops->bprm_check_security(bprm);
245 if (ret)
246 return ret;
247 return ima_bprm_check(bprm);
239} 248}
240 249
241void security_bprm_committing_creds(struct linux_binprm *bprm) 250void security_bprm_committing_creds(struct linux_binprm *bprm)
@@ -352,12 +361,21 @@ EXPORT_SYMBOL(security_sb_parse_opts_str);
352 361
353int security_inode_alloc(struct inode *inode) 362int security_inode_alloc(struct inode *inode)
354{ 363{
364 int ret;
365
355 inode->i_security = NULL; 366 inode->i_security = NULL;
356 return security_ops->inode_alloc_security(inode); 367 ret = security_ops->inode_alloc_security(inode);
368 if (ret)
369 return ret;
370 ret = ima_inode_alloc(inode);
371 if (ret)
372 security_inode_free(inode);
373 return ret;
357} 374}
358 375
359void security_inode_free(struct inode *inode) 376void security_inode_free(struct inode *inode)
360{ 377{
378 ima_inode_free(inode);
361 security_ops->inode_free_security(inode); 379 security_ops->inode_free_security(inode);
362} 380}
363 381
@@ -434,6 +452,26 @@ int security_path_truncate(struct path *path, loff_t length,
434 return 0; 452 return 0;
435 return security_ops->path_truncate(path, length, time_attrs); 453 return security_ops->path_truncate(path, length, time_attrs);
436} 454}
455
456int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
457 mode_t mode)
458{
459 if (unlikely(IS_PRIVATE(dentry->d_inode)))
460 return 0;
461 return security_ops->path_chmod(dentry, mnt, mode);
462}
463
464int security_path_chown(struct path *path, uid_t uid, gid_t gid)
465{
466 if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
467 return 0;
468 return security_ops->path_chown(path, uid, gid);
469}
470
471int security_path_chroot(struct path *path)
472{
473 return security_ops->path_chroot(path);
474}
437#endif 475#endif
438 476
439int security_inode_create(struct inode *dir, struct dentry *dentry, int mode) 477int security_inode_create(struct inode *dir, struct dentry *dentry, int mode)
@@ -628,6 +666,8 @@ int security_file_alloc(struct file *file)
628void security_file_free(struct file *file) 666void security_file_free(struct file *file)
629{ 667{
630 security_ops->file_free_security(file); 668 security_ops->file_free_security(file);
669 if (file->f_dentry)
670 ima_file_free(file);
631} 671}
632 672
633int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 673int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -639,7 +679,12 @@ int security_file_mmap(struct file *file, unsigned long reqprot,
639 unsigned long prot, unsigned long flags, 679 unsigned long prot, unsigned long flags,
640 unsigned long addr, unsigned long addr_only) 680 unsigned long addr, unsigned long addr_only)
641{ 681{
642 return security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only); 682 int ret;
683
684 ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only);
685 if (ret)
686 return ret;
687 return ima_file_mmap(file, prot);
643} 688}
644 689
645int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, 690int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
@@ -719,9 +764,9 @@ int security_kernel_create_files_as(struct cred *new, struct inode *inode)
719 return security_ops->kernel_create_files_as(new, inode); 764 return security_ops->kernel_create_files_as(new, inode);
720} 765}
721 766
722int security_kernel_module_request(void) 767int security_kernel_module_request(char *kmod_name)
723{ 768{
724 return security_ops->kernel_module_request(); 769 return security_ops->kernel_module_request(kmod_name);
725} 770}
726 771
727int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) 772int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
diff --git a/security/selinux/.gitignore b/security/selinux/.gitignore
new file mode 100644
index 000000000000..2e5040a3d48b
--- /dev/null
+++ b/security/selinux/.gitignore
@@ -0,0 +1,2 @@
1av_permissions.h
2flask.h
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index d47fc5e545e0..f013982df417 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -18,5 +18,13 @@ selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
18 18
19selinux-$(CONFIG_NETLABEL) += netlabel.o 19selinux-$(CONFIG_NETLABEL) += netlabel.o
20 20
21EXTRA_CFLAGS += -Isecurity/selinux/include 21EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
22 22
23$(obj)/avc.o: $(obj)/flask.h
24
25quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
26 cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
27
28targets += flask.h
29$(obj)/flask.h: $(src)/include/classmap.h FORCE
30 $(call if_changed,flask)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index b4b5da1c0a42..f2dde268165a 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -31,43 +31,7 @@
31#include <net/ipv6.h> 31#include <net/ipv6.h>
32#include "avc.h" 32#include "avc.h"
33#include "avc_ss.h" 33#include "avc_ss.h"
34 34#include "classmap.h"
35static const struct av_perm_to_string av_perm_to_string[] = {
36#define S_(c, v, s) { c, v, s },
37#include "av_perm_to_string.h"
38#undef S_
39};
40
41static const char *class_to_string[] = {
42#define S_(s) s,
43#include "class_to_string.h"
44#undef S_
45};
46
47#define TB_(s) static const char *s[] = {
48#define TE_(s) };
49#define S_(s) s,
50#include "common_perm_to_string.h"
51#undef TB_
52#undef TE_
53#undef S_
54
55static const struct av_inherit av_inherit[] = {
56#define S_(c, i, b) { .tclass = c,\
57 .common_pts = common_##i##_perm_to_string,\
58 .common_base = b },
59#include "av_inherit.h"
60#undef S_
61};
62
63const struct selinux_class_perm selinux_class_perm = {
64 .av_perm_to_string = av_perm_to_string,
65 .av_pts_len = ARRAY_SIZE(av_perm_to_string),
66 .class_to_string = class_to_string,
67 .cts_len = ARRAY_SIZE(class_to_string),
68 .av_inherit = av_inherit,
69 .av_inherit_len = ARRAY_SIZE(av_inherit)
70};
71 35
72#define AVC_CACHE_SLOTS 512 36#define AVC_CACHE_SLOTS 512
73#define AVC_DEF_CACHE_THRESHOLD 512 37#define AVC_DEF_CACHE_THRESHOLD 512
@@ -139,52 +103,28 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
139 */ 103 */
140static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) 104static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
141{ 105{
142 const char **common_pts = NULL; 106 const char **perms;
143 u32 common_base = 0; 107 int i, perm;
144 int i, i2, perm;
145 108
146 if (av == 0) { 109 if (av == 0) {
147 audit_log_format(ab, " null"); 110 audit_log_format(ab, " null");
148 return; 111 return;
149 } 112 }
150 113
151 for (i = 0; i < ARRAY_SIZE(av_inherit); i++) { 114 perms = secclass_map[tclass-1].perms;
152 if (av_inherit[i].tclass == tclass) {
153 common_pts = av_inherit[i].common_pts;
154 common_base = av_inherit[i].common_base;
155 break;
156 }
157 }
158 115
159 audit_log_format(ab, " {"); 116 audit_log_format(ab, " {");
160 i = 0; 117 i = 0;
161 perm = 1; 118 perm = 1;
162 while (perm < common_base) { 119 while (i < (sizeof(av) * 8)) {
163 if (perm & av) { 120 if ((perm & av) && perms[i]) {
164 audit_log_format(ab, " %s", common_pts[i]); 121 audit_log_format(ab, " %s", perms[i]);
165 av &= ~perm; 122 av &= ~perm;
166 } 123 }
167 i++; 124 i++;
168 perm <<= 1; 125 perm <<= 1;
169 } 126 }
170 127
171 while (i < sizeof(av) * 8) {
172 if (perm & av) {
173 for (i2 = 0; i2 < ARRAY_SIZE(av_perm_to_string); i2++) {
174 if ((av_perm_to_string[i2].tclass == tclass) &&
175 (av_perm_to_string[i2].value == perm))
176 break;
177 }
178 if (i2 < ARRAY_SIZE(av_perm_to_string)) {
179 audit_log_format(ab, " %s",
180 av_perm_to_string[i2].name);
181 av &= ~perm;
182 }
183 }
184 i++;
185 perm <<= 1;
186 }
187
188 if (av) 128 if (av)
189 audit_log_format(ab, " 0x%x", av); 129 audit_log_format(ab, " 0x%x", av);
190 130
@@ -219,8 +159,8 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla
219 kfree(scontext); 159 kfree(scontext);
220 } 160 }
221 161
222 BUG_ON(tclass >= ARRAY_SIZE(class_to_string) || !class_to_string[tclass]); 162 BUG_ON(tclass >= ARRAY_SIZE(secclass_map));
223 audit_log_format(ab, " tclass=%s", class_to_string[tclass]); 163 audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
224} 164}
225 165
226/** 166/**
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 83a4aada0b4c..7a374c2eb043 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -91,7 +91,6 @@
 
 #define NUM_SEL_MNT_OPTS 5
 
-extern unsigned int policydb_loaded_version;
 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
 extern struct security_operations *security_ops;
 
@@ -3338,9 +3337,18 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
 	return 0;
 }
 
-static int selinux_kernel_module_request(void)
+static int selinux_kernel_module_request(char *kmod_name)
 {
-	return task_has_system(current, SYSTEM__MODULE_REQUEST);
+	u32 sid;
+	struct common_audit_data ad;
+
+	sid = task_sid(current);
+
+	COMMON_AUDIT_DATA_INIT(&ad, KMOD);
+	ad.u.kmod_name = kmod_name;
+
+	return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM,
+			    SYSTEM__MODULE_REQUEST, &ad);
 }
 
 static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
@@ -4714,10 +4722,7 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
 	if (err)
 		return err;
 
-	if (policydb_loaded_version >= POLICYDB_VERSION_NLCLASS)
-		err = selinux_nlmsg_perm(sk, skb);
-
-	return err;
+	return selinux_nlmsg_perm(sk, skb);
 }
 
 static int selinux_netlink_recv(struct sk_buff *skb, int capability)
@@ -5830,12 +5835,12 @@ int selinux_disable(void)
 	selinux_disabled = 1;
 	selinux_enabled = 0;
 
-	/* Try to destroy the avc node cache */
-	avc_disable();
-
 	/* Reset security_ops to the secondary module, dummy or capability. */
 	security_ops = secondary_ops;
 
+	/* Try to destroy the avc node cache */
+	avc_disable();
+
 	/* Unregister netfilter hooks. */
 	selinux_nf_ip_exit();
 
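The selinux_kernel_module_request() change above threads the requested module name into the permission check as audit data. A rough userspace illustration of why carrying the name is useful, with a toy allow-list standing in for the real avc_has_perm()/policy decision (the helper and names below are hypothetical):

#include <stdio.h>
#include <string.h>

/* Toy stand-in for the policy decision: the real hook hands kmod_name to
 * avc_has_perm() as audit data and lets SELinux policy decide. */
static int toy_module_request_allowed(const char *kmod_name)
{
	static const char *const allowed[] = { "net-pf-10", "fs-ext4", NULL };
	int i;

	for (i = 0; allowed[i]; i++)
		if (strcmp(allowed[i], kmod_name) == 0)
			return 1;
	return 0;
}

int main(void)
{
	const char *req = "char-major-10-200";	/* example module alias */

	if (!toy_module_request_allowed(req))
		printf("denied module_request kmod_name=\"%s\"\n", req);
	return 0;
}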
diff --git a/security/selinux/include/av_inherit.h b/security/selinux/include/av_inherit.h
deleted file mode 100644
index abedcd704dae..000000000000
--- a/security/selinux/include/av_inherit.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2 S_(SECCLASS_DIR, file, 0x00020000UL)
3 S_(SECCLASS_FILE, file, 0x00020000UL)
4 S_(SECCLASS_LNK_FILE, file, 0x00020000UL)
5 S_(SECCLASS_CHR_FILE, file, 0x00020000UL)
6 S_(SECCLASS_BLK_FILE, file, 0x00020000UL)
7 S_(SECCLASS_SOCK_FILE, file, 0x00020000UL)
8 S_(SECCLASS_FIFO_FILE, file, 0x00020000UL)
9 S_(SECCLASS_SOCKET, socket, 0x00400000UL)
10 S_(SECCLASS_TCP_SOCKET, socket, 0x00400000UL)
11 S_(SECCLASS_UDP_SOCKET, socket, 0x00400000UL)
12 S_(SECCLASS_RAWIP_SOCKET, socket, 0x00400000UL)
13 S_(SECCLASS_NETLINK_SOCKET, socket, 0x00400000UL)
14 S_(SECCLASS_PACKET_SOCKET, socket, 0x00400000UL)
15 S_(SECCLASS_KEY_SOCKET, socket, 0x00400000UL)
16 S_(SECCLASS_UNIX_STREAM_SOCKET, socket, 0x00400000UL)
17 S_(SECCLASS_UNIX_DGRAM_SOCKET, socket, 0x00400000UL)
18 S_(SECCLASS_TUN_SOCKET, socket, 0x00400000UL)
19 S_(SECCLASS_IPC, ipc, 0x00000200UL)
20 S_(SECCLASS_SEM, ipc, 0x00000200UL)
21 S_(SECCLASS_MSGQ, ipc, 0x00000200UL)
22 S_(SECCLASS_SHM, ipc, 0x00000200UL)
23 S_(SECCLASS_NETLINK_ROUTE_SOCKET, socket, 0x00400000UL)
24 S_(SECCLASS_NETLINK_FIREWALL_SOCKET, socket, 0x00400000UL)
25 S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, socket, 0x00400000UL)
26 S_(SECCLASS_NETLINK_NFLOG_SOCKET, socket, 0x00400000UL)
27 S_(SECCLASS_NETLINK_XFRM_SOCKET, socket, 0x00400000UL)
28 S_(SECCLASS_NETLINK_SELINUX_SOCKET, socket, 0x00400000UL)
29 S_(SECCLASS_NETLINK_AUDIT_SOCKET, socket, 0x00400000UL)
30 S_(SECCLASS_NETLINK_IP6FW_SOCKET, socket, 0x00400000UL)
31 S_(SECCLASS_NETLINK_DNRT_SOCKET, socket, 0x00400000UL)
32 S_(SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET, socket, 0x00400000UL)
33 S_(SECCLASS_APPLETALK_SOCKET, socket, 0x00400000UL)
34 S_(SECCLASS_DCCP_SOCKET, socket, 0x00400000UL)
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
deleted file mode 100644
index 2b683ad83d21..000000000000
--- a/security/selinux/include/av_perm_to_string.h
+++ /dev/null
@@ -1,183 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2 S_(SECCLASS_FILESYSTEM, FILESYSTEM__MOUNT, "mount")
3 S_(SECCLASS_FILESYSTEM, FILESYSTEM__REMOUNT, "remount")
4 S_(SECCLASS_FILESYSTEM, FILESYSTEM__UNMOUNT, "unmount")
5 S_(SECCLASS_FILESYSTEM, FILESYSTEM__GETATTR, "getattr")
6 S_(SECCLASS_FILESYSTEM, FILESYSTEM__RELABELFROM, "relabelfrom")
7 S_(SECCLASS_FILESYSTEM, FILESYSTEM__RELABELTO, "relabelto")
8 S_(SECCLASS_FILESYSTEM, FILESYSTEM__TRANSITION, "transition")
9 S_(SECCLASS_FILESYSTEM, FILESYSTEM__ASSOCIATE, "associate")
10 S_(SECCLASS_FILESYSTEM, FILESYSTEM__QUOTAMOD, "quotamod")
11 S_(SECCLASS_FILESYSTEM, FILESYSTEM__QUOTAGET, "quotaget")
12 S_(SECCLASS_DIR, DIR__ADD_NAME, "add_name")
13 S_(SECCLASS_DIR, DIR__REMOVE_NAME, "remove_name")
14 S_(SECCLASS_DIR, DIR__REPARENT, "reparent")
15 S_(SECCLASS_DIR, DIR__SEARCH, "search")
16 S_(SECCLASS_DIR, DIR__RMDIR, "rmdir")
17 S_(SECCLASS_DIR, DIR__OPEN, "open")
18 S_(SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, "execute_no_trans")
19 S_(SECCLASS_FILE, FILE__ENTRYPOINT, "entrypoint")
20 S_(SECCLASS_FILE, FILE__EXECMOD, "execmod")
21 S_(SECCLASS_FILE, FILE__OPEN, "open")
22 S_(SECCLASS_CHR_FILE, CHR_FILE__EXECUTE_NO_TRANS, "execute_no_trans")
23 S_(SECCLASS_CHR_FILE, CHR_FILE__ENTRYPOINT, "entrypoint")
24 S_(SECCLASS_CHR_FILE, CHR_FILE__EXECMOD, "execmod")
25 S_(SECCLASS_CHR_FILE, CHR_FILE__OPEN, "open")
26 S_(SECCLASS_BLK_FILE, BLK_FILE__OPEN, "open")
27 S_(SECCLASS_SOCK_FILE, SOCK_FILE__OPEN, "open")
28 S_(SECCLASS_FIFO_FILE, FIFO_FILE__OPEN, "open")
29 S_(SECCLASS_FD, FD__USE, "use")
30 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__CONNECTTO, "connectto")
31 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NEWCONN, "newconn")
32 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__ACCEPTFROM, "acceptfrom")
33 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NODE_BIND, "node_bind")
34 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NAME_CONNECT, "name_connect")
35 S_(SECCLASS_UDP_SOCKET, UDP_SOCKET__NODE_BIND, "node_bind")
36 S_(SECCLASS_RAWIP_SOCKET, RAWIP_SOCKET__NODE_BIND, "node_bind")
37 S_(SECCLASS_NODE, NODE__TCP_RECV, "tcp_recv")
38 S_(SECCLASS_NODE, NODE__TCP_SEND, "tcp_send")
39 S_(SECCLASS_NODE, NODE__UDP_RECV, "udp_recv")
40 S_(SECCLASS_NODE, NODE__UDP_SEND, "udp_send")
41 S_(SECCLASS_NODE, NODE__RAWIP_RECV, "rawip_recv")
42 S_(SECCLASS_NODE, NODE__RAWIP_SEND, "rawip_send")
43 S_(SECCLASS_NODE, NODE__ENFORCE_DEST, "enforce_dest")
44 S_(SECCLASS_NODE, NODE__DCCP_RECV, "dccp_recv")
45 S_(SECCLASS_NODE, NODE__DCCP_SEND, "dccp_send")
46 S_(SECCLASS_NODE, NODE__RECVFROM, "recvfrom")
47 S_(SECCLASS_NODE, NODE__SENDTO, "sendto")
48 S_(SECCLASS_NETIF, NETIF__TCP_RECV, "tcp_recv")
49 S_(SECCLASS_NETIF, NETIF__TCP_SEND, "tcp_send")
50 S_(SECCLASS_NETIF, NETIF__UDP_RECV, "udp_recv")
51 S_(SECCLASS_NETIF, NETIF__UDP_SEND, "udp_send")
52 S_(SECCLASS_NETIF, NETIF__RAWIP_RECV, "rawip_recv")
53 S_(SECCLASS_NETIF, NETIF__RAWIP_SEND, "rawip_send")
54 S_(SECCLASS_NETIF, NETIF__DCCP_RECV, "dccp_recv")
55 S_(SECCLASS_NETIF, NETIF__DCCP_SEND, "dccp_send")
56 S_(SECCLASS_NETIF, NETIF__INGRESS, "ingress")
57 S_(SECCLASS_NETIF, NETIF__EGRESS, "egress")
58 S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__CONNECTTO, "connectto")
59 S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__NEWCONN, "newconn")
60 S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__ACCEPTFROM, "acceptfrom")
61 S_(SECCLASS_PROCESS, PROCESS__FORK, "fork")
62 S_(SECCLASS_PROCESS, PROCESS__TRANSITION, "transition")
63 S_(SECCLASS_PROCESS, PROCESS__SIGCHLD, "sigchld")
64 S_(SECCLASS_PROCESS, PROCESS__SIGKILL, "sigkill")
65 S_(SECCLASS_PROCESS, PROCESS__SIGSTOP, "sigstop")
66 S_(SECCLASS_PROCESS, PROCESS__SIGNULL, "signull")
67 S_(SECCLASS_PROCESS, PROCESS__SIGNAL, "signal")
68 S_(SECCLASS_PROCESS, PROCESS__PTRACE, "ptrace")
69 S_(SECCLASS_PROCESS, PROCESS__GETSCHED, "getsched")
70 S_(SECCLASS_PROCESS, PROCESS__SETSCHED, "setsched")
71 S_(SECCLASS_PROCESS, PROCESS__GETSESSION, "getsession")
72 S_(SECCLASS_PROCESS, PROCESS__GETPGID, "getpgid")
73 S_(SECCLASS_PROCESS, PROCESS__SETPGID, "setpgid")
74 S_(SECCLASS_PROCESS, PROCESS__GETCAP, "getcap")
75 S_(SECCLASS_PROCESS, PROCESS__SETCAP, "setcap")
76 S_(SECCLASS_PROCESS, PROCESS__SHARE, "share")
77 S_(SECCLASS_PROCESS, PROCESS__GETATTR, "getattr")
78 S_(SECCLASS_PROCESS, PROCESS__SETEXEC, "setexec")
79 S_(SECCLASS_PROCESS, PROCESS__SETFSCREATE, "setfscreate")
80 S_(SECCLASS_PROCESS, PROCESS__NOATSECURE, "noatsecure")
81 S_(SECCLASS_PROCESS, PROCESS__SIGINH, "siginh")
82 S_(SECCLASS_PROCESS, PROCESS__SETRLIMIT, "setrlimit")
83 S_(SECCLASS_PROCESS, PROCESS__RLIMITINH, "rlimitinh")
84 S_(SECCLASS_PROCESS, PROCESS__DYNTRANSITION, "dyntransition")
85 S_(SECCLASS_PROCESS, PROCESS__SETCURRENT, "setcurrent")
86 S_(SECCLASS_PROCESS, PROCESS__EXECMEM, "execmem")
87 S_(SECCLASS_PROCESS, PROCESS__EXECSTACK, "execstack")
88 S_(SECCLASS_PROCESS, PROCESS__EXECHEAP, "execheap")
89 S_(SECCLASS_PROCESS, PROCESS__SETKEYCREATE, "setkeycreate")
90 S_(SECCLASS_PROCESS, PROCESS__SETSOCKCREATE, "setsockcreate")
91 S_(SECCLASS_MSGQ, MSGQ__ENQUEUE, "enqueue")
92 S_(SECCLASS_MSG, MSG__SEND, "send")
93 S_(SECCLASS_MSG, MSG__RECEIVE, "receive")
94 S_(SECCLASS_SHM, SHM__LOCK, "lock")
95 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_AV, "compute_av")
96 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_CREATE, "compute_create")
97 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_MEMBER, "compute_member")
98 S_(SECCLASS_SECURITY, SECURITY__CHECK_CONTEXT, "check_context")
99 S_(SECCLASS_SECURITY, SECURITY__LOAD_POLICY, "load_policy")
100 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_RELABEL, "compute_relabel")
101 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_USER, "compute_user")
102 S_(SECCLASS_SECURITY, SECURITY__SETENFORCE, "setenforce")
103 S_(SECCLASS_SECURITY, SECURITY__SETBOOL, "setbool")
104 S_(SECCLASS_SECURITY, SECURITY__SETSECPARAM, "setsecparam")
105 S_(SECCLASS_SECURITY, SECURITY__SETCHECKREQPROT, "setcheckreqprot")
106 S_(SECCLASS_SYSTEM, SYSTEM__IPC_INFO, "ipc_info")
107 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_READ, "syslog_read")
108 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_MOD, "syslog_mod")
109 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_CONSOLE, "syslog_console")
110 S_(SECCLASS_SYSTEM, SYSTEM__MODULE_REQUEST, "module_request")
111 S_(SECCLASS_CAPABILITY, CAPABILITY__CHOWN, "chown")
112 S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_OVERRIDE, "dac_override")
113 S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_READ_SEARCH, "dac_read_search")
114 S_(SECCLASS_CAPABILITY, CAPABILITY__FOWNER, "fowner")
115 S_(SECCLASS_CAPABILITY, CAPABILITY__FSETID, "fsetid")
116 S_(SECCLASS_CAPABILITY, CAPABILITY__KILL, "kill")
117 S_(SECCLASS_CAPABILITY, CAPABILITY__SETGID, "setgid")
118 S_(SECCLASS_CAPABILITY, CAPABILITY__SETUID, "setuid")
119 S_(SECCLASS_CAPABILITY, CAPABILITY__SETPCAP, "setpcap")
120 S_(SECCLASS_CAPABILITY, CAPABILITY__LINUX_IMMUTABLE, "linux_immutable")
121 S_(SECCLASS_CAPABILITY, CAPABILITY__NET_BIND_SERVICE, "net_bind_service")
122 S_(SECCLASS_CAPABILITY, CAPABILITY__NET_BROADCAST, "net_broadcast")
123 S_(SECCLASS_CAPABILITY, CAPABILITY__NET_ADMIN, "net_admin")
124 S_(SECCLASS_CAPABILITY, CAPABILITY__NET_RAW, "net_raw")
125 S_(SECCLASS_CAPABILITY, CAPABILITY__IPC_LOCK, "ipc_lock")
126 S_(SECCLASS_CAPABILITY, CAPABILITY__IPC_OWNER, "ipc_owner")
127 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_MODULE, "sys_module")
128 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_RAWIO, "sys_rawio")
129 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_CHROOT, "sys_chroot")
130 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_PTRACE, "sys_ptrace")
131 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_PACCT, "sys_pacct")
132 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_ADMIN, "sys_admin")
133 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_BOOT, "sys_boot")
134 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_NICE, "sys_nice")
135 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_RESOURCE, "sys_resource")
136 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_TIME, "sys_time")
137 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_TTY_CONFIG, "sys_tty_config")
138 S_(SECCLASS_CAPABILITY, CAPABILITY__MKNOD, "mknod")
139 S_(SECCLASS_CAPABILITY, CAPABILITY__LEASE, "lease")
140 S_(SECCLASS_CAPABILITY, CAPABILITY__AUDIT_WRITE, "audit_write")
141 S_(SECCLASS_CAPABILITY, CAPABILITY__AUDIT_CONTROL, "audit_control")
142 S_(SECCLASS_CAPABILITY, CAPABILITY__SETFCAP, "setfcap")
143 S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_OVERRIDE, "mac_override")
144 S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_ADMIN, "mac_admin")
145 S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ, "nlmsg_read")
146 S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE, "nlmsg_write")
147 S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_READ, "nlmsg_read")
148 S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE, "nlmsg_write")
149 S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, NETLINK_TCPDIAG_SOCKET__NLMSG_READ, "nlmsg_read")
150 S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE, "nlmsg_write")
151 S_(SECCLASS_NETLINK_XFRM_SOCKET, NETLINK_XFRM_SOCKET__NLMSG_READ, "nlmsg_read")
152 S_(SECCLASS_NETLINK_XFRM_SOCKET, NETLINK_XFRM_SOCKET__NLMSG_WRITE, "nlmsg_write")
153 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_READ, "nlmsg_read")
154 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_WRITE, "nlmsg_write")
155 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_RELAY, "nlmsg_relay")
156 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_READPRIV, "nlmsg_readpriv")
157 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT, "nlmsg_tty_audit")
158 S_(SECCLASS_NETLINK_IP6FW_SOCKET, NETLINK_IP6FW_SOCKET__NLMSG_READ, "nlmsg_read")
159 S_(SECCLASS_NETLINK_IP6FW_SOCKET, NETLINK_IP6FW_SOCKET__NLMSG_WRITE, "nlmsg_write")
160 S_(SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, "sendto")
161 S_(SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, "recvfrom")
162 S_(SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, "setcontext")
163 S_(SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, "polmatch")
164 S_(SECCLASS_PACKET, PACKET__SEND, "send")
165 S_(SECCLASS_PACKET, PACKET__RECV, "recv")
166 S_(SECCLASS_PACKET, PACKET__RELABELTO, "relabelto")
167 S_(SECCLASS_PACKET, PACKET__FLOW_IN, "flow_in")
168 S_(SECCLASS_PACKET, PACKET__FLOW_OUT, "flow_out")
169 S_(SECCLASS_PACKET, PACKET__FORWARD_IN, "forward_in")
170 S_(SECCLASS_PACKET, PACKET__FORWARD_OUT, "forward_out")
171 S_(SECCLASS_KEY, KEY__VIEW, "view")
172 S_(SECCLASS_KEY, KEY__READ, "read")
173 S_(SECCLASS_KEY, KEY__WRITE, "write")
174 S_(SECCLASS_KEY, KEY__SEARCH, "search")
175 S_(SECCLASS_KEY, KEY__LINK, "link")
176 S_(SECCLASS_KEY, KEY__SETATTR, "setattr")
177 S_(SECCLASS_KEY, KEY__CREATE, "create")
178 S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NODE_BIND, "node_bind")
179 S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NAME_CONNECT, "name_connect")
180 S_(SECCLASS_MEMPROTECT, MEMPROTECT__MMAP_ZERO, "mmap_zero")
181 S_(SECCLASS_PEER, PEER__RECV, "recv")
182 S_(SECCLASS_KERNEL_SERVICE, KERNEL_SERVICE__USE_AS_OVERRIDE, "use_as_override")
183 S_(SECCLASS_KERNEL_SERVICE, KERNEL_SERVICE__CREATE_FILES_AS, "create_files_as")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
deleted file mode 100644
index 0546d616ccac..000000000000
--- a/security/selinux/include/av_permissions.h
+++ /dev/null
@@ -1,870 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2#define COMMON_FILE__IOCTL 0x00000001UL
3#define COMMON_FILE__READ 0x00000002UL
4#define COMMON_FILE__WRITE 0x00000004UL
5#define COMMON_FILE__CREATE 0x00000008UL
6#define COMMON_FILE__GETATTR 0x00000010UL
7#define COMMON_FILE__SETATTR 0x00000020UL
8#define COMMON_FILE__LOCK 0x00000040UL
9#define COMMON_FILE__RELABELFROM 0x00000080UL
10#define COMMON_FILE__RELABELTO 0x00000100UL
11#define COMMON_FILE__APPEND 0x00000200UL
12#define COMMON_FILE__UNLINK 0x00000400UL
13#define COMMON_FILE__LINK 0x00000800UL
14#define COMMON_FILE__RENAME 0x00001000UL
15#define COMMON_FILE__EXECUTE 0x00002000UL
16#define COMMON_FILE__SWAPON 0x00004000UL
17#define COMMON_FILE__QUOTAON 0x00008000UL
18#define COMMON_FILE__MOUNTON 0x00010000UL
19#define COMMON_SOCKET__IOCTL 0x00000001UL
20#define COMMON_SOCKET__READ 0x00000002UL
21#define COMMON_SOCKET__WRITE 0x00000004UL
22#define COMMON_SOCKET__CREATE 0x00000008UL
23#define COMMON_SOCKET__GETATTR 0x00000010UL
24#define COMMON_SOCKET__SETATTR 0x00000020UL
25#define COMMON_SOCKET__LOCK 0x00000040UL
26#define COMMON_SOCKET__RELABELFROM 0x00000080UL
27#define COMMON_SOCKET__RELABELTO 0x00000100UL
28#define COMMON_SOCKET__APPEND 0x00000200UL
29#define COMMON_SOCKET__BIND 0x00000400UL
30#define COMMON_SOCKET__CONNECT 0x00000800UL
31#define COMMON_SOCKET__LISTEN 0x00001000UL
32#define COMMON_SOCKET__ACCEPT 0x00002000UL
33#define COMMON_SOCKET__GETOPT 0x00004000UL
34#define COMMON_SOCKET__SETOPT 0x00008000UL
35#define COMMON_SOCKET__SHUTDOWN 0x00010000UL
36#define COMMON_SOCKET__RECVFROM 0x00020000UL
37#define COMMON_SOCKET__SENDTO 0x00040000UL
38#define COMMON_SOCKET__RECV_MSG 0x00080000UL
39#define COMMON_SOCKET__SEND_MSG 0x00100000UL
40#define COMMON_SOCKET__NAME_BIND 0x00200000UL
41#define COMMON_IPC__CREATE 0x00000001UL
42#define COMMON_IPC__DESTROY 0x00000002UL
43#define COMMON_IPC__GETATTR 0x00000004UL
44#define COMMON_IPC__SETATTR 0x00000008UL
45#define COMMON_IPC__READ 0x00000010UL
46#define COMMON_IPC__WRITE 0x00000020UL
47#define COMMON_IPC__ASSOCIATE 0x00000040UL
48#define COMMON_IPC__UNIX_READ 0x00000080UL
49#define COMMON_IPC__UNIX_WRITE 0x00000100UL
50#define FILESYSTEM__MOUNT 0x00000001UL
51#define FILESYSTEM__REMOUNT 0x00000002UL
52#define FILESYSTEM__UNMOUNT 0x00000004UL
53#define FILESYSTEM__GETATTR 0x00000008UL
54#define FILESYSTEM__RELABELFROM 0x00000010UL
55#define FILESYSTEM__RELABELTO 0x00000020UL
56#define FILESYSTEM__TRANSITION 0x00000040UL
57#define FILESYSTEM__ASSOCIATE 0x00000080UL
58#define FILESYSTEM__QUOTAMOD 0x00000100UL
59#define FILESYSTEM__QUOTAGET 0x00000200UL
60#define DIR__IOCTL 0x00000001UL
61#define DIR__READ 0x00000002UL
62#define DIR__WRITE 0x00000004UL
63#define DIR__CREATE 0x00000008UL
64#define DIR__GETATTR 0x00000010UL
65#define DIR__SETATTR 0x00000020UL
66#define DIR__LOCK 0x00000040UL
67#define DIR__RELABELFROM 0x00000080UL
68#define DIR__RELABELTO 0x00000100UL
69#define DIR__APPEND 0x00000200UL
70#define DIR__UNLINK 0x00000400UL
71#define DIR__LINK 0x00000800UL
72#define DIR__RENAME 0x00001000UL
73#define DIR__EXECUTE 0x00002000UL
74#define DIR__SWAPON 0x00004000UL
75#define DIR__QUOTAON 0x00008000UL
76#define DIR__MOUNTON 0x00010000UL
77#define DIR__ADD_NAME 0x00020000UL
78#define DIR__REMOVE_NAME 0x00040000UL
79#define DIR__REPARENT 0x00080000UL
80#define DIR__SEARCH 0x00100000UL
81#define DIR__RMDIR 0x00200000UL
82#define DIR__OPEN 0x00400000UL
83#define FILE__IOCTL 0x00000001UL
84#define FILE__READ 0x00000002UL
85#define FILE__WRITE 0x00000004UL
86#define FILE__CREATE 0x00000008UL
87#define FILE__GETATTR 0x00000010UL
88#define FILE__SETATTR 0x00000020UL
89#define FILE__LOCK 0x00000040UL
90#define FILE__RELABELFROM 0x00000080UL
91#define FILE__RELABELTO 0x00000100UL
92#define FILE__APPEND 0x00000200UL
93#define FILE__UNLINK 0x00000400UL
94#define FILE__LINK 0x00000800UL
95#define FILE__RENAME 0x00001000UL
96#define FILE__EXECUTE 0x00002000UL
97#define FILE__SWAPON 0x00004000UL
98#define FILE__QUOTAON 0x00008000UL
99#define FILE__MOUNTON 0x00010000UL
100#define FILE__EXECUTE_NO_TRANS 0x00020000UL
101#define FILE__ENTRYPOINT 0x00040000UL
102#define FILE__EXECMOD 0x00080000UL
103#define FILE__OPEN 0x00100000UL
104#define LNK_FILE__IOCTL 0x00000001UL
105#define LNK_FILE__READ 0x00000002UL
106#define LNK_FILE__WRITE 0x00000004UL
107#define LNK_FILE__CREATE 0x00000008UL
108#define LNK_FILE__GETATTR 0x00000010UL
109#define LNK_FILE__SETATTR 0x00000020UL
110#define LNK_FILE__LOCK 0x00000040UL
111#define LNK_FILE__RELABELFROM 0x00000080UL
112#define LNK_FILE__RELABELTO 0x00000100UL
113#define LNK_FILE__APPEND 0x00000200UL
114#define LNK_FILE__UNLINK 0x00000400UL
115#define LNK_FILE__LINK 0x00000800UL
116#define LNK_FILE__RENAME 0x00001000UL
117#define LNK_FILE__EXECUTE 0x00002000UL
118#define LNK_FILE__SWAPON 0x00004000UL
119#define LNK_FILE__QUOTAON 0x00008000UL
120#define LNK_FILE__MOUNTON 0x00010000UL
121#define CHR_FILE__IOCTL 0x00000001UL
122#define CHR_FILE__READ 0x00000002UL
123#define CHR_FILE__WRITE 0x00000004UL
124#define CHR_FILE__CREATE 0x00000008UL
125#define CHR_FILE__GETATTR 0x00000010UL
126#define CHR_FILE__SETATTR 0x00000020UL
127#define CHR_FILE__LOCK 0x00000040UL
128#define CHR_FILE__RELABELFROM 0x00000080UL
129#define CHR_FILE__RELABELTO 0x00000100UL
130#define CHR_FILE__APPEND 0x00000200UL
131#define CHR_FILE__UNLINK 0x00000400UL
132#define CHR_FILE__LINK 0x00000800UL
133#define CHR_FILE__RENAME 0x00001000UL
134#define CHR_FILE__EXECUTE 0x00002000UL
135#define CHR_FILE__SWAPON 0x00004000UL
136#define CHR_FILE__QUOTAON 0x00008000UL
137#define CHR_FILE__MOUNTON 0x00010000UL
138#define CHR_FILE__EXECUTE_NO_TRANS 0x00020000UL
139#define CHR_FILE__ENTRYPOINT 0x00040000UL
140#define CHR_FILE__EXECMOD 0x00080000UL
141#define CHR_FILE__OPEN 0x00100000UL
142#define BLK_FILE__IOCTL 0x00000001UL
143#define BLK_FILE__READ 0x00000002UL
144#define BLK_FILE__WRITE 0x00000004UL
145#define BLK_FILE__CREATE 0x00000008UL
146#define BLK_FILE__GETATTR 0x00000010UL
147#define BLK_FILE__SETATTR 0x00000020UL
148#define BLK_FILE__LOCK 0x00000040UL
149#define BLK_FILE__RELABELFROM 0x00000080UL
150#define BLK_FILE__RELABELTO 0x00000100UL
151#define BLK_FILE__APPEND 0x00000200UL
152#define BLK_FILE__UNLINK 0x00000400UL
153#define BLK_FILE__LINK 0x00000800UL
154#define BLK_FILE__RENAME 0x00001000UL
155#define BLK_FILE__EXECUTE 0x00002000UL
156#define BLK_FILE__SWAPON 0x00004000UL
157#define BLK_FILE__QUOTAON 0x00008000UL
158#define BLK_FILE__MOUNTON 0x00010000UL
159#define BLK_FILE__OPEN 0x00020000UL
160#define SOCK_FILE__IOCTL 0x00000001UL
161#define SOCK_FILE__READ 0x00000002UL
162#define SOCK_FILE__WRITE 0x00000004UL
163#define SOCK_FILE__CREATE 0x00000008UL
164#define SOCK_FILE__GETATTR 0x00000010UL
165#define SOCK_FILE__SETATTR 0x00000020UL
166#define SOCK_FILE__LOCK 0x00000040UL
167#define SOCK_FILE__RELABELFROM 0x00000080UL
168#define SOCK_FILE__RELABELTO 0x00000100UL
169#define SOCK_FILE__APPEND 0x00000200UL
170#define SOCK_FILE__UNLINK 0x00000400UL
171#define SOCK_FILE__LINK 0x00000800UL
172#define SOCK_FILE__RENAME 0x00001000UL
173#define SOCK_FILE__EXECUTE 0x00002000UL
174#define SOCK_FILE__SWAPON 0x00004000UL
175#define SOCK_FILE__QUOTAON 0x00008000UL
176#define SOCK_FILE__MOUNTON 0x00010000UL
177#define SOCK_FILE__OPEN 0x00020000UL
178#define FIFO_FILE__IOCTL 0x00000001UL
179#define FIFO_FILE__READ 0x00000002UL
180#define FIFO_FILE__WRITE 0x00000004UL
181#define FIFO_FILE__CREATE 0x00000008UL
182#define FIFO_FILE__GETATTR 0x00000010UL
183#define FIFO_FILE__SETATTR 0x00000020UL
184#define FIFO_FILE__LOCK 0x00000040UL
185#define FIFO_FILE__RELABELFROM 0x00000080UL
186#define FIFO_FILE__RELABELTO 0x00000100UL
187#define FIFO_FILE__APPEND 0x00000200UL
188#define FIFO_FILE__UNLINK 0x00000400UL
189#define FIFO_FILE__LINK 0x00000800UL
190#define FIFO_FILE__RENAME 0x00001000UL
191#define FIFO_FILE__EXECUTE 0x00002000UL
192#define FIFO_FILE__SWAPON 0x00004000UL
193#define FIFO_FILE__QUOTAON 0x00008000UL
194#define FIFO_FILE__MOUNTON 0x00010000UL
195#define FIFO_FILE__OPEN 0x00020000UL
196#define FD__USE 0x00000001UL
197#define SOCKET__IOCTL 0x00000001UL
198#define SOCKET__READ 0x00000002UL
199#define SOCKET__WRITE 0x00000004UL
200#define SOCKET__CREATE 0x00000008UL
201#define SOCKET__GETATTR 0x00000010UL
202#define SOCKET__SETATTR 0x00000020UL
203#define SOCKET__LOCK 0x00000040UL
204#define SOCKET__RELABELFROM 0x00000080UL
205#define SOCKET__RELABELTO 0x00000100UL
206#define SOCKET__APPEND 0x00000200UL
207#define SOCKET__BIND 0x00000400UL
208#define SOCKET__CONNECT 0x00000800UL
209#define SOCKET__LISTEN 0x00001000UL
210#define SOCKET__ACCEPT 0x00002000UL
211#define SOCKET__GETOPT 0x00004000UL
212#define SOCKET__SETOPT 0x00008000UL
213#define SOCKET__SHUTDOWN 0x00010000UL
214#define SOCKET__RECVFROM 0x00020000UL
215#define SOCKET__SENDTO 0x00040000UL
216#define SOCKET__RECV_MSG 0x00080000UL
217#define SOCKET__SEND_MSG 0x00100000UL
218#define SOCKET__NAME_BIND 0x00200000UL
219#define TCP_SOCKET__IOCTL 0x00000001UL
220#define TCP_SOCKET__READ 0x00000002UL
221#define TCP_SOCKET__WRITE 0x00000004UL
222#define TCP_SOCKET__CREATE 0x00000008UL
223#define TCP_SOCKET__GETATTR 0x00000010UL
224#define TCP_SOCKET__SETATTR 0x00000020UL
225#define TCP_SOCKET__LOCK 0x00000040UL
226#define TCP_SOCKET__RELABELFROM 0x00000080UL
227#define TCP_SOCKET__RELABELTO 0x00000100UL
228#define TCP_SOCKET__APPEND 0x00000200UL
229#define TCP_SOCKET__BIND 0x00000400UL
230#define TCP_SOCKET__CONNECT 0x00000800UL
231#define TCP_SOCKET__LISTEN 0x00001000UL
232#define TCP_SOCKET__ACCEPT 0x00002000UL
233#define TCP_SOCKET__GETOPT 0x00004000UL
234#define TCP_SOCKET__SETOPT 0x00008000UL
235#define TCP_SOCKET__SHUTDOWN 0x00010000UL
236#define TCP_SOCKET__RECVFROM 0x00020000UL
237#define TCP_SOCKET__SENDTO 0x00040000UL
238#define TCP_SOCKET__RECV_MSG 0x00080000UL
239#define TCP_SOCKET__SEND_MSG 0x00100000UL
240#define TCP_SOCKET__NAME_BIND 0x00200000UL
241#define TCP_SOCKET__CONNECTTO 0x00400000UL
242#define TCP_SOCKET__NEWCONN 0x00800000UL
243#define TCP_SOCKET__ACCEPTFROM 0x01000000UL
244#define TCP_SOCKET__NODE_BIND 0x02000000UL
245#define TCP_SOCKET__NAME_CONNECT 0x04000000UL
246#define UDP_SOCKET__IOCTL 0x00000001UL
247#define UDP_SOCKET__READ 0x00000002UL
248#define UDP_SOCKET__WRITE 0x00000004UL
249#define UDP_SOCKET__CREATE 0x00000008UL
250#define UDP_SOCKET__GETATTR 0x00000010UL
251#define UDP_SOCKET__SETATTR 0x00000020UL
252#define UDP_SOCKET__LOCK 0x00000040UL
253#define UDP_SOCKET__RELABELFROM 0x00000080UL
254#define UDP_SOCKET__RELABELTO 0x00000100UL
255#define UDP_SOCKET__APPEND 0x00000200UL
256#define UDP_SOCKET__BIND 0x00000400UL
257#define UDP_SOCKET__CONNECT 0x00000800UL
258#define UDP_SOCKET__LISTEN 0x00001000UL
259#define UDP_SOCKET__ACCEPT 0x00002000UL
260#define UDP_SOCKET__GETOPT 0x00004000UL
261#define UDP_SOCKET__SETOPT 0x00008000UL
262#define UDP_SOCKET__SHUTDOWN 0x00010000UL
263#define UDP_SOCKET__RECVFROM 0x00020000UL
264#define UDP_SOCKET__SENDTO 0x00040000UL
265#define UDP_SOCKET__RECV_MSG 0x00080000UL
266#define UDP_SOCKET__SEND_MSG 0x00100000UL
267#define UDP_SOCKET__NAME_BIND 0x00200000UL
268#define UDP_SOCKET__NODE_BIND 0x00400000UL
269#define RAWIP_SOCKET__IOCTL 0x00000001UL
270#define RAWIP_SOCKET__READ 0x00000002UL
271#define RAWIP_SOCKET__WRITE 0x00000004UL
272#define RAWIP_SOCKET__CREATE 0x00000008UL
273#define RAWIP_SOCKET__GETATTR 0x00000010UL
274#define RAWIP_SOCKET__SETATTR 0x00000020UL
275#define RAWIP_SOCKET__LOCK 0x00000040UL
276#define RAWIP_SOCKET__RELABELFROM 0x00000080UL
277#define RAWIP_SOCKET__RELABELTO 0x00000100UL
278#define RAWIP_SOCKET__APPEND 0x00000200UL
279#define RAWIP_SOCKET__BIND 0x00000400UL
280#define RAWIP_SOCKET__CONNECT 0x00000800UL
281#define RAWIP_SOCKET__LISTEN 0x00001000UL
282#define RAWIP_SOCKET__ACCEPT 0x00002000UL
283#define RAWIP_SOCKET__GETOPT 0x00004000UL
284#define RAWIP_SOCKET__SETOPT 0x00008000UL
285#define RAWIP_SOCKET__SHUTDOWN 0x00010000UL
286#define RAWIP_SOCKET__RECVFROM 0x00020000UL
287#define RAWIP_SOCKET__SENDTO 0x00040000UL
288#define RAWIP_SOCKET__RECV_MSG 0x00080000UL
289#define RAWIP_SOCKET__SEND_MSG 0x00100000UL
290#define RAWIP_SOCKET__NAME_BIND 0x00200000UL
291#define RAWIP_SOCKET__NODE_BIND 0x00400000UL
292#define NODE__TCP_RECV 0x00000001UL
293#define NODE__TCP_SEND 0x00000002UL
294#define NODE__UDP_RECV 0x00000004UL
295#define NODE__UDP_SEND 0x00000008UL
296#define NODE__RAWIP_RECV 0x00000010UL
297#define NODE__RAWIP_SEND 0x00000020UL
298#define NODE__ENFORCE_DEST 0x00000040UL
299#define NODE__DCCP_RECV 0x00000080UL
300#define NODE__DCCP_SEND 0x00000100UL
301#define NODE__RECVFROM 0x00000200UL
302#define NODE__SENDTO 0x00000400UL
303#define NETIF__TCP_RECV 0x00000001UL
304#define NETIF__TCP_SEND 0x00000002UL
305#define NETIF__UDP_RECV 0x00000004UL
306#define NETIF__UDP_SEND 0x00000008UL
307#define NETIF__RAWIP_RECV 0x00000010UL
308#define NETIF__RAWIP_SEND 0x00000020UL
309#define NETIF__DCCP_RECV 0x00000040UL
310#define NETIF__DCCP_SEND 0x00000080UL
311#define NETIF__INGRESS 0x00000100UL
312#define NETIF__EGRESS 0x00000200UL
313#define NETLINK_SOCKET__IOCTL 0x00000001UL
314#define NETLINK_SOCKET__READ 0x00000002UL
315#define NETLINK_SOCKET__WRITE 0x00000004UL
316#define NETLINK_SOCKET__CREATE 0x00000008UL
317#define NETLINK_SOCKET__GETATTR 0x00000010UL
318#define NETLINK_SOCKET__SETATTR 0x00000020UL
319#define NETLINK_SOCKET__LOCK 0x00000040UL
320#define NETLINK_SOCKET__RELABELFROM 0x00000080UL
321#define NETLINK_SOCKET__RELABELTO 0x00000100UL
322#define NETLINK_SOCKET__APPEND 0x00000200UL
323#define NETLINK_SOCKET__BIND 0x00000400UL
324#define NETLINK_SOCKET__CONNECT 0x00000800UL
325#define NETLINK_SOCKET__LISTEN 0x00001000UL
326#define NETLINK_SOCKET__ACCEPT 0x00002000UL
327#define NETLINK_SOCKET__GETOPT 0x00004000UL
328#define NETLINK_SOCKET__SETOPT 0x00008000UL
329#define NETLINK_SOCKET__SHUTDOWN 0x00010000UL
330#define NETLINK_SOCKET__RECVFROM 0x00020000UL
331#define NETLINK_SOCKET__SENDTO 0x00040000UL
332#define NETLINK_SOCKET__RECV_MSG 0x00080000UL
333#define NETLINK_SOCKET__SEND_MSG 0x00100000UL
334#define NETLINK_SOCKET__NAME_BIND 0x00200000UL
335#define PACKET_SOCKET__IOCTL 0x00000001UL
336#define PACKET_SOCKET__READ 0x00000002UL
337#define PACKET_SOCKET__WRITE 0x00000004UL
338#define PACKET_SOCKET__CREATE 0x00000008UL
339#define PACKET_SOCKET__GETATTR 0x00000010UL
340#define PACKET_SOCKET__SETATTR 0x00000020UL
341#define PACKET_SOCKET__LOCK 0x00000040UL
342#define PACKET_SOCKET__RELABELFROM 0x00000080UL
343#define PACKET_SOCKET__RELABELTO 0x00000100UL
344#define PACKET_SOCKET__APPEND 0x00000200UL
345#define PACKET_SOCKET__BIND 0x00000400UL
346#define PACKET_SOCKET__CONNECT 0x00000800UL
347#define PACKET_SOCKET__LISTEN 0x00001000UL
348#define PACKET_SOCKET__ACCEPT 0x00002000UL
349#define PACKET_SOCKET__GETOPT 0x00004000UL
350#define PACKET_SOCKET__SETOPT 0x00008000UL
351#define PACKET_SOCKET__SHUTDOWN 0x00010000UL
352#define PACKET_SOCKET__RECVFROM 0x00020000UL
353#define PACKET_SOCKET__SENDTO 0x00040000UL
354#define PACKET_SOCKET__RECV_MSG 0x00080000UL
355#define PACKET_SOCKET__SEND_MSG 0x00100000UL
356#define PACKET_SOCKET__NAME_BIND 0x00200000UL
357#define KEY_SOCKET__IOCTL 0x00000001UL
358#define KEY_SOCKET__READ 0x00000002UL
359#define KEY_SOCKET__WRITE 0x00000004UL
360#define KEY_SOCKET__CREATE 0x00000008UL
361#define KEY_SOCKET__GETATTR 0x00000010UL
362#define KEY_SOCKET__SETATTR 0x00000020UL
363#define KEY_SOCKET__LOCK 0x00000040UL
364#define KEY_SOCKET__RELABELFROM 0x00000080UL
365#define KEY_SOCKET__RELABELTO 0x00000100UL
366#define KEY_SOCKET__APPEND 0x00000200UL
367#define KEY_SOCKET__BIND 0x00000400UL
368#define KEY_SOCKET__CONNECT 0x00000800UL
369#define KEY_SOCKET__LISTEN 0x00001000UL
370#define KEY_SOCKET__ACCEPT 0x00002000UL
371#define KEY_SOCKET__GETOPT 0x00004000UL
372#define KEY_SOCKET__SETOPT 0x00008000UL
373#define KEY_SOCKET__SHUTDOWN 0x00010000UL
374#define KEY_SOCKET__RECVFROM 0x00020000UL
375#define KEY_SOCKET__SENDTO 0x00040000UL
376#define KEY_SOCKET__RECV_MSG 0x00080000UL
377#define KEY_SOCKET__SEND_MSG 0x00100000UL
378#define KEY_SOCKET__NAME_BIND 0x00200000UL
379#define UNIX_STREAM_SOCKET__IOCTL 0x00000001UL
380#define UNIX_STREAM_SOCKET__READ 0x00000002UL
381#define UNIX_STREAM_SOCKET__WRITE 0x00000004UL
382#define UNIX_STREAM_SOCKET__CREATE 0x00000008UL
383#define UNIX_STREAM_SOCKET__GETATTR 0x00000010UL
384#define UNIX_STREAM_SOCKET__SETATTR 0x00000020UL
385#define UNIX_STREAM_SOCKET__LOCK 0x00000040UL
386#define UNIX_STREAM_SOCKET__RELABELFROM 0x00000080UL
387#define UNIX_STREAM_SOCKET__RELABELTO 0x00000100UL
388#define UNIX_STREAM_SOCKET__APPEND 0x00000200UL
389#define UNIX_STREAM_SOCKET__BIND 0x00000400UL
390#define UNIX_STREAM_SOCKET__CONNECT 0x00000800UL
391#define UNIX_STREAM_SOCKET__LISTEN 0x00001000UL
392#define UNIX_STREAM_SOCKET__ACCEPT 0x00002000UL
393#define UNIX_STREAM_SOCKET__GETOPT 0x00004000UL
394#define UNIX_STREAM_SOCKET__SETOPT 0x00008000UL
395#define UNIX_STREAM_SOCKET__SHUTDOWN 0x00010000UL
396#define UNIX_STREAM_SOCKET__RECVFROM 0x00020000UL
397#define UNIX_STREAM_SOCKET__SENDTO 0x00040000UL
398#define UNIX_STREAM_SOCKET__RECV_MSG 0x00080000UL
399#define UNIX_STREAM_SOCKET__SEND_MSG 0x00100000UL
400#define UNIX_STREAM_SOCKET__NAME_BIND 0x00200000UL
401#define UNIX_STREAM_SOCKET__CONNECTTO 0x00400000UL
402#define UNIX_STREAM_SOCKET__NEWCONN 0x00800000UL
403#define UNIX_STREAM_SOCKET__ACCEPTFROM 0x01000000UL
404#define UNIX_DGRAM_SOCKET__IOCTL 0x00000001UL
405#define UNIX_DGRAM_SOCKET__READ 0x00000002UL
406#define UNIX_DGRAM_SOCKET__WRITE 0x00000004UL
407#define UNIX_DGRAM_SOCKET__CREATE 0x00000008UL
408#define UNIX_DGRAM_SOCKET__GETATTR 0x00000010UL
409#define UNIX_DGRAM_SOCKET__SETATTR 0x00000020UL
410#define UNIX_DGRAM_SOCKET__LOCK 0x00000040UL
411#define UNIX_DGRAM_SOCKET__RELABELFROM 0x00000080UL
412#define UNIX_DGRAM_SOCKET__RELABELTO 0x00000100UL
413#define UNIX_DGRAM_SOCKET__APPEND 0x00000200UL
414#define UNIX_DGRAM_SOCKET__BIND 0x00000400UL
415#define UNIX_DGRAM_SOCKET__CONNECT 0x00000800UL
416#define UNIX_DGRAM_SOCKET__LISTEN 0x00001000UL
417#define UNIX_DGRAM_SOCKET__ACCEPT 0x00002000UL
418#define UNIX_DGRAM_SOCKET__GETOPT 0x00004000UL
419#define UNIX_DGRAM_SOCKET__SETOPT 0x00008000UL
420#define UNIX_DGRAM_SOCKET__SHUTDOWN 0x00010000UL
421#define UNIX_DGRAM_SOCKET__RECVFROM 0x00020000UL
422#define UNIX_DGRAM_SOCKET__SENDTO 0x00040000UL
423#define UNIX_DGRAM_SOCKET__RECV_MSG 0x00080000UL
424#define UNIX_DGRAM_SOCKET__SEND_MSG 0x00100000UL
425#define UNIX_DGRAM_SOCKET__NAME_BIND 0x00200000UL
426#define TUN_SOCKET__IOCTL 0x00000001UL
427#define TUN_SOCKET__READ 0x00000002UL
428#define TUN_SOCKET__WRITE 0x00000004UL
429#define TUN_SOCKET__CREATE 0x00000008UL
430#define TUN_SOCKET__GETATTR 0x00000010UL
431#define TUN_SOCKET__SETATTR 0x00000020UL
432#define TUN_SOCKET__LOCK 0x00000040UL
433#define TUN_SOCKET__RELABELFROM 0x00000080UL
434#define TUN_SOCKET__RELABELTO 0x00000100UL
435#define TUN_SOCKET__APPEND 0x00000200UL
436#define TUN_SOCKET__BIND 0x00000400UL
437#define TUN_SOCKET__CONNECT 0x00000800UL
438#define TUN_SOCKET__LISTEN 0x00001000UL
439#define TUN_SOCKET__ACCEPT 0x00002000UL
440#define TUN_SOCKET__GETOPT 0x00004000UL
441#define TUN_SOCKET__SETOPT 0x00008000UL
442#define TUN_SOCKET__SHUTDOWN 0x00010000UL
443#define TUN_SOCKET__RECVFROM 0x00020000UL
444#define TUN_SOCKET__SENDTO 0x00040000UL
445#define TUN_SOCKET__RECV_MSG 0x00080000UL
446#define TUN_SOCKET__SEND_MSG 0x00100000UL
447#define TUN_SOCKET__NAME_BIND 0x00200000UL
448#define PROCESS__FORK 0x00000001UL
449#define PROCESS__TRANSITION 0x00000002UL
450#define PROCESS__SIGCHLD 0x00000004UL
451#define PROCESS__SIGKILL 0x00000008UL
452#define PROCESS__SIGSTOP 0x00000010UL
453#define PROCESS__SIGNULL 0x00000020UL
454#define PROCESS__SIGNAL 0x00000040UL
455#define PROCESS__PTRACE 0x00000080UL
456#define PROCESS__GETSCHED 0x00000100UL
457#define PROCESS__SETSCHED 0x00000200UL
458#define PROCESS__GETSESSION 0x00000400UL
459#define PROCESS__GETPGID 0x00000800UL
460#define PROCESS__SETPGID 0x00001000UL
461#define PROCESS__GETCAP 0x00002000UL
462#define PROCESS__SETCAP 0x00004000UL
463#define PROCESS__SHARE 0x00008000UL
464#define PROCESS__GETATTR 0x00010000UL
465#define PROCESS__SETEXEC 0x00020000UL
466#define PROCESS__SETFSCREATE 0x00040000UL
467#define PROCESS__NOATSECURE 0x00080000UL
468#define PROCESS__SIGINH 0x00100000UL
469#define PROCESS__SETRLIMIT 0x00200000UL
470#define PROCESS__RLIMITINH 0x00400000UL
471#define PROCESS__DYNTRANSITION 0x00800000UL
472#define PROCESS__SETCURRENT 0x01000000UL
473#define PROCESS__EXECMEM 0x02000000UL
474#define PROCESS__EXECSTACK 0x04000000UL
475#define PROCESS__EXECHEAP 0x08000000UL
476#define PROCESS__SETKEYCREATE 0x10000000UL
477#define PROCESS__SETSOCKCREATE 0x20000000UL
478#define IPC__CREATE 0x00000001UL
479#define IPC__DESTROY 0x00000002UL
480#define IPC__GETATTR 0x00000004UL
481#define IPC__SETATTR 0x00000008UL
482#define IPC__READ 0x00000010UL
483#define IPC__WRITE 0x00000020UL
484#define IPC__ASSOCIATE 0x00000040UL
485#define IPC__UNIX_READ 0x00000080UL
486#define IPC__UNIX_WRITE 0x00000100UL
487#define SEM__CREATE 0x00000001UL
488#define SEM__DESTROY 0x00000002UL
489#define SEM__GETATTR 0x00000004UL
490#define SEM__SETATTR 0x00000008UL
491#define SEM__READ 0x00000010UL
492#define SEM__WRITE 0x00000020UL
493#define SEM__ASSOCIATE 0x00000040UL
494#define SEM__UNIX_READ 0x00000080UL
495#define SEM__UNIX_WRITE 0x00000100UL
496#define MSGQ__CREATE 0x00000001UL
497#define MSGQ__DESTROY 0x00000002UL
498#define MSGQ__GETATTR 0x00000004UL
499#define MSGQ__SETATTR 0x00000008UL
500#define MSGQ__READ 0x00000010UL
501#define MSGQ__WRITE 0x00000020UL
502#define MSGQ__ASSOCIATE 0x00000040UL
503#define MSGQ__UNIX_READ 0x00000080UL
504#define MSGQ__UNIX_WRITE 0x00000100UL
505#define MSGQ__ENQUEUE 0x00000200UL
506#define MSG__SEND 0x00000001UL
507#define MSG__RECEIVE 0x00000002UL
508#define SHM__CREATE 0x00000001UL
509#define SHM__DESTROY 0x00000002UL
510#define SHM__GETATTR 0x00000004UL
511#define SHM__SETATTR 0x00000008UL
512#define SHM__READ 0x00000010UL
513#define SHM__WRITE 0x00000020UL
514#define SHM__ASSOCIATE 0x00000040UL
515#define SHM__UNIX_READ 0x00000080UL
516#define SHM__UNIX_WRITE 0x00000100UL
517#define SHM__LOCK 0x00000200UL
518#define SECURITY__COMPUTE_AV 0x00000001UL
519#define SECURITY__COMPUTE_CREATE 0x00000002UL
520#define SECURITY__COMPUTE_MEMBER 0x00000004UL
521#define SECURITY__CHECK_CONTEXT 0x00000008UL
522#define SECURITY__LOAD_POLICY 0x00000010UL
523#define SECURITY__COMPUTE_RELABEL 0x00000020UL
524#define SECURITY__COMPUTE_USER 0x00000040UL
525#define SECURITY__SETENFORCE 0x00000080UL
526#define SECURITY__SETBOOL 0x00000100UL
527#define SECURITY__SETSECPARAM 0x00000200UL
528#define SECURITY__SETCHECKREQPROT 0x00000400UL
529#define SYSTEM__IPC_INFO 0x00000001UL
530#define SYSTEM__SYSLOG_READ 0x00000002UL
531#define SYSTEM__SYSLOG_MOD 0x00000004UL
532#define SYSTEM__SYSLOG_CONSOLE 0x00000008UL
533#define SYSTEM__MODULE_REQUEST 0x00000010UL
534#define CAPABILITY__CHOWN 0x00000001UL
535#define CAPABILITY__DAC_OVERRIDE 0x00000002UL
536#define CAPABILITY__DAC_READ_SEARCH 0x00000004UL
537#define CAPABILITY__FOWNER 0x00000008UL
538#define CAPABILITY__FSETID 0x00000010UL
539#define CAPABILITY__KILL 0x00000020UL
540#define CAPABILITY__SETGID 0x00000040UL
541#define CAPABILITY__SETUID 0x00000080UL
542#define CAPABILITY__SETPCAP 0x00000100UL
543#define CAPABILITY__LINUX_IMMUTABLE 0x00000200UL
544#define CAPABILITY__NET_BIND_SERVICE 0x00000400UL
545#define CAPABILITY__NET_BROADCAST 0x00000800UL
546#define CAPABILITY__NET_ADMIN 0x00001000UL
547#define CAPABILITY__NET_RAW 0x00002000UL
548#define CAPABILITY__IPC_LOCK 0x00004000UL
549#define CAPABILITY__IPC_OWNER 0x00008000UL
550#define CAPABILITY__SYS_MODULE 0x00010000UL
551#define CAPABILITY__SYS_RAWIO 0x00020000UL
552#define CAPABILITY__SYS_CHROOT 0x00040000UL
553#define CAPABILITY__SYS_PTRACE 0x00080000UL
554#define CAPABILITY__SYS_PACCT 0x00100000UL
555#define CAPABILITY__SYS_ADMIN 0x00200000UL
556#define CAPABILITY__SYS_BOOT 0x00400000UL
557#define CAPABILITY__SYS_NICE 0x00800000UL
558#define CAPABILITY__SYS_RESOURCE 0x01000000UL
559#define CAPABILITY__SYS_TIME 0x02000000UL
560#define CAPABILITY__SYS_TTY_CONFIG 0x04000000UL
561#define CAPABILITY__MKNOD 0x08000000UL
562#define CAPABILITY__LEASE 0x10000000UL
563#define CAPABILITY__AUDIT_WRITE 0x20000000UL
564#define CAPABILITY__AUDIT_CONTROL 0x40000000UL
565#define CAPABILITY__SETFCAP 0x80000000UL
566#define CAPABILITY2__MAC_OVERRIDE 0x00000001UL
567#define CAPABILITY2__MAC_ADMIN 0x00000002UL
568#define NETLINK_ROUTE_SOCKET__IOCTL 0x00000001UL
569#define NETLINK_ROUTE_SOCKET__READ 0x00000002UL
570#define NETLINK_ROUTE_SOCKET__WRITE 0x00000004UL
571#define NETLINK_ROUTE_SOCKET__CREATE 0x00000008UL
572#define NETLINK_ROUTE_SOCKET__GETATTR 0x00000010UL
573#define NETLINK_ROUTE_SOCKET__SETATTR 0x00000020UL
574#define NETLINK_ROUTE_SOCKET__LOCK 0x00000040UL
575#define NETLINK_ROUTE_SOCKET__RELABELFROM 0x00000080UL
576#define NETLINK_ROUTE_SOCKET__RELABELTO 0x00000100UL
577#define NETLINK_ROUTE_SOCKET__APPEND 0x00000200UL
578#define NETLINK_ROUTE_SOCKET__BIND 0x00000400UL
579#define NETLINK_ROUTE_SOCKET__CONNECT 0x00000800UL
580#define NETLINK_ROUTE_SOCKET__LISTEN 0x00001000UL
581#define NETLINK_ROUTE_SOCKET__ACCEPT 0x00002000UL
582#define NETLINK_ROUTE_SOCKET__GETOPT 0x00004000UL
583#define NETLINK_ROUTE_SOCKET__SETOPT 0x00008000UL
584#define NETLINK_ROUTE_SOCKET__SHUTDOWN 0x00010000UL
585#define NETLINK_ROUTE_SOCKET__RECVFROM 0x00020000UL
586#define NETLINK_ROUTE_SOCKET__SENDTO 0x00040000UL
587#define NETLINK_ROUTE_SOCKET__RECV_MSG 0x00080000UL
588#define NETLINK_ROUTE_SOCKET__SEND_MSG 0x00100000UL
589#define NETLINK_ROUTE_SOCKET__NAME_BIND 0x00200000UL
590#define NETLINK_ROUTE_SOCKET__NLMSG_READ 0x00400000UL
591#define NETLINK_ROUTE_SOCKET__NLMSG_WRITE 0x00800000UL
592#define NETLINK_FIREWALL_SOCKET__IOCTL 0x00000001UL
593#define NETLINK_FIREWALL_SOCKET__READ 0x00000002UL
594#define NETLINK_FIREWALL_SOCKET__WRITE 0x00000004UL
595#define NETLINK_FIREWALL_SOCKET__CREATE 0x00000008UL
596#define NETLINK_FIREWALL_SOCKET__GETATTR 0x00000010UL
597#define NETLINK_FIREWALL_SOCKET__SETATTR 0x00000020UL
598#define NETLINK_FIREWALL_SOCKET__LOCK 0x00000040UL
599#define NETLINK_FIREWALL_SOCKET__RELABELFROM 0x00000080UL
600#define NETLINK_FIREWALL_SOCKET__RELABELTO 0x00000100UL
601#define NETLINK_FIREWALL_SOCKET__APPEND 0x00000200UL
602#define NETLINK_FIREWALL_SOCKET__BIND 0x00000400UL
603#define NETLINK_FIREWALL_SOCKET__CONNECT 0x00000800UL
604#define NETLINK_FIREWALL_SOCKET__LISTEN 0x00001000UL
605#define NETLINK_FIREWALL_SOCKET__ACCEPT 0x00002000UL
606#define NETLINK_FIREWALL_SOCKET__GETOPT 0x00004000UL
607#define NETLINK_FIREWALL_SOCKET__SETOPT 0x00008000UL
608#define NETLINK_FIREWALL_SOCKET__SHUTDOWN 0x00010000UL
609#define NETLINK_FIREWALL_SOCKET__RECVFROM 0x00020000UL
610#define NETLINK_FIREWALL_SOCKET__SENDTO 0x00040000UL
611#define NETLINK_FIREWALL_SOCKET__RECV_MSG 0x00080000UL
612#define NETLINK_FIREWALL_SOCKET__SEND_MSG 0x00100000UL
613#define NETLINK_FIREWALL_SOCKET__NAME_BIND 0x00200000UL
614#define NETLINK_FIREWALL_SOCKET__NLMSG_READ 0x00400000UL
615#define NETLINK_FIREWALL_SOCKET__NLMSG_WRITE 0x00800000UL
616#define NETLINK_TCPDIAG_SOCKET__IOCTL 0x00000001UL
617#define NETLINK_TCPDIAG_SOCKET__READ 0x00000002UL
618#define NETLINK_TCPDIAG_SOCKET__WRITE 0x00000004UL
619#define NETLINK_TCPDIAG_SOCKET__CREATE 0x00000008UL
620#define NETLINK_TCPDIAG_SOCKET__GETATTR 0x00000010UL
621#define NETLINK_TCPDIAG_SOCKET__SETATTR 0x00000020UL
622#define NETLINK_TCPDIAG_SOCKET__LOCK 0x00000040UL
623#define NETLINK_TCPDIAG_SOCKET__RELABELFROM 0x00000080UL
624#define NETLINK_TCPDIAG_SOCKET__RELABELTO 0x00000100UL
625#define NETLINK_TCPDIAG_SOCKET__APPEND 0x00000200UL
626#define NETLINK_TCPDIAG_SOCKET__BIND 0x00000400UL
627#define NETLINK_TCPDIAG_SOCKET__CONNECT 0x00000800UL
628#define NETLINK_TCPDIAG_SOCKET__LISTEN 0x00001000UL
629#define NETLINK_TCPDIAG_SOCKET__ACCEPT 0x00002000UL
630#define NETLINK_TCPDIAG_SOCKET__GETOPT 0x00004000UL
631#define NETLINK_TCPDIAG_SOCKET__SETOPT 0x00008000UL
632#define NETLINK_TCPDIAG_SOCKET__SHUTDOWN 0x00010000UL
633#define NETLINK_TCPDIAG_SOCKET__RECVFROM 0x00020000UL
634#define NETLINK_TCPDIAG_SOCKET__SENDTO 0x00040000UL
635#define NETLINK_TCPDIAG_SOCKET__RECV_MSG 0x00080000UL
636#define NETLINK_TCPDIAG_SOCKET__SEND_MSG 0x00100000UL
637#define NETLINK_TCPDIAG_SOCKET__NAME_BIND 0x00200000UL
638#define NETLINK_TCPDIAG_SOCKET__NLMSG_READ 0x00400000UL
639#define NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE 0x00800000UL
640#define NETLINK_NFLOG_SOCKET__IOCTL 0x00000001UL
641#define NETLINK_NFLOG_SOCKET__READ 0x00000002UL
642#define NETLINK_NFLOG_SOCKET__WRITE 0x00000004UL
643#define NETLINK_NFLOG_SOCKET__CREATE 0x00000008UL
644#define NETLINK_NFLOG_SOCKET__GETATTR 0x00000010UL
645#define NETLINK_NFLOG_SOCKET__SETATTR 0x00000020UL
646#define NETLINK_NFLOG_SOCKET__LOCK 0x00000040UL
647#define NETLINK_NFLOG_SOCKET__RELABELFROM 0x00000080UL
648#define NETLINK_NFLOG_SOCKET__RELABELTO 0x00000100UL
649#define NETLINK_NFLOG_SOCKET__APPEND 0x00000200UL
650#define NETLINK_NFLOG_SOCKET__BIND 0x00000400UL
651#define NETLINK_NFLOG_SOCKET__CONNECT 0x00000800UL
652#define NETLINK_NFLOG_SOCKET__LISTEN 0x00001000UL
653#define NETLINK_NFLOG_SOCKET__ACCEPT 0x00002000UL
654#define NETLINK_NFLOG_SOCKET__GETOPT 0x00004000UL
655#define NETLINK_NFLOG_SOCKET__SETOPT 0x00008000UL
656#define NETLINK_NFLOG_SOCKET__SHUTDOWN 0x00010000UL
657#define NETLINK_NFLOG_SOCKET__RECVFROM 0x00020000UL
658#define NETLINK_NFLOG_SOCKET__SENDTO 0x00040000UL
659#define NETLINK_NFLOG_SOCKET__RECV_MSG 0x00080000UL
660#define NETLINK_NFLOG_SOCKET__SEND_MSG 0x00100000UL
661#define NETLINK_NFLOG_SOCKET__NAME_BIND 0x00200000UL
662#define NETLINK_XFRM_SOCKET__IOCTL 0x00000001UL
663#define NETLINK_XFRM_SOCKET__READ 0x00000002UL
664#define NETLINK_XFRM_SOCKET__WRITE 0x00000004UL
665#define NETLINK_XFRM_SOCKET__CREATE 0x00000008UL
666#define NETLINK_XFRM_SOCKET__GETATTR 0x00000010UL
667#define NETLINK_XFRM_SOCKET__SETATTR 0x00000020UL
668#define NETLINK_XFRM_SOCKET__LOCK 0x00000040UL
669#define NETLINK_XFRM_SOCKET__RELABELFROM 0x00000080UL
670#define NETLINK_XFRM_SOCKET__RELABELTO 0x00000100UL
671#define NETLINK_XFRM_SOCKET__APPEND 0x00000200UL
672#define NETLINK_XFRM_SOCKET__BIND 0x00000400UL
673#define NETLINK_XFRM_SOCKET__CONNECT 0x00000800UL
674#define NETLINK_XFRM_SOCKET__LISTEN 0x00001000UL
675#define NETLINK_XFRM_SOCKET__ACCEPT 0x00002000UL
676#define NETLINK_XFRM_SOCKET__GETOPT 0x00004000UL
677#define NETLINK_XFRM_SOCKET__SETOPT 0x00008000UL
678#define NETLINK_XFRM_SOCKET__SHUTDOWN 0x00010000UL
679#define NETLINK_XFRM_SOCKET__RECVFROM 0x00020000UL
680#define NETLINK_XFRM_SOCKET__SENDTO 0x00040000UL
681#define NETLINK_XFRM_SOCKET__RECV_MSG 0x00080000UL
682#define NETLINK_XFRM_SOCKET__SEND_MSG 0x00100000UL
683#define NETLINK_XFRM_SOCKET__NAME_BIND 0x00200000UL
684#define NETLINK_XFRM_SOCKET__NLMSG_READ 0x00400000UL
685#define NETLINK_XFRM_SOCKET__NLMSG_WRITE 0x00800000UL
686#define NETLINK_SELINUX_SOCKET__IOCTL 0x00000001UL
687#define NETLINK_SELINUX_SOCKET__READ 0x00000002UL
688#define NETLINK_SELINUX_SOCKET__WRITE 0x00000004UL
689#define NETLINK_SELINUX_SOCKET__CREATE 0x00000008UL
690#define NETLINK_SELINUX_SOCKET__GETATTR 0x00000010UL
691#define NETLINK_SELINUX_SOCKET__SETATTR 0x00000020UL
692#define NETLINK_SELINUX_SOCKET__LOCK 0x00000040UL
693#define NETLINK_SELINUX_SOCKET__RELABELFROM 0x00000080UL
694#define NETLINK_SELINUX_SOCKET__RELABELTO 0x00000100UL
695#define NETLINK_SELINUX_SOCKET__APPEND 0x00000200UL
696#define NETLINK_SELINUX_SOCKET__BIND 0x00000400UL
697#define NETLINK_SELINUX_SOCKET__CONNECT 0x00000800UL
698#define NETLINK_SELINUX_SOCKET__LISTEN 0x00001000UL
699#define NETLINK_SELINUX_SOCKET__ACCEPT 0x00002000UL
700#define NETLINK_SELINUX_SOCKET__GETOPT 0x00004000UL
701#define NETLINK_SELINUX_SOCKET__SETOPT 0x00008000UL
702#define NETLINK_SELINUX_SOCKET__SHUTDOWN 0x00010000UL
703#define NETLINK_SELINUX_SOCKET__RECVFROM 0x00020000UL
704#define NETLINK_SELINUX_SOCKET__SENDTO 0x00040000UL
705#define NETLINK_SELINUX_SOCKET__RECV_MSG 0x00080000UL
706#define NETLINK_SELINUX_SOCKET__SEND_MSG 0x00100000UL
707#define NETLINK_SELINUX_SOCKET__NAME_BIND 0x00200000UL
708#define NETLINK_AUDIT_SOCKET__IOCTL 0x00000001UL
709#define NETLINK_AUDIT_SOCKET__READ 0x00000002UL
710#define NETLINK_AUDIT_SOCKET__WRITE 0x00000004UL
711#define NETLINK_AUDIT_SOCKET__CREATE 0x00000008UL
712#define NETLINK_AUDIT_SOCKET__GETATTR 0x00000010UL
713#define NETLINK_AUDIT_SOCKET__SETATTR 0x00000020UL
714#define NETLINK_AUDIT_SOCKET__LOCK 0x00000040UL
715#define NETLINK_AUDIT_SOCKET__RELABELFROM 0x00000080UL
716#define NETLINK_AUDIT_SOCKET__RELABELTO 0x00000100UL
717#define NETLINK_AUDIT_SOCKET__APPEND 0x00000200UL
718#define NETLINK_AUDIT_SOCKET__BIND 0x00000400UL
719#define NETLINK_AUDIT_SOCKET__CONNECT 0x00000800UL
720#define NETLINK_AUDIT_SOCKET__LISTEN 0x00001000UL
721#define NETLINK_AUDIT_SOCKET__ACCEPT 0x00002000UL
722#define NETLINK_AUDIT_SOCKET__GETOPT 0x00004000UL
723#define NETLINK_AUDIT_SOCKET__SETOPT 0x00008000UL
724#define NETLINK_AUDIT_SOCKET__SHUTDOWN 0x00010000UL
725#define NETLINK_AUDIT_SOCKET__RECVFROM 0x00020000UL
726#define NETLINK_AUDIT_SOCKET__SENDTO 0x00040000UL
727#define NETLINK_AUDIT_SOCKET__RECV_MSG 0x00080000UL
728#define NETLINK_AUDIT_SOCKET__SEND_MSG 0x00100000UL
729#define NETLINK_AUDIT_SOCKET__NAME_BIND 0x00200000UL
730#define NETLINK_AUDIT_SOCKET__NLMSG_READ 0x00400000UL
731#define NETLINK_AUDIT_SOCKET__NLMSG_WRITE 0x00800000UL
732#define NETLINK_AUDIT_SOCKET__NLMSG_RELAY 0x01000000UL
733#define NETLINK_AUDIT_SOCKET__NLMSG_READPRIV 0x02000000UL
734#define NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT 0x04000000UL
735#define NETLINK_IP6FW_SOCKET__IOCTL 0x00000001UL
736#define NETLINK_IP6FW_SOCKET__READ 0x00000002UL
737#define NETLINK_IP6FW_SOCKET__WRITE 0x00000004UL
738#define NETLINK_IP6FW_SOCKET__CREATE 0x00000008UL
739#define NETLINK_IP6FW_SOCKET__GETATTR 0x00000010UL
740#define NETLINK_IP6FW_SOCKET__SETATTR 0x00000020UL
741#define NETLINK_IP6FW_SOCKET__LOCK 0x00000040UL
742#define NETLINK_IP6FW_SOCKET__RELABELFROM 0x00000080UL
743#define NETLINK_IP6FW_SOCKET__RELABELTO 0x00000100UL
744#define NETLINK_IP6FW_SOCKET__APPEND 0x00000200UL
745#define NETLINK_IP6FW_SOCKET__BIND 0x00000400UL
746#define NETLINK_IP6FW_SOCKET__CONNECT 0x00000800UL
747#define NETLINK_IP6FW_SOCKET__LISTEN 0x00001000UL
748#define NETLINK_IP6FW_SOCKET__ACCEPT 0x00002000UL
749#define NETLINK_IP6FW_SOCKET__GETOPT 0x00004000UL
750#define NETLINK_IP6FW_SOCKET__SETOPT 0x00008000UL
751#define NETLINK_IP6FW_SOCKET__SHUTDOWN 0x00010000UL
752#define NETLINK_IP6FW_SOCKET__RECVFROM 0x00020000UL
753#define NETLINK_IP6FW_SOCKET__SENDTO 0x00040000UL
754#define NETLINK_IP6FW_SOCKET__RECV_MSG 0x00080000UL
755#define NETLINK_IP6FW_SOCKET__SEND_MSG 0x00100000UL
756#define NETLINK_IP6FW_SOCKET__NAME_BIND 0x00200000UL
757#define NETLINK_IP6FW_SOCKET__NLMSG_READ 0x00400000UL
758#define NETLINK_IP6FW_SOCKET__NLMSG_WRITE 0x00800000UL
759#define NETLINK_DNRT_SOCKET__IOCTL 0x00000001UL
760#define NETLINK_DNRT_SOCKET__READ 0x00000002UL
761#define NETLINK_DNRT_SOCKET__WRITE 0x00000004UL
762#define NETLINK_DNRT_SOCKET__CREATE 0x00000008UL
763#define NETLINK_DNRT_SOCKET__GETATTR 0x00000010UL
764#define NETLINK_DNRT_SOCKET__SETATTR 0x00000020UL
765#define NETLINK_DNRT_SOCKET__LOCK 0x00000040UL
766#define NETLINK_DNRT_SOCKET__RELABELFROM 0x00000080UL
767#define NETLINK_DNRT_SOCKET__RELABELTO 0x00000100UL
768#define NETLINK_DNRT_SOCKET__APPEND 0x00000200UL
769#define NETLINK_DNRT_SOCKET__BIND 0x00000400UL
770#define NETLINK_DNRT_SOCKET__CONNECT 0x00000800UL
771#define NETLINK_DNRT_SOCKET__LISTEN 0x00001000UL
772#define NETLINK_DNRT_SOCKET__ACCEPT 0x00002000UL
773#define NETLINK_DNRT_SOCKET__GETOPT 0x00004000UL
774#define NETLINK_DNRT_SOCKET__SETOPT 0x00008000UL
775#define NETLINK_DNRT_SOCKET__SHUTDOWN 0x00010000UL
776#define NETLINK_DNRT_SOCKET__RECVFROM 0x00020000UL
777#define NETLINK_DNRT_SOCKET__SENDTO 0x00040000UL
778#define NETLINK_DNRT_SOCKET__RECV_MSG 0x00080000UL
779#define NETLINK_DNRT_SOCKET__SEND_MSG 0x00100000UL
780#define NETLINK_DNRT_SOCKET__NAME_BIND 0x00200000UL
781#define ASSOCIATION__SENDTO 0x00000001UL
782#define ASSOCIATION__RECVFROM 0x00000002UL
783#define ASSOCIATION__SETCONTEXT 0x00000004UL
784#define ASSOCIATION__POLMATCH 0x00000008UL
785#define NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL 0x00000001UL
786#define NETLINK_KOBJECT_UEVENT_SOCKET__READ 0x00000002UL
787#define NETLINK_KOBJECT_UEVENT_SOCKET__WRITE 0x00000004UL
788#define NETLINK_KOBJECT_UEVENT_SOCKET__CREATE 0x00000008UL
789#define NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR 0x00000010UL
790#define NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR 0x00000020UL
791#define NETLINK_KOBJECT_UEVENT_SOCKET__LOCK 0x00000040UL
792#define NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM 0x00000080UL
793#define NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO 0x00000100UL
794#define NETLINK_KOBJECT_UEVENT_SOCKET__APPEND 0x00000200UL
795#define NETLINK_KOBJECT_UEVENT_SOCKET__BIND 0x00000400UL
796#define NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT 0x00000800UL
797#define NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN 0x00001000UL
798#define NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT 0x00002000UL
799#define NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT 0x00004000UL
800#define NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT 0x00008000UL
801#define NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN 0x00010000UL
802#define NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM 0x00020000UL
803#define NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO 0x00040000UL
804#define NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG 0x00080000UL
805#define NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG 0x00100000UL
806#define NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND 0x00200000UL
807#define APPLETALK_SOCKET__IOCTL 0x00000001UL
808#define APPLETALK_SOCKET__READ 0x00000002UL
809#define APPLETALK_SOCKET__WRITE 0x00000004UL
810#define APPLETALK_SOCKET__CREATE 0x00000008UL
811#define APPLETALK_SOCKET__GETATTR 0x00000010UL
812#define APPLETALK_SOCKET__SETATTR 0x00000020UL
813#define APPLETALK_SOCKET__LOCK 0x00000040UL
814#define APPLETALK_SOCKET__RELABELFROM 0x00000080UL
815#define APPLETALK_SOCKET__RELABELTO 0x00000100UL
816#define APPLETALK_SOCKET__APPEND 0x00000200UL
817#define APPLETALK_SOCKET__BIND 0x00000400UL
818#define APPLETALK_SOCKET__CONNECT 0x00000800UL
819#define APPLETALK_SOCKET__LISTEN 0x00001000UL
820#define APPLETALK_SOCKET__ACCEPT 0x00002000UL
821#define APPLETALK_SOCKET__GETOPT 0x00004000UL
822#define APPLETALK_SOCKET__SETOPT 0x00008000UL
823#define APPLETALK_SOCKET__SHUTDOWN 0x00010000UL
824#define APPLETALK_SOCKET__RECVFROM 0x00020000UL
825#define APPLETALK_SOCKET__SENDTO 0x00040000UL
826#define APPLETALK_SOCKET__RECV_MSG 0x00080000UL
827#define APPLETALK_SOCKET__SEND_MSG 0x00100000UL
828#define APPLETALK_SOCKET__NAME_BIND 0x00200000UL
829#define PACKET__SEND 0x00000001UL
830#define PACKET__RECV 0x00000002UL
831#define PACKET__RELABELTO 0x00000004UL
832#define PACKET__FLOW_IN 0x00000008UL
833#define PACKET__FLOW_OUT 0x00000010UL
834#define PACKET__FORWARD_IN 0x00000020UL
835#define PACKET__FORWARD_OUT 0x00000040UL
836#define KEY__VIEW 0x00000001UL
837#define KEY__READ 0x00000002UL
838#define KEY__WRITE 0x00000004UL
839#define KEY__SEARCH 0x00000008UL
840#define KEY__LINK 0x00000010UL
841#define KEY__SETATTR 0x00000020UL
842#define KEY__CREATE 0x00000040UL
843#define DCCP_SOCKET__IOCTL 0x00000001UL
844#define DCCP_SOCKET__READ 0x00000002UL
845#define DCCP_SOCKET__WRITE 0x00000004UL
846#define DCCP_SOCKET__CREATE 0x00000008UL
847#define DCCP_SOCKET__GETATTR 0x00000010UL
848#define DCCP_SOCKET__SETATTR 0x00000020UL
849#define DCCP_SOCKET__LOCK 0x00000040UL
850#define DCCP_SOCKET__RELABELFROM 0x00000080UL
851#define DCCP_SOCKET__RELABELTO 0x00000100UL
852#define DCCP_SOCKET__APPEND 0x00000200UL
853#define DCCP_SOCKET__BIND 0x00000400UL
854#define DCCP_SOCKET__CONNECT 0x00000800UL
855#define DCCP_SOCKET__LISTEN 0x00001000UL
856#define DCCP_SOCKET__ACCEPT 0x00002000UL
857#define DCCP_SOCKET__GETOPT 0x00004000UL
858#define DCCP_SOCKET__SETOPT 0x00008000UL
859#define DCCP_SOCKET__SHUTDOWN 0x00010000UL
860#define DCCP_SOCKET__RECVFROM 0x00020000UL
861#define DCCP_SOCKET__SENDTO 0x00040000UL
862#define DCCP_SOCKET__RECV_MSG 0x00080000UL
863#define DCCP_SOCKET__SEND_MSG 0x00100000UL
864#define DCCP_SOCKET__NAME_BIND 0x00200000UL
865#define DCCP_SOCKET__NODE_BIND 0x00400000UL
866#define DCCP_SOCKET__NAME_CONNECT 0x00800000UL
867#define MEMPROTECT__MMAP_ZERO 0x00000001UL
868#define PEER__RECV 0x00000001UL
869#define KERNEL_SERVICE__USE_AS_OVERRIDE 0x00000001UL
870#define KERNEL_SERVICE__CREATE_FILES_AS 0x00000002UL
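With av_permissions.h removed, a class's permission bits are implied by position in its perms[] name list: perms[i] corresponds to access-vector bit 1 << i, which is what the reworked avc_dump_av() loop relies on. A small hypothetical helper illustrating the correspondence; the dir_perms[] excerpt mirrors COMMON_FILE_PERMS plus "add_name", whose computed bit 0x00020000 matches the DIR__ADD_NAME define deleted above:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* perms[i] of a class corresponds to access-vector bit 1 << i;
 * returns 0 if the class does not define the named permission. */
static uint32_t perm_to_av_bit(const char *const *perms, const char *name)
{
	int i;

	for (i = 0; i < 32 && perms[i]; i++)
		if (strcmp(perms[i], name) == 0)
			return (uint32_t)1 << i;
	return 0;
}

int main(void)
{
	/* Hypothetical "dir" excerpt: the common file permissions first,
	 * then the first class-specific one. */
	static const char *const dir_perms[] = {
		"ioctl", "read", "write", "create", "getattr", "setattr",
		"lock", "relabelfrom", "relabelto", "append", "unlink",
		"link", "rename", "execute", "swapon", "quotaon", "mounton",
		"add_name", NULL
	};

	printf("add_name -> 0x%08x\n",
	       (unsigned)perm_to_av_bit(dir_perms, "add_name"));
	return 0;
}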
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index bb1ec801bdfe..4677aa519b04 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -10,26 +10,13 @@
 
 int avc_ss_reset(u32 seqno);
 
-struct av_perm_to_string {
-	u16 tclass;
-	u32 value;
+/* Class/perm mapping support */
+struct security_class_mapping {
 	const char *name;
+	const char *perms[sizeof(u32) * 8 + 1];
 };
 
-struct av_inherit {
-	const char **common_pts;
-	u32 common_base;
-	u16 tclass;
-};
-
-struct selinux_class_perm {
-	const struct av_perm_to_string *av_perm_to_string;
-	u32 av_pts_len;
-	u32 cts_len;
-	const char **class_to_string;
-	const struct av_inherit *av_inherit;
-	u32 av_inherit_len;
-};
+extern struct security_class_mapping secclass_map[];
 
 #endif /* _SELINUX_AVC_SS_H_ */
 
diff --git a/security/selinux/include/class_to_string.h b/security/selinux/include/class_to_string.h
deleted file mode 100644
index 7ab9299bfb6b..000000000000
--- a/security/selinux/include/class_to_string.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2/*
3 * Security object class definitions
4 */
5 S_(NULL)
6 S_("security")
7 S_("process")
8 S_("system")
9 S_("capability")
10 S_("filesystem")
11 S_("file")
12 S_("dir")
13 S_("fd")
14 S_("lnk_file")
15 S_("chr_file")
16 S_("blk_file")
17 S_("sock_file")
18 S_("fifo_file")
19 S_("socket")
20 S_("tcp_socket")
21 S_("udp_socket")
22 S_("rawip_socket")
23 S_("node")
24 S_("netif")
25 S_("netlink_socket")
26 S_("packet_socket")
27 S_("key_socket")
28 S_("unix_stream_socket")
29 S_("unix_dgram_socket")
30 S_("sem")
31 S_("msg")
32 S_("msgq")
33 S_("shm")
34 S_("ipc")
35 S_(NULL)
36 S_(NULL)
37 S_(NULL)
38 S_(NULL)
39 S_(NULL)
40 S_(NULL)
41 S_(NULL)
42 S_(NULL)
43 S_(NULL)
44 S_(NULL)
45 S_(NULL)
46 S_(NULL)
47 S_(NULL)
48 S_("netlink_route_socket")
49 S_("netlink_firewall_socket")
50 S_("netlink_tcpdiag_socket")
51 S_("netlink_nflog_socket")
52 S_("netlink_xfrm_socket")
53 S_("netlink_selinux_socket")
54 S_("netlink_audit_socket")
55 S_("netlink_ip6fw_socket")
56 S_("netlink_dnrt_socket")
57 S_(NULL)
58 S_(NULL)
59 S_("association")
60 S_("netlink_kobject_uevent_socket")
61 S_("appletalk_socket")
62 S_("packet")
63 S_("key")
64 S_(NULL)
65 S_("dccp_socket")
66 S_("memprotect")
67 S_(NULL)
68 S_(NULL)
69 S_(NULL)
70 S_(NULL)
71 S_(NULL)
72 S_(NULL)
73 S_("peer")
74 S_("capability2")
75 S_(NULL)
76 S_(NULL)
77 S_(NULL)
78 S_(NULL)
79 S_("kernel_service")
80 S_("tun_socket")
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
new file mode 100644
index 000000000000..8b32e959bb2e
--- /dev/null
+++ b/security/selinux/include/classmap.h
@@ -0,0 +1,150 @@
1#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
2 "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append"
3
4#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
5 "rename", "execute", "swapon", "quotaon", "mounton"
6
7#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
8 "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \
9 "sendto", "recv_msg", "send_msg", "name_bind"
10
11#define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \
12 "write", "associate", "unix_read", "unix_write"
13
14struct security_class_mapping secclass_map[] = {
15 { "security",
16 { "compute_av", "compute_create", "compute_member",
17 "check_context", "load_policy", "compute_relabel",
18 "compute_user", "setenforce", "setbool", "setsecparam",
19 "setcheckreqprot", NULL } },
20 { "process",
21 { "fork", "transition", "sigchld", "sigkill",
22 "sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
23 "getsession", "getpgid", "setpgid", "getcap", "setcap", "share",
24 "getattr", "setexec", "setfscreate", "noatsecure", "siginh",
25 "setrlimit", "rlimitinh", "dyntransition", "setcurrent",
26 "execmem", "execstack", "execheap", "setkeycreate",
27 "setsockcreate", NULL } },
28 { "system",
29 { "ipc_info", "syslog_read", "syslog_mod",
30 "syslog_console", "module_request", NULL } },
31 { "capability",
32 { "chown", "dac_override", "dac_read_search",
33 "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap",
34 "linux_immutable", "net_bind_service", "net_broadcast",
35 "net_admin", "net_raw", "ipc_lock", "ipc_owner", "sys_module",
36 "sys_rawio", "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin",
37 "sys_boot", "sys_nice", "sys_resource", "sys_time",
38 "sys_tty_config", "mknod", "lease", "audit_write",
39 "audit_control", "setfcap", NULL } },
40 { "filesystem",
41 { "mount", "remount", "unmount", "getattr",
42 "relabelfrom", "relabelto", "transition", "associate", "quotamod",
43 "quotaget", NULL } },
44 { "file",
45 { COMMON_FILE_PERMS,
46 "execute_no_trans", "entrypoint", "execmod", "open", NULL } },
47 { "dir",
48 { COMMON_FILE_PERMS, "add_name", "remove_name",
49 "reparent", "search", "rmdir", "open", NULL } },
50 { "fd", { "use", NULL } },
51 { "lnk_file",
52 { COMMON_FILE_PERMS, NULL } },
53 { "chr_file",
54 { COMMON_FILE_PERMS,
55 "execute_no_trans", "entrypoint", "execmod", "open", NULL } },
56 { "blk_file",
57 { COMMON_FILE_PERMS, "open", NULL } },
58 { "sock_file",
59 { COMMON_FILE_PERMS, "open", NULL } },
60 { "fifo_file",
61 { COMMON_FILE_PERMS, "open", NULL } },
62 { "socket",
63 { COMMON_SOCK_PERMS, NULL } },
64 { "tcp_socket",
65 { COMMON_SOCK_PERMS,
66 "connectto", "newconn", "acceptfrom", "node_bind", "name_connect",
67 NULL } },
68 { "udp_socket",
69 { COMMON_SOCK_PERMS,
70 "node_bind", NULL } },
71 { "rawip_socket",
72 { COMMON_SOCK_PERMS,
73 "node_bind", NULL } },
74 { "node",
75 { "tcp_recv", "tcp_send", "udp_recv", "udp_send",
76 "rawip_recv", "rawip_send", "enforce_dest",
77 "dccp_recv", "dccp_send", "recvfrom", "sendto", NULL } },
78 { "netif",
79 { "tcp_recv", "tcp_send", "udp_recv", "udp_send",
80 "rawip_recv", "rawip_send", "dccp_recv", "dccp_send",
81 "ingress", "egress", NULL } },
82 { "netlink_socket",
83 { COMMON_SOCK_PERMS, NULL } },
84 { "packet_socket",
85 { COMMON_SOCK_PERMS, NULL } },
86 { "key_socket",
87 { COMMON_SOCK_PERMS, NULL } },
88 { "unix_stream_socket",
89 { COMMON_SOCK_PERMS, "connectto", "newconn", "acceptfrom", NULL
90 } },
91 { "unix_dgram_socket",
92 { COMMON_SOCK_PERMS, NULL
93 } },
94 { "sem",
95 { COMMON_IPC_PERMS, NULL } },
96 { "msg", { "send", "receive", NULL } },
97 { "msgq",
98 { COMMON_IPC_PERMS, "enqueue", NULL } },
99 { "shm",
100 { COMMON_IPC_PERMS, "lock", NULL } },
101 { "ipc",
102 { COMMON_IPC_PERMS, NULL } },
103 { "netlink_route_socket",
104 { COMMON_SOCK_PERMS,
105 "nlmsg_read", "nlmsg_write", NULL } },
106 { "netlink_firewall_socket",
107 { COMMON_SOCK_PERMS,
108 "nlmsg_read", "nlmsg_write", NULL } },
109 { "netlink_tcpdiag_socket",
110 { COMMON_SOCK_PERMS,
111 "nlmsg_read", "nlmsg_write", NULL } },
112 { "netlink_nflog_socket",
113 { COMMON_SOCK_PERMS, NULL } },
114 { "netlink_xfrm_socket",
115 { COMMON_SOCK_PERMS,
116 "nlmsg_read", "nlmsg_write", NULL } },
117 { "netlink_selinux_socket",
118 { COMMON_SOCK_PERMS, NULL } },
119 { "netlink_audit_socket",
120 { COMMON_SOCK_PERMS,
121 "nlmsg_read", "nlmsg_write", "nlmsg_relay", "nlmsg_readpriv",
122 "nlmsg_tty_audit", NULL } },
123 { "netlink_ip6fw_socket",
124 { COMMON_SOCK_PERMS,
125 "nlmsg_read", "nlmsg_write", NULL } },
126 { "netlink_dnrt_socket",
127 { COMMON_SOCK_PERMS, NULL } },
128 { "association",
129 { "sendto", "recvfrom", "setcontext", "polmatch", NULL } },
130 { "netlink_kobject_uevent_socket",
131 { COMMON_SOCK_PERMS, NULL } },
132 { "appletalk_socket",
133 { COMMON_SOCK_PERMS, NULL } },
134 { "packet",
135 { "send", "recv", "relabelto", "flow_in", "flow_out",
136 "forward_in", "forward_out", NULL } },
137 { "key",
138 { "view", "read", "write", "search", "link", "setattr", "create",
139 NULL } },
140 { "dccp_socket",
141 { COMMON_SOCK_PERMS,
142 "node_bind", "name_connect", NULL } },
143 { "memprotect", { "mmap_zero", NULL } },
144 { "peer", { "recv", NULL } },
145 { "capability2", { "mac_override", "mac_admin", NULL } },
146 { "kernel_service", { "use_as_override", "create_files_as", NULL } },
147 { "tun_socket",
148 { COMMON_SOCK_PERMS, NULL } },
149 { NULL }
150 };
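The table above is consumed by selinux_set_mapping() later in this patch: entry i describes kernel class value i + 1, and the permission string at position k corresponds to bit (1 << k) of the kernel's access vector for that class. A hedged, userspace-only sketch of that layout (the struct is duplicated here for illustration and dump_mapping() is not a kernel function):

#include <stdio.h>

struct security_class_mapping {
	const char *name;
	const char *perms[sizeof(unsigned int) * 8 + 1];
};

void dump_mapping(const struct security_class_mapping *map)
{
	unsigned int i, k;

	for (i = 0; map[i].name; i++) {			/* NULL name ends the table */
		printf("kernel class %u = %s\n", i + 1, map[i].name);
		for (k = 0; map[i].perms[k]; k++)	/* NULL perm ends the list */
			printf("  bit 0x%08x = %s\n", 1u << k, map[i].perms[k]);
	}
}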
diff --git a/security/selinux/include/common_perm_to_string.h b/security/selinux/include/common_perm_to_string.h
deleted file mode 100644
index ce5b6e2fe9dd..000000000000
--- a/security/selinux/include/common_perm_to_string.h
+++ /dev/null
@@ -1,58 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2TB_(common_file_perm_to_string)
3 S_("ioctl")
4 S_("read")
5 S_("write")
6 S_("create")
7 S_("getattr")
8 S_("setattr")
9 S_("lock")
10 S_("relabelfrom")
11 S_("relabelto")
12 S_("append")
13 S_("unlink")
14 S_("link")
15 S_("rename")
16 S_("execute")
17 S_("swapon")
18 S_("quotaon")
19 S_("mounton")
20TE_(common_file_perm_to_string)
21
22TB_(common_socket_perm_to_string)
23 S_("ioctl")
24 S_("read")
25 S_("write")
26 S_("create")
27 S_("getattr")
28 S_("setattr")
29 S_("lock")
30 S_("relabelfrom")
31 S_("relabelto")
32 S_("append")
33 S_("bind")
34 S_("connect")
35 S_("listen")
36 S_("accept")
37 S_("getopt")
38 S_("setopt")
39 S_("shutdown")
40 S_("recvfrom")
41 S_("sendto")
42 S_("recv_msg")
43 S_("send_msg")
44 S_("name_bind")
45TE_(common_socket_perm_to_string)
46
47TB_(common_ipc_perm_to_string)
48 S_("create")
49 S_("destroy")
50 S_("getattr")
51 S_("setattr")
52 S_("read")
53 S_("write")
54 S_("associate")
55 S_("unix_read")
56 S_("unix_write")
57TE_(common_ipc_perm_to_string)
58
diff --git a/security/selinux/include/flask.h b/security/selinux/include/flask.h
deleted file mode 100644
index f248500a1e3c..000000000000
--- a/security/selinux/include/flask.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2#ifndef _SELINUX_FLASK_H_
3#define _SELINUX_FLASK_H_
4
5/*
6 * Security object class definitions
7 */
8#define SECCLASS_SECURITY 1
9#define SECCLASS_PROCESS 2
10#define SECCLASS_SYSTEM 3
11#define SECCLASS_CAPABILITY 4
12#define SECCLASS_FILESYSTEM 5
13#define SECCLASS_FILE 6
14#define SECCLASS_DIR 7
15#define SECCLASS_FD 8
16#define SECCLASS_LNK_FILE 9
17#define SECCLASS_CHR_FILE 10
18#define SECCLASS_BLK_FILE 11
19#define SECCLASS_SOCK_FILE 12
20#define SECCLASS_FIFO_FILE 13
21#define SECCLASS_SOCKET 14
22#define SECCLASS_TCP_SOCKET 15
23#define SECCLASS_UDP_SOCKET 16
24#define SECCLASS_RAWIP_SOCKET 17
25#define SECCLASS_NODE 18
26#define SECCLASS_NETIF 19
27#define SECCLASS_NETLINK_SOCKET 20
28#define SECCLASS_PACKET_SOCKET 21
29#define SECCLASS_KEY_SOCKET 22
30#define SECCLASS_UNIX_STREAM_SOCKET 23
31#define SECCLASS_UNIX_DGRAM_SOCKET 24
32#define SECCLASS_SEM 25
33#define SECCLASS_MSG 26
34#define SECCLASS_MSGQ 27
35#define SECCLASS_SHM 28
36#define SECCLASS_IPC 29
37#define SECCLASS_NETLINK_ROUTE_SOCKET 43
38#define SECCLASS_NETLINK_FIREWALL_SOCKET 44
39#define SECCLASS_NETLINK_TCPDIAG_SOCKET 45
40#define SECCLASS_NETLINK_NFLOG_SOCKET 46
41#define SECCLASS_NETLINK_XFRM_SOCKET 47
42#define SECCLASS_NETLINK_SELINUX_SOCKET 48
43#define SECCLASS_NETLINK_AUDIT_SOCKET 49
44#define SECCLASS_NETLINK_IP6FW_SOCKET 50
45#define SECCLASS_NETLINK_DNRT_SOCKET 51
46#define SECCLASS_ASSOCIATION 54
47#define SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET 55
48#define SECCLASS_APPLETALK_SOCKET 56
49#define SECCLASS_PACKET 57
50#define SECCLASS_KEY 58
51#define SECCLASS_DCCP_SOCKET 60
52#define SECCLASS_MEMPROTECT 61
53#define SECCLASS_PEER 68
54#define SECCLASS_CAPABILITY2 69
55#define SECCLASS_KERNEL_SERVICE 74
56#define SECCLASS_TUN_SOCKET 75
57
58/*
59 * Security identifier indices for initial entities
60 */
61#define SECINITSID_KERNEL 1
62#define SECINITSID_SECURITY 2
63#define SECINITSID_UNLABELED 3
64#define SECINITSID_FS 4
65#define SECINITSID_FILE 5
66#define SECINITSID_FILE_LABELS 6
67#define SECINITSID_INIT 7
68#define SECINITSID_ANY_SOCKET 8
69#define SECINITSID_PORT 9
70#define SECINITSID_NETIF 10
71#define SECINITSID_NETMSG 11
72#define SECINITSID_NODE 12
73#define SECINITSID_IGMP_PACKET 13
74#define SECINITSID_ICMP_SOCKET 14
75#define SECINITSID_TCP_SOCKET 15
76#define SECINITSID_SYSCTL_MODPROBE 16
77#define SECINITSID_SYSCTL 17
78#define SECINITSID_SYSCTL_FS 18
79#define SECINITSID_SYSCTL_KERNEL 19
80#define SECINITSID_SYSCTL_NET 20
81#define SECINITSID_SYSCTL_NET_UNIX 21
82#define SECINITSID_SYSCTL_VM 22
83#define SECINITSID_SYSCTL_DEV 23
84#define SECINITSID_KMOD 24
85#define SECINITSID_POLICY 25
86#define SECINITSID_SCMP_PACKET 26
87#define SECINITSID_DEVNULL 27
88
89#define SECINITSID_NUM 27
90
91#endif
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index ca835795a8b3..2553266ad793 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -97,11 +97,18 @@ struct av_decision {
97#define AVD_FLAGS_PERMISSIVE 0x0001 97#define AVD_FLAGS_PERMISSIVE 0x0001
98 98
99int security_compute_av(u32 ssid, u32 tsid, 99int security_compute_av(u32 ssid, u32 tsid,
100 u16 tclass, u32 requested, 100 u16 tclass, u32 requested,
101 struct av_decision *avd); 101 struct av_decision *avd);
102
103int security_compute_av_user(u32 ssid, u32 tsid,
104 u16 tclass, u32 requested,
105 struct av_decision *avd);
102 106
103int security_transition_sid(u32 ssid, u32 tsid, 107int security_transition_sid(u32 ssid, u32 tsid,
104 u16 tclass, u32 *out_sid); 108 u16 tclass, u32 *out_sid);
109
110int security_transition_sid_user(u32 ssid, u32 tsid,
111 u16 tclass, u32 *out_sid);
105 112
106int security_member_sid(u32 ssid, u32 tsid, 113int security_member_sid(u32 ssid, u32 tsid,
107 u16 tclass, u32 *out_sid); 114 u16 tclass, u32 *out_sid);
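The split introduced here matters because in-kernel callers and selinuxfs now speak different value spaces. A hedged fragment, not taken from this patch (ssid, tsid, tclass and req stand for whatever the caller already holds):

	struct av_decision avd;
	int rc;

	/* In-kernel object managers keep passing the generated kernel values,
	 * which security_compute_av() now unmaps internally. */
	rc = security_compute_av(ssid, tsid, SECCLASS_PROCESS,
				 PROCESS__TRANSITION, &avd);

	/* selinuxfs receives class/permission numbers that already refer to
	 * the loaded policy, so it switches to the raw variant below. */
	rc = security_compute_av_user(ssid, tsid, tclass, req, &avd);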
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index b4fc506e7a87..fab36fdf2769 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -522,7 +522,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
522 if (length < 0) 522 if (length < 0)
523 goto out2; 523 goto out2;
524 524
525 length = security_compute_av(ssid, tsid, tclass, req, &avd); 525 length = security_compute_av_user(ssid, tsid, tclass, req, &avd);
526 if (length < 0) 526 if (length < 0)
527 goto out2; 527 goto out2;
528 528
@@ -571,7 +571,7 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
571 if (length < 0) 571 if (length < 0)
572 goto out2; 572 goto out2;
573 573
574 length = security_transition_sid(ssid, tsid, tclass, &newsid); 574 length = security_transition_sid_user(ssid, tsid, tclass, &newsid);
575 if (length < 0) 575 if (length < 0)
576 goto out2; 576 goto out2;
577 577
diff --git a/security/selinux/ss/Makefile b/security/selinux/ss/Makefile
index bad78779b9b0..15d4e62917de 100644
--- a/security/selinux/ss/Makefile
+++ b/security/selinux/ss/Makefile
@@ -2,7 +2,7 @@
2# Makefile for building the SELinux security server as part of the kernel tree. 2# Makefile for building the SELinux security server as part of the kernel tree.
3# 3#
4 4
5EXTRA_CFLAGS += -Isecurity/selinux/include 5EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
6obj-y := ss.o 6obj-y := ss.o
7 7
8ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o 8ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index b5407f16c2a4..3f2b2706b5bb 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -532,7 +532,7 @@ int mls_compute_sid(struct context *scontext,
532 } 532 }
533 /* Fallthrough */ 533 /* Fallthrough */
534 case AVTAB_CHANGE: 534 case AVTAB_CHANGE:
535 if (tclass == SECCLASS_PROCESS) 535 if (tclass == policydb.process_class)
536 /* Use the process MLS attributes. */ 536 /* Use the process MLS attributes. */
537 return mls_context_cpy(newcontext, scontext); 537 return mls_context_cpy(newcontext, scontext);
538 else 538 else
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 72e4a54973aa..f03667213ea8 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -713,7 +713,6 @@ void policydb_destroy(struct policydb *p)
713 ebitmap_destroy(&p->type_attr_map[i]); 713 ebitmap_destroy(&p->type_attr_map[i]);
714 } 714 }
715 kfree(p->type_attr_map); 715 kfree(p->type_attr_map);
716 kfree(p->undefined_perms);
717 ebitmap_destroy(&p->policycaps); 716 ebitmap_destroy(&p->policycaps);
718 ebitmap_destroy(&p->permissive_map); 717 ebitmap_destroy(&p->permissive_map);
719 718
@@ -1640,6 +1639,40 @@ static int policydb_bounds_sanity_check(struct policydb *p)
1640 1639
1641extern int ss_initialized; 1640extern int ss_initialized;
1642 1641
1642u16 string_to_security_class(struct policydb *p, const char *name)
1643{
1644 struct class_datum *cladatum;
1645
1646 cladatum = hashtab_search(p->p_classes.table, name);
1647 if (!cladatum)
1648 return 0;
1649
1650 return cladatum->value;
1651}
1652
1653u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name)
1654{
1655 struct class_datum *cladatum;
1656 struct perm_datum *perdatum = NULL;
1657 struct common_datum *comdatum;
1658
1659 if (!tclass || tclass > p->p_classes.nprim)
1660 return 0;
1661
1662 cladatum = p->class_val_to_struct[tclass-1];
1663 comdatum = cladatum->comdatum;
1664 if (comdatum)
1665 perdatum = hashtab_search(comdatum->permissions.table,
1666 name);
1667 if (!perdatum)
1668 perdatum = hashtab_search(cladatum->permissions.table,
1669 name);
1670 if (!perdatum)
1671 return 0;
1672
1673 return 1U << (perdatum->value-1);
1674}
1675
1643/* 1676/*
1644 * Read the configuration data from a policy database binary 1677 * Read the configuration data from a policy database binary
1645 * representation file into a policy database structure. 1678 * representation file into a policy database structure.
@@ -1861,6 +1894,16 @@ int policydb_read(struct policydb *p, void *fp)
1861 if (rc) 1894 if (rc)
1862 goto bad; 1895 goto bad;
1863 1896
1897 p->process_class = string_to_security_class(p, "process");
1898 if (!p->process_class)
1899 goto bad;
1900 p->process_trans_perms = string_to_av_perm(p, p->process_class,
1901 "transition");
1902 p->process_trans_perms |= string_to_av_perm(p, p->process_class,
1903 "dyntransition");
1904 if (!p->process_trans_perms)
1905 goto bad;
1906
1864 for (i = 0; i < info->ocon_num; i++) { 1907 for (i = 0; i < info->ocon_num; i++) {
1865 rc = next_entry(buf, fp, sizeof(u32)); 1908 rc = next_entry(buf, fp, sizeof(u32));
1866 if (rc < 0) 1909 if (rc < 0)
@@ -2101,7 +2144,7 @@ int policydb_read(struct policydb *p, void *fp)
2101 goto bad; 2144 goto bad;
2102 rt->target_class = le32_to_cpu(buf[0]); 2145 rt->target_class = le32_to_cpu(buf[0]);
2103 } else 2146 } else
2104 rt->target_class = SECCLASS_PROCESS; 2147 rt->target_class = p->process_class;
2105 if (!policydb_type_isvalid(p, rt->source_type) || 2148 if (!policydb_type_isvalid(p, rt->source_type) ||
2106 !policydb_type_isvalid(p, rt->target_type) || 2149 !policydb_type_isvalid(p, rt->target_type) ||
2107 !policydb_class_isvalid(p, rt->target_class)) { 2150 !policydb_class_isvalid(p, rt->target_class)) {
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 55152d498b53..cdcc5700946f 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -254,7 +254,9 @@ struct policydb {
254 254
255 unsigned int reject_unknown : 1; 255 unsigned int reject_unknown : 1;
256 unsigned int allow_unknown : 1; 256 unsigned int allow_unknown : 1;
257 u32 *undefined_perms; 257
258 u16 process_class;
259 u32 process_trans_perms;
258}; 260};
259 261
260extern void policydb_destroy(struct policydb *p); 262extern void policydb_destroy(struct policydb *p);
@@ -295,5 +297,8 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
295 return 0; 297 return 0;
296} 298}
297 299
300extern u16 string_to_security_class(struct policydb *p, const char *name);
301extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
302
298#endif /* _SS_POLICYDB_H_ */ 303#endif /* _SS_POLICYDB_H_ */
299 304
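string_to_security_class() and string_to_av_perm() resolve class and permission values from the loaded policy instead of relying on fixed kernel constants; string_to_av_perm() yields a single-bit mask, 1U << (value - 1), or 0 when the name is unknown. A short usage sketch mirroring the policydb_read() hunk above (pol stands for an already-loaded struct policydb):

static void example_resolve_process_perms(struct policydb *pol)
{
	u16 process_class = string_to_security_class(pol, "process");
	u32 process_trans = 0;

	if (process_class) {
		process_trans  = string_to_av_perm(pol, process_class, "transition");
		process_trans |= string_to_av_perm(pol, process_class, "dyntransition");
	}
	/* process_trans == 0 means neither permission exists in this policy. */
}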
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index ff17820d35ec..d6bb20cbad62 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -65,16 +65,10 @@
65#include "audit.h" 65#include "audit.h"
66 66
67extern void selnl_notify_policyload(u32 seqno); 67extern void selnl_notify_policyload(u32 seqno);
68unsigned int policydb_loaded_version;
69 68
70int selinux_policycap_netpeer; 69int selinux_policycap_netpeer;
71int selinux_policycap_openperm; 70int selinux_policycap_openperm;
72 71
73/*
74 * This is declared in avc.c
75 */
76extern const struct selinux_class_perm selinux_class_perm;
77
78static DEFINE_RWLOCK(policy_rwlock); 72static DEFINE_RWLOCK(policy_rwlock);
79 73
80static struct sidtab sidtab; 74static struct sidtab sidtab;
@@ -98,6 +92,165 @@ static int context_struct_compute_av(struct context *scontext,
98 u16 tclass, 92 u16 tclass,
99 u32 requested, 93 u32 requested,
100 struct av_decision *avd); 94 struct av_decision *avd);
95
96struct selinux_mapping {
97 u16 value; /* policy value */
98 unsigned num_perms;
99 u32 perms[sizeof(u32) * 8];
100};
101
102static struct selinux_mapping *current_mapping;
103static u16 current_mapping_size;
104
105static int selinux_set_mapping(struct policydb *pol,
106 struct security_class_mapping *map,
107 struct selinux_mapping **out_map_p,
108 u16 *out_map_size)
109{
110 struct selinux_mapping *out_map = NULL;
111 size_t size = sizeof(struct selinux_mapping);
112 u16 i, j;
113 unsigned k;
114 bool print_unknown_handle = false;
115
116 /* Find number of classes in the input mapping */
117 if (!map)
118 return -EINVAL;
119 i = 0;
120 while (map[i].name)
121 i++;
122
123 /* Allocate space for the class records, plus one for class zero */
124 out_map = kcalloc(++i, size, GFP_ATOMIC);
125 if (!out_map)
126 return -ENOMEM;
127
128 /* Store the raw class and permission values */
129 j = 0;
130 while (map[j].name) {
131 struct security_class_mapping *p_in = map + (j++);
132 struct selinux_mapping *p_out = out_map + j;
133
134 /* An empty class string skips ahead */
135 if (!strcmp(p_in->name, "")) {
136 p_out->num_perms = 0;
137 continue;
138 }
139
140 p_out->value = string_to_security_class(pol, p_in->name);
141 if (!p_out->value) {
142 printk(KERN_INFO
143 "SELinux: Class %s not defined in policy.\n",
144 p_in->name);
145 if (pol->reject_unknown)
146 goto err;
147 p_out->num_perms = 0;
148 print_unknown_handle = true;
149 continue;
150 }
151
152 k = 0;
153 while (p_in->perms && p_in->perms[k]) {
154 /* An empty permission string skips ahead */
155 if (!*p_in->perms[k]) {
156 k++;
157 continue;
158 }
159 p_out->perms[k] = string_to_av_perm(pol, p_out->value,
160 p_in->perms[k]);
161 if (!p_out->perms[k]) {
162 printk(KERN_INFO
163 "SELinux: Permission %s in class %s not defined in policy.\n",
164 p_in->perms[k], p_in->name);
165 if (pol->reject_unknown)
166 goto err;
167 print_unknown_handle = true;
168 }
169
170 k++;
171 }
172 p_out->num_perms = k;
173 }
174
175 if (print_unknown_handle)
176 printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
177 pol->allow_unknown ? "allowed" : "denied");
178
179 *out_map_p = out_map;
180 *out_map_size = i;
181 return 0;
182err:
183 kfree(out_map);
184 return -EINVAL;
185}
186
187/*
188 * Get real, policy values from mapped values
189 */
190
191static u16 unmap_class(u16 tclass)
192{
193 if (tclass < current_mapping_size)
194 return current_mapping[tclass].value;
195
196 return tclass;
197}
198
199static u32 unmap_perm(u16 tclass, u32 tperm)
200{
201 if (tclass < current_mapping_size) {
202 unsigned i;
203 u32 kperm = 0;
204
205 for (i = 0; i < current_mapping[tclass].num_perms; i++)
206 if (tperm & (1<<i)) {
207 kperm |= current_mapping[tclass].perms[i];
208 tperm &= ~(1<<i);
209 }
210 return kperm;
211 }
212
213 return tperm;
214}
215
216static void map_decision(u16 tclass, struct av_decision *avd,
217 int allow_unknown)
218{
219 if (tclass < current_mapping_size) {
220 unsigned i, n = current_mapping[tclass].num_perms;
221 u32 result;
222
223 for (i = 0, result = 0; i < n; i++) {
224 if (avd->allowed & current_mapping[tclass].perms[i])
225 result |= 1<<i;
226 if (allow_unknown && !current_mapping[tclass].perms[i])
227 result |= 1<<i;
228 }
229 avd->allowed = result;
230
231 for (i = 0, result = 0; i < n; i++)
232 if (avd->auditallow & current_mapping[tclass].perms[i])
233 result |= 1<<i;
234 avd->auditallow = result;
235
236 for (i = 0, result = 0; i < n; i++) {
237 if (avd->auditdeny & current_mapping[tclass].perms[i])
238 result |= 1<<i;
239 if (!allow_unknown && !current_mapping[tclass].perms[i])
240 result |= 1<<i;
241 }
242 /*
243 * In case the kernel has a bug and requests a permission
244 * between num_perms and the maximum permission number, we
245 * should audit that denial
246 */
247 for (; i < (sizeof(u32)*8); i++)
248 result |= 1<<i;
249 avd->auditdeny = result;
250 }
251}
252
253
101/* 254/*
102 * Return the boolean value of a constraint expression 255 * Return the boolean value of a constraint expression
103 * when it is applied to the specified source and target 256 * when it is applied to the specified source and target
@@ -467,21 +620,9 @@ static int context_struct_compute_av(struct context *scontext,
467 struct class_datum *tclass_datum; 620 struct class_datum *tclass_datum;
468 struct ebitmap *sattr, *tattr; 621 struct ebitmap *sattr, *tattr;
469 struct ebitmap_node *snode, *tnode; 622 struct ebitmap_node *snode, *tnode;
470 const struct selinux_class_perm *kdefs = &selinux_class_perm;
471 unsigned int i, j; 623 unsigned int i, j;
472 624
473 /* 625 /*
474 * Remap extended Netlink classes for old policy versions.
475 * Do this here rather than socket_type_to_security_class()
476 * in case a newer policy version is loaded, allowing sockets
477 * to remain in the correct class.
478 */
479 if (policydb_loaded_version < POLICYDB_VERSION_NLCLASS)
480 if (tclass >= SECCLASS_NETLINK_ROUTE_SOCKET &&
481 tclass <= SECCLASS_NETLINK_DNRT_SOCKET)
482 tclass = SECCLASS_NETLINK_SOCKET;
483
484 /*
485 * Initialize the access vectors to the default values. 626 * Initialize the access vectors to the default values.
486 */ 627 */
487 avd->allowed = 0; 628 avd->allowed = 0;
@@ -490,33 +631,11 @@ static int context_struct_compute_av(struct context *scontext,
490 avd->seqno = latest_granting; 631 avd->seqno = latest_granting;
491 avd->flags = 0; 632 avd->flags = 0;
492 633
493 /* 634 if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
494 * Check for all the invalid cases. 635 if (printk_ratelimit())
495 * - tclass 0 636 printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
496 * - tclass > policy and > kernel 637 return -EINVAL;
497 * - tclass > policy but is a userspace class 638 }
498 * - tclass > policy but we do not allow unknowns
499 */
500 if (unlikely(!tclass))
501 goto inval_class;
502 if (unlikely(tclass > policydb.p_classes.nprim))
503 if (tclass > kdefs->cts_len ||
504 !kdefs->class_to_string[tclass] ||
505 !policydb.allow_unknown)
506 goto inval_class;
507
508 /*
509 * Kernel class and we allow unknown so pad the allow decision
510 * the pad will be all 1 for unknown classes.
511 */
512 if (tclass <= kdefs->cts_len && policydb.allow_unknown)
513 avd->allowed = policydb.undefined_perms[tclass - 1];
514
515 /*
516 * Not in policy. Since decision is completed (all 1 or all 0) return.
517 */
518 if (unlikely(tclass > policydb.p_classes.nprim))
519 return 0;
520 639
521 tclass_datum = policydb.class_val_to_struct[tclass - 1]; 640 tclass_datum = policydb.class_val_to_struct[tclass - 1];
522 641
@@ -568,8 +687,8 @@ static int context_struct_compute_av(struct context *scontext,
568 * role is changing, then check the (current_role, new_role) 687 * role is changing, then check the (current_role, new_role)
569 * pair. 688 * pair.
570 */ 689 */
571 if (tclass == SECCLASS_PROCESS && 690 if (tclass == policydb.process_class &&
572 (avd->allowed & (PROCESS__TRANSITION | PROCESS__DYNTRANSITION)) && 691 (avd->allowed & policydb.process_trans_perms) &&
573 scontext->role != tcontext->role) { 692 scontext->role != tcontext->role) {
574 for (ra = policydb.role_allow; ra; ra = ra->next) { 693 for (ra = policydb.role_allow; ra; ra = ra->next) {
575 if (scontext->role == ra->role && 694 if (scontext->role == ra->role &&
@@ -577,8 +696,7 @@ static int context_struct_compute_av(struct context *scontext,
577 break; 696 break;
578 } 697 }
579 if (!ra) 698 if (!ra)
580 avd->allowed &= ~(PROCESS__TRANSITION | 699 avd->allowed &= ~policydb.process_trans_perms;
581 PROCESS__DYNTRANSITION);
582 } 700 }
583 701
584 /* 702 /*
@@ -590,21 +708,6 @@ static int context_struct_compute_av(struct context *scontext,
590 tclass, requested, avd); 708 tclass, requested, avd);
591 709
592 return 0; 710 return 0;
593
594inval_class:
595 if (!tclass || tclass > kdefs->cts_len ||
596 !kdefs->class_to_string[tclass]) {
597 if (printk_ratelimit())
598 printk(KERN_ERR "SELinux: %s: unrecognized class %d\n",
599 __func__, tclass);
600 return -EINVAL;
601 }
602
603 /*
604 * Known to the kernel, but not to the policy.
605 * Handle as a denial (allowed is 0).
606 */
607 return 0;
608} 711}
609 712
610static int security_validtrans_handle_fail(struct context *ocontext, 713static int security_validtrans_handle_fail(struct context *ocontext,
@@ -636,13 +739,14 @@ out:
636} 739}
637 740
638int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, 741int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
639 u16 tclass) 742 u16 orig_tclass)
640{ 743{
641 struct context *ocontext; 744 struct context *ocontext;
642 struct context *ncontext; 745 struct context *ncontext;
643 struct context *tcontext; 746 struct context *tcontext;
644 struct class_datum *tclass_datum; 747 struct class_datum *tclass_datum;
645 struct constraint_node *constraint; 748 struct constraint_node *constraint;
749 u16 tclass;
646 int rc = 0; 750 int rc = 0;
647 751
648 if (!ss_initialized) 752 if (!ss_initialized)
@@ -650,16 +754,7 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
650 754
651 read_lock(&policy_rwlock); 755 read_lock(&policy_rwlock);
652 756
653 /* 757 tclass = unmap_class(orig_tclass);
654 * Remap extended Netlink classes for old policy versions.
655 * Do this here rather than socket_type_to_security_class()
656 * in case a newer policy version is loaded, allowing sockets
657 * to remain in the correct class.
658 */
659 if (policydb_loaded_version < POLICYDB_VERSION_NLCLASS)
660 if (tclass >= SECCLASS_NETLINK_ROUTE_SOCKET &&
661 tclass <= SECCLASS_NETLINK_DNRT_SOCKET)
662 tclass = SECCLASS_NETLINK_SOCKET;
663 758
664 if (!tclass || tclass > policydb.p_classes.nprim) { 759 if (!tclass || tclass > policydb.p_classes.nprim) {
665 printk(KERN_ERR "SELinux: %s: unrecognized class %d\n", 760 printk(KERN_ERR "SELinux: %s: unrecognized class %d\n",
@@ -792,6 +887,38 @@ out:
792} 887}
793 888
794 889
890static int security_compute_av_core(u32 ssid,
891 u32 tsid,
892 u16 tclass,
893 u32 requested,
894 struct av_decision *avd)
895{
896 struct context *scontext = NULL, *tcontext = NULL;
897 int rc = 0;
898
899 scontext = sidtab_search(&sidtab, ssid);
900 if (!scontext) {
901 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
902 __func__, ssid);
903 return -EINVAL;
904 }
905 tcontext = sidtab_search(&sidtab, tsid);
906 if (!tcontext) {
907 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
908 __func__, tsid);
909 return -EINVAL;
910 }
911
912 rc = context_struct_compute_av(scontext, tcontext, tclass,
913 requested, avd);
914
915 /* permissive domain? */
916 if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
917 avd->flags |= AVD_FLAGS_PERMISSIVE;
918
919 return rc;
920}
921
795/** 922/**
796 * security_compute_av - Compute access vector decisions. 923 * security_compute_av - Compute access vector decisions.
797 * @ssid: source security identifier 924 * @ssid: source security identifier
@@ -807,12 +934,49 @@ out:
807 */ 934 */
808int security_compute_av(u32 ssid, 935int security_compute_av(u32 ssid,
809 u32 tsid, 936 u32 tsid,
810 u16 tclass, 937 u16 orig_tclass,
811 u32 requested, 938 u32 orig_requested,
812 struct av_decision *avd) 939 struct av_decision *avd)
813{ 940{
814 struct context *scontext = NULL, *tcontext = NULL; 941 u16 tclass;
815 int rc = 0; 942 u32 requested;
943 int rc;
944
945 read_lock(&policy_rwlock);
946
947 if (!ss_initialized)
948 goto allow;
949
950 requested = unmap_perm(orig_tclass, orig_requested);
951 tclass = unmap_class(orig_tclass);
952 if (unlikely(orig_tclass && !tclass)) {
953 if (policydb.allow_unknown)
954 goto allow;
955 rc = -EINVAL;
956 goto out;
957 }
958 rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
959 map_decision(orig_tclass, avd, policydb.allow_unknown);
960out:
961 read_unlock(&policy_rwlock);
962 return rc;
963allow:
964 avd->allowed = 0xffffffff;
965 avd->auditallow = 0;
966 avd->auditdeny = 0xffffffff;
967 avd->seqno = latest_granting;
968 avd->flags = 0;
969 rc = 0;
970 goto out;
971}
972
973int security_compute_av_user(u32 ssid,
974 u32 tsid,
975 u16 tclass,
976 u32 requested,
977 struct av_decision *avd)
978{
979 int rc;
816 980
817 if (!ss_initialized) { 981 if (!ss_initialized) {
818 avd->allowed = 0xffffffff; 982 avd->allowed = 0xffffffff;
@@ -823,29 +987,7 @@ int security_compute_av(u32 ssid,
823 } 987 }
824 988
825 read_lock(&policy_rwlock); 989 read_lock(&policy_rwlock);
826 990 rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
827 scontext = sidtab_search(&sidtab, ssid);
828 if (!scontext) {
829 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
830 __func__, ssid);
831 rc = -EINVAL;
832 goto out;
833 }
834 tcontext = sidtab_search(&sidtab, tsid);
835 if (!tcontext) {
836 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
837 __func__, tsid);
838 rc = -EINVAL;
839 goto out;
840 }
841
842 rc = context_struct_compute_av(scontext, tcontext, tclass,
843 requested, avd);
844
845 /* permissive domain? */
846 if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
847 avd->flags |= AVD_FLAGS_PERMISSIVE;
848out:
849 read_unlock(&policy_rwlock); 991 read_unlock(&policy_rwlock);
850 return rc; 992 return rc;
851} 993}
@@ -1204,20 +1346,22 @@ out:
1204 1346
1205static int security_compute_sid(u32 ssid, 1347static int security_compute_sid(u32 ssid,
1206 u32 tsid, 1348 u32 tsid,
1207 u16 tclass, 1349 u16 orig_tclass,
1208 u32 specified, 1350 u32 specified,
1209 u32 *out_sid) 1351 u32 *out_sid,
1352 bool kern)
1210{ 1353{
1211 struct context *scontext = NULL, *tcontext = NULL, newcontext; 1354 struct context *scontext = NULL, *tcontext = NULL, newcontext;
1212 struct role_trans *roletr = NULL; 1355 struct role_trans *roletr = NULL;
1213 struct avtab_key avkey; 1356 struct avtab_key avkey;
1214 struct avtab_datum *avdatum; 1357 struct avtab_datum *avdatum;
1215 struct avtab_node *node; 1358 struct avtab_node *node;
1359 u16 tclass;
1216 int rc = 0; 1360 int rc = 0;
1217 1361
1218 if (!ss_initialized) { 1362 if (!ss_initialized) {
1219 switch (tclass) { 1363 switch (orig_tclass) {
1220 case SECCLASS_PROCESS: 1364 case SECCLASS_PROCESS: /* kernel value */
1221 *out_sid = ssid; 1365 *out_sid = ssid;
1222 break; 1366 break;
1223 default: 1367 default:
@@ -1231,6 +1375,11 @@ static int security_compute_sid(u32 ssid,
1231 1375
1232 read_lock(&policy_rwlock); 1376 read_lock(&policy_rwlock);
1233 1377
1378 if (kern)
1379 tclass = unmap_class(orig_tclass);
1380 else
1381 tclass = orig_tclass;
1382
1234 scontext = sidtab_search(&sidtab, ssid); 1383 scontext = sidtab_search(&sidtab, ssid);
1235 if (!scontext) { 1384 if (!scontext) {
1236 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", 1385 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
@@ -1260,13 +1409,11 @@ static int security_compute_sid(u32 ssid,
1260 } 1409 }
1261 1410
1262 /* Set the role and type to default values. */ 1411 /* Set the role and type to default values. */
1263 switch (tclass) { 1412 if (tclass == policydb.process_class) {
1264 case SECCLASS_PROCESS:
1265 /* Use the current role and type of process. */ 1413 /* Use the current role and type of process. */
1266 newcontext.role = scontext->role; 1414 newcontext.role = scontext->role;
1267 newcontext.type = scontext->type; 1415 newcontext.type = scontext->type;
1268 break; 1416 } else {
1269 default:
1270 /* Use the well-defined object role. */ 1417 /* Use the well-defined object role. */
1271 newcontext.role = OBJECT_R_VAL; 1418 newcontext.role = OBJECT_R_VAL;
1272 /* Use the type of the related object. */ 1419 /* Use the type of the related object. */
@@ -1297,8 +1444,7 @@ static int security_compute_sid(u32 ssid,
1297 } 1444 }
1298 1445
1299 /* Check for class-specific changes. */ 1446 /* Check for class-specific changes. */
1300 switch (tclass) { 1447 if (tclass == policydb.process_class) {
1301 case SECCLASS_PROCESS:
1302 if (specified & AVTAB_TRANSITION) { 1448 if (specified & AVTAB_TRANSITION) {
1303 /* Look for a role transition rule. */ 1449 /* Look for a role transition rule. */
1304 for (roletr = policydb.role_tr; roletr; 1450 for (roletr = policydb.role_tr; roletr;
@@ -1311,9 +1457,6 @@ static int security_compute_sid(u32 ssid,
1311 } 1457 }
1312 } 1458 }
1313 } 1459 }
1314 break;
1315 default:
1316 break;
1317 } 1460 }
1318 1461
1319 /* Set the MLS attributes. 1462 /* Set the MLS attributes.
@@ -1358,7 +1501,17 @@ int security_transition_sid(u32 ssid,
1358 u16 tclass, 1501 u16 tclass,
1359 u32 *out_sid) 1502 u32 *out_sid)
1360{ 1503{
1361 return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, out_sid); 1504 return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
1505 out_sid, true);
1506}
1507
1508int security_transition_sid_user(u32 ssid,
1509 u32 tsid,
1510 u16 tclass,
1511 u32 *out_sid)
1512{
1513 return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
1514 out_sid, false);
1362} 1515}
1363 1516
1364/** 1517/**
@@ -1379,7 +1532,8 @@ int security_member_sid(u32 ssid,
1379 u16 tclass, 1532 u16 tclass,
1380 u32 *out_sid) 1533 u32 *out_sid)
1381{ 1534{
1382 return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid); 1535 return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid,
1536 false);
1383} 1537}
1384 1538
1385/** 1539/**
@@ -1400,144 +1554,8 @@ int security_change_sid(u32 ssid,
1400 u16 tclass, 1554 u16 tclass,
1401 u32 *out_sid) 1555 u32 *out_sid)
1402{ 1556{
1403 return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid); 1557 return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid,
1404} 1558 false);
1405
1406/*
1407 * Verify that each kernel class that is defined in the
1408 * policy is correct
1409 */
1410static int validate_classes(struct policydb *p)
1411{
1412 int i, j;
1413 struct class_datum *cladatum;
1414 struct perm_datum *perdatum;
1415 u32 nprim, tmp, common_pts_len, perm_val, pol_val;
1416 u16 class_val;
1417 const struct selinux_class_perm *kdefs = &selinux_class_perm;
1418 const char *def_class, *def_perm, *pol_class;
1419 struct symtab *perms;
1420 bool print_unknown_handle = 0;
1421
1422 if (p->allow_unknown) {
1423 u32 num_classes = kdefs->cts_len;
1424 p->undefined_perms = kcalloc(num_classes, sizeof(u32), GFP_KERNEL);
1425 if (!p->undefined_perms)
1426 return -ENOMEM;
1427 }
1428
1429 for (i = 1; i < kdefs->cts_len; i++) {
1430 def_class = kdefs->class_to_string[i];
1431 if (!def_class)
1432 continue;
1433 if (i > p->p_classes.nprim) {
1434 printk(KERN_INFO
1435 "SELinux: class %s not defined in policy\n",
1436 def_class);
1437 if (p->reject_unknown)
1438 return -EINVAL;
1439 if (p->allow_unknown)
1440 p->undefined_perms[i-1] = ~0U;
1441 print_unknown_handle = 1;
1442 continue;
1443 }
1444 pol_class = p->p_class_val_to_name[i-1];
1445 if (strcmp(pol_class, def_class)) {
1446 printk(KERN_ERR
1447 "SELinux: class %d is incorrect, found %s but should be %s\n",
1448 i, pol_class, def_class);
1449 return -EINVAL;
1450 }
1451 }
1452 for (i = 0; i < kdefs->av_pts_len; i++) {
1453 class_val = kdefs->av_perm_to_string[i].tclass;
1454 perm_val = kdefs->av_perm_to_string[i].value;
1455 def_perm = kdefs->av_perm_to_string[i].name;
1456 if (class_val > p->p_classes.nprim)
1457 continue;
1458 pol_class = p->p_class_val_to_name[class_val-1];
1459 cladatum = hashtab_search(p->p_classes.table, pol_class);
1460 BUG_ON(!cladatum);
1461 perms = &cladatum->permissions;
1462 nprim = 1 << (perms->nprim - 1);
1463 if (perm_val > nprim) {
1464 printk(KERN_INFO
1465 "SELinux: permission %s in class %s not defined in policy\n",
1466 def_perm, pol_class);
1467 if (p->reject_unknown)
1468 return -EINVAL;
1469 if (p->allow_unknown)
1470 p->undefined_perms[class_val-1] |= perm_val;
1471 print_unknown_handle = 1;
1472 continue;
1473 }
1474 perdatum = hashtab_search(perms->table, def_perm);
1475 if (perdatum == NULL) {
1476 printk(KERN_ERR
1477 "SELinux: permission %s in class %s not found in policy, bad policy\n",
1478 def_perm, pol_class);
1479 return -EINVAL;
1480 }
1481 pol_val = 1 << (perdatum->value - 1);
1482 if (pol_val != perm_val) {
1483 printk(KERN_ERR
1484 "SELinux: permission %s in class %s has incorrect value\n",
1485 def_perm, pol_class);
1486 return -EINVAL;
1487 }
1488 }
1489 for (i = 0; i < kdefs->av_inherit_len; i++) {
1490 class_val = kdefs->av_inherit[i].tclass;
1491 if (class_val > p->p_classes.nprim)
1492 continue;
1493 pol_class = p->p_class_val_to_name[class_val-1];
1494 cladatum = hashtab_search(p->p_classes.table, pol_class);
1495 BUG_ON(!cladatum);
1496 if (!cladatum->comdatum) {
1497 printk(KERN_ERR
1498 "SELinux: class %s should have an inherits clause but does not\n",
1499 pol_class);
1500 return -EINVAL;
1501 }
1502 tmp = kdefs->av_inherit[i].common_base;
1503 common_pts_len = 0;
1504 while (!(tmp & 0x01)) {
1505 common_pts_len++;
1506 tmp >>= 1;
1507 }
1508 perms = &cladatum->comdatum->permissions;
1509 for (j = 0; j < common_pts_len; j++) {
1510 def_perm = kdefs->av_inherit[i].common_pts[j];
1511 if (j >= perms->nprim) {
1512 printk(KERN_INFO
1513 "SELinux: permission %s in class %s not defined in policy\n",
1514 def_perm, pol_class);
1515 if (p->reject_unknown)
1516 return -EINVAL;
1517 if (p->allow_unknown)
1518 p->undefined_perms[class_val-1] |= (1 << j);
1519 print_unknown_handle = 1;
1520 continue;
1521 }
1522 perdatum = hashtab_search(perms->table, def_perm);
1523 if (perdatum == NULL) {
1524 printk(KERN_ERR
1525 "SELinux: permission %s in class %s not found in policy, bad policy\n",
1526 def_perm, pol_class);
1527 return -EINVAL;
1528 }
1529 if (perdatum->value != j + 1) {
1530 printk(KERN_ERR
1531 "SELinux: permission %s in class %s has incorrect value\n",
1532 def_perm, pol_class);
1533 return -EINVAL;
1534 }
1535 }
1536 }
1537 if (print_unknown_handle)
1538 printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
1539 (security_get_allow_unknown() ? "allowed" : "denied"));
1540 return 0;
1541} 1559}
1542 1560
1543/* Clone the SID into the new SID table. */ 1561/* Clone the SID into the new SID table. */
@@ -1710,8 +1728,10 @@ int security_load_policy(void *data, size_t len)
1710{ 1728{
1711 struct policydb oldpolicydb, newpolicydb; 1729 struct policydb oldpolicydb, newpolicydb;
1712 struct sidtab oldsidtab, newsidtab; 1730 struct sidtab oldsidtab, newsidtab;
1731 struct selinux_mapping *oldmap, *map = NULL;
1713 struct convert_context_args args; 1732 struct convert_context_args args;
1714 u32 seqno; 1733 u32 seqno;
1734 u16 map_size;
1715 int rc = 0; 1735 int rc = 0;
1716 struct policy_file file = { data, len }, *fp = &file; 1736 struct policy_file file = { data, len }, *fp = &file;
1717 1737
@@ -1721,22 +1741,19 @@ int security_load_policy(void *data, size_t len)
1721 avtab_cache_destroy(); 1741 avtab_cache_destroy();
1722 return -EINVAL; 1742 return -EINVAL;
1723 } 1743 }
1724 if (policydb_load_isids(&policydb, &sidtab)) { 1744 if (selinux_set_mapping(&policydb, secclass_map,
1745 &current_mapping,
1746 &current_mapping_size)) {
1725 policydb_destroy(&policydb); 1747 policydb_destroy(&policydb);
1726 avtab_cache_destroy(); 1748 avtab_cache_destroy();
1727 return -EINVAL; 1749 return -EINVAL;
1728 } 1750 }
1729 /* Verify that the kernel defined classes are correct. */ 1751 if (policydb_load_isids(&policydb, &sidtab)) {
1730 if (validate_classes(&policydb)) {
1731 printk(KERN_ERR
1732 "SELinux: the definition of a class is incorrect\n");
1733 sidtab_destroy(&sidtab);
1734 policydb_destroy(&policydb); 1752 policydb_destroy(&policydb);
1735 avtab_cache_destroy(); 1753 avtab_cache_destroy();
1736 return -EINVAL; 1754 return -EINVAL;
1737 } 1755 }
1738 security_load_policycaps(); 1756 security_load_policycaps();
1739 policydb_loaded_version = policydb.policyvers;
1740 ss_initialized = 1; 1757 ss_initialized = 1;
1741 seqno = ++latest_granting; 1758 seqno = ++latest_granting;
1742 selinux_complete_init(); 1759 selinux_complete_init();
@@ -1759,13 +1776,9 @@ int security_load_policy(void *data, size_t len)
1759 return -ENOMEM; 1776 return -ENOMEM;
1760 } 1777 }
1761 1778
1762 /* Verify that the kernel defined classes are correct. */ 1779 if (selinux_set_mapping(&newpolicydb, secclass_map,
1763 if (validate_classes(&newpolicydb)) { 1780 &map, &map_size))
1764 printk(KERN_ERR
1765 "SELinux: the definition of a class is incorrect\n");
1766 rc = -EINVAL;
1767 goto err; 1781 goto err;
1768 }
1769 1782
1770 rc = security_preserve_bools(&newpolicydb); 1783 rc = security_preserve_bools(&newpolicydb);
1771 if (rc) { 1784 if (rc) {
@@ -1799,13 +1812,16 @@ int security_load_policy(void *data, size_t len)
1799 memcpy(&policydb, &newpolicydb, sizeof policydb); 1812 memcpy(&policydb, &newpolicydb, sizeof policydb);
1800 sidtab_set(&sidtab, &newsidtab); 1813 sidtab_set(&sidtab, &newsidtab);
1801 security_load_policycaps(); 1814 security_load_policycaps();
1815 oldmap = current_mapping;
1816 current_mapping = map;
1817 current_mapping_size = map_size;
1802 seqno = ++latest_granting; 1818 seqno = ++latest_granting;
1803 policydb_loaded_version = policydb.policyvers;
1804 write_unlock_irq(&policy_rwlock); 1819 write_unlock_irq(&policy_rwlock);
1805 1820
1806 /* Free the old policydb and SID table. */ 1821 /* Free the old policydb and SID table. */
1807 policydb_destroy(&oldpolicydb); 1822 policydb_destroy(&oldpolicydb);
1808 sidtab_destroy(&oldsidtab); 1823 sidtab_destroy(&oldsidtab);
1824 kfree(oldmap);
1809 1825
1810 avc_ss_reset(seqno); 1826 avc_ss_reset(seqno);
1811 selnl_notify_policyload(seqno); 1827 selnl_notify_policyload(seqno);
@@ -1815,6 +1831,7 @@ int security_load_policy(void *data, size_t len)
1815 return 0; 1831 return 0;
1816 1832
1817err: 1833err:
1834 kfree(map);
1818 sidtab_destroy(&newsidtab); 1835 sidtab_destroy(&newsidtab);
1819 policydb_destroy(&newpolicydb); 1836 policydb_destroy(&newpolicydb);
1820 return rc; 1837 return rc;
@@ -2091,7 +2108,7 @@ out_unlock:
2091 } 2108 }
2092 for (i = 0, j = 0; i < mynel; i++) { 2109 for (i = 0, j = 0; i < mynel; i++) {
2093 rc = avc_has_perm_noaudit(fromsid, mysids[i], 2110 rc = avc_has_perm_noaudit(fromsid, mysids[i],
2094 SECCLASS_PROCESS, 2111 SECCLASS_PROCESS, /* kernel value */
2095 PROCESS__TRANSITION, AVC_STRICT, 2112 PROCESS__TRANSITION, AVC_STRICT,
2096 NULL); 2113 NULL);
2097 if (!rc) 2114 if (!rc)
@@ -2119,10 +2136,11 @@ out:
2119 */ 2136 */
2120int security_genfs_sid(const char *fstype, 2137int security_genfs_sid(const char *fstype,
2121 char *path, 2138 char *path,
2122 u16 sclass, 2139 u16 orig_sclass,
2123 u32 *sid) 2140 u32 *sid)
2124{ 2141{
2125 int len; 2142 int len;
2143 u16 sclass;
2126 struct genfs *genfs; 2144 struct genfs *genfs;
2127 struct ocontext *c; 2145 struct ocontext *c;
2128 int rc = 0, cmp = 0; 2146 int rc = 0, cmp = 0;
@@ -2132,6 +2150,8 @@ int security_genfs_sid(const char *fstype,
2132 2150
2133 read_lock(&policy_rwlock); 2151 read_lock(&policy_rwlock);
2134 2152
2153 sclass = unmap_class(orig_sclass);
2154
2135 for (genfs = policydb.genfs; genfs; genfs = genfs->next) { 2155 for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
2136 cmp = strcmp(fstype, genfs->fstype); 2156 cmp = strcmp(fstype, genfs->fstype);
2137 if (cmp <= 0) 2157 if (cmp <= 0)
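The translation layer added to services.c is the core of the change: kernel callers keep using the fixed, generated class and permission values, unmap_class()/unmap_perm() convert them to whatever values the loaded policy assigned, and map_decision() converts the resulting av_decision back into kernel bit positions, padding permissions the policy does not know about as allowed or denied according to its handle-unknown setting. A toy userspace model of the same round trip; toy_mapping, toy_unmap_perm() and toy_map_allowed() are illustrative names, not kernel symbols:

#include <stdint.h>

struct toy_mapping {
	uint16_t value;		/* class value in the loaded policy */
	unsigned int num_perms;	/* number of kernel-defined permissions */
	uint32_t perms[32];	/* policy bit for each kernel bit, 0 if unknown */
};

uint32_t toy_unmap_perm(const struct toy_mapping *m, uint32_t kperms)
{
	uint32_t policy = 0;
	unsigned int i;

	/* Translate each requested kernel bit to its policy bit. */
	for (i = 0; i < m->num_perms; i++)
		if (kperms & (1u << i))
			policy |= m->perms[i];
	return policy;
}

uint32_t toy_map_allowed(const struct toy_mapping *m, uint32_t allowed,
			 int allow_unknown)
{
	uint32_t kernel = 0;
	unsigned int i;

	/* Translate the policy decision back to kernel bit positions. */
	for (i = 0; i < m->num_perms; i++) {
		if (allowed & m->perms[i])
			kernel |= 1u << i;
		if (allow_unknown && !m->perms[i])
			kernel |= 1u << i;	/* permission unknown to this policy */
	}
	return kernel;
}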
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 3c8bd8ee0b95..e0d0354008b7 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -187,6 +187,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
187 const s8 pattern_type, const s8 end_type, 187 const s8 pattern_type, const s8 end_type,
188 const char *function) 188 const char *function)
189{ 189{
190 const char *const start = filename;
191 bool in_repetition = false;
190 bool contains_pattern = false; 192 bool contains_pattern = false;
191 unsigned char c; 193 unsigned char c;
192 unsigned char d; 194 unsigned char d;
@@ -212,9 +214,13 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
212 if (c == '/') 214 if (c == '/')
213 goto out; 215 goto out;
214 } 216 }
215 while ((c = *filename++) != '\0') { 217 while (1) {
218 c = *filename++;
219 if (!c)
220 break;
216 if (c == '\\') { 221 if (c == '\\') {
217 switch ((c = *filename++)) { 222 c = *filename++;
223 switch (c) {
218 case '\\': /* "\\" */ 224 case '\\': /* "\\" */
219 continue; 225 continue;
220 case '$': /* "\$" */ 226 case '$': /* "\$" */
@@ -231,6 +237,22 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
231 break; /* Must not contain pattern */ 237 break; /* Must not contain pattern */
232 contains_pattern = true; 238 contains_pattern = true;
233 continue; 239 continue;
240 case '{': /* "/\{" */
241 if (filename - 3 < start ||
242 *(filename - 3) != '/')
243 break;
244 if (pattern_type == -1)
245 break; /* Must not contain pattern */
246 contains_pattern = true;
247 in_repetition = true;
248 continue;
249 case '}': /* "\}/" */
250 if (*filename != '/')
251 break;
252 if (!in_repetition)
253 break;
254 in_repetition = false;
255 continue;
234 case '0': /* "\ooo" */ 256 case '0': /* "\ooo" */
235 case '1': 257 case '1':
236 case '2': 258 case '2':
@@ -246,6 +268,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
246 continue; /* pattern is not \000 */ 268 continue; /* pattern is not \000 */
247 } 269 }
248 goto out; 270 goto out;
271 } else if (in_repetition && c == '/') {
272 goto out;
249 } else if (tomoyo_is_invalid(c)) { 273 } else if (tomoyo_is_invalid(c)) {
250 goto out; 274 goto out;
251 } 275 }
@@ -254,6 +278,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
254 if (!contains_pattern) 278 if (!contains_pattern)
255 goto out; 279 goto out;
256 } 280 }
281 if (in_repetition)
282 goto out;
257 return true; 283 return true;
258 out: 284 out:
259 printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function, 285 printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function,
@@ -360,33 +386,6 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
360} 386}
361 387
362/** 388/**
363 * tomoyo_path_depth - Evaluate the number of '/' in a string.
364 *
365 * @pathname: The string to evaluate.
366 *
367 * Returns path depth of the string.
368 *
369 * I score 2 for each of the '/' in the @pathname
370 * and score 1 if the @pathname ends with '/'.
371 */
372static int tomoyo_path_depth(const char *pathname)
373{
374 int i = 0;
375
376 if (pathname) {
377 const char *ep = pathname + strlen(pathname);
378 if (pathname < ep--) {
379 if (*ep != '/')
380 i++;
381 while (pathname <= ep)
382 if (*ep-- == '/')
383 i += 2;
384 }
385 }
386 return i;
387}
388
389/**
390 * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token. 389 * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token.
391 * 390 *
392 * @filename: The string to evaluate. 391 * @filename: The string to evaluate.
@@ -444,11 +443,10 @@ void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
444 ptr->is_dir = len && (name[len - 1] == '/'); 443 ptr->is_dir = len && (name[len - 1] == '/');
445 ptr->is_patterned = (ptr->const_len < len); 444 ptr->is_patterned = (ptr->const_len < len);
446 ptr->hash = full_name_hash(name, len); 445 ptr->hash = full_name_hash(name, len);
447 ptr->depth = tomoyo_path_depth(name);
448} 446}
449 447
450/** 448/**
451 * tomoyo_file_matches_to_pattern2 - Pattern matching without '/' character 449 * tomoyo_file_matches_pattern2 - Pattern matching without '/' character
452 * and "\-" pattern. 450 * and "\-" pattern.
453 * 451 *
454 * @filename: The start of string to check. 452 * @filename: The start of string to check.
@@ -458,10 +456,10 @@ void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
458 * 456 *
459 * Returns true if @filename matches @pattern, false otherwise. 457 * Returns true if @filename matches @pattern, false otherwise.
460 */ 458 */
461static bool tomoyo_file_matches_to_pattern2(const char *filename, 459static bool tomoyo_file_matches_pattern2(const char *filename,
462 const char *filename_end, 460 const char *filename_end,
463 const char *pattern, 461 const char *pattern,
464 const char *pattern_end) 462 const char *pattern_end)
465{ 463{
466 while (filename < filename_end && pattern < pattern_end) { 464 while (filename < filename_end && pattern < pattern_end) {
467 char c; 465 char c;
@@ -519,7 +517,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename,
519 case '*': 517 case '*':
520 case '@': 518 case '@':
521 for (i = 0; i <= filename_end - filename; i++) { 519 for (i = 0; i <= filename_end - filename; i++) {
522 if (tomoyo_file_matches_to_pattern2( 520 if (tomoyo_file_matches_pattern2(
523 filename + i, filename_end, 521 filename + i, filename_end,
524 pattern + 1, pattern_end)) 522 pattern + 1, pattern_end))
525 return true; 523 return true;
@@ -550,7 +548,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename,
550 j++; 548 j++;
551 } 549 }
552 for (i = 1; i <= j; i++) { 550 for (i = 1; i <= j; i++) {
553 if (tomoyo_file_matches_to_pattern2( 551 if (tomoyo_file_matches_pattern2(
554 filename + i, filename_end, 552 filename + i, filename_end,
555 pattern + 1, pattern_end)) 553 pattern + 1, pattern_end))
556 return true; 554 return true;
@@ -567,7 +565,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename,
567} 565}
568 566
569/** 567/**
570 * tomoyo_file_matches_to_pattern - Pattern matching without without '/' character. 568 * tomoyo_file_matches_pattern - Pattern matching without without '/' character.
571 * 569 *
572 * @filename: The start of string to check. 570 * @filename: The start of string to check.
573 * @filename_end: The end of string to check. 571 * @filename_end: The end of string to check.
@@ -576,7 +574,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename,
576 * 574 *
577 * Returns true if @filename matches @pattern, false otherwise. 575 * Returns true if @filename matches @pattern, false otherwise.
578 */ 576 */
579static bool tomoyo_file_matches_to_pattern(const char *filename, 577static bool tomoyo_file_matches_pattern(const char *filename,
580 const char *filename_end, 578 const char *filename_end,
581 const char *pattern, 579 const char *pattern,
582 const char *pattern_end) 580 const char *pattern_end)
@@ -589,10 +587,10 @@ static bool tomoyo_file_matches_to_pattern(const char *filename,
589 /* Split at "\-" pattern. */ 587 /* Split at "\-" pattern. */
590 if (*pattern++ != '\\' || *pattern++ != '-') 588 if (*pattern++ != '\\' || *pattern++ != '-')
591 continue; 589 continue;
592 result = tomoyo_file_matches_to_pattern2(filename, 590 result = tomoyo_file_matches_pattern2(filename,
593 filename_end, 591 filename_end,
594 pattern_start, 592 pattern_start,
595 pattern - 2); 593 pattern - 2);
596 if (first) 594 if (first)
597 result = !result; 595 result = !result;
598 if (result) 596 if (result)
@@ -600,13 +598,79 @@ static bool tomoyo_file_matches_to_pattern(const char *filename,
 		first = false;
 		pattern_start = pattern;
 	}
-	result = tomoyo_file_matches_to_pattern2(filename, filename_end,
+	result = tomoyo_file_matches_pattern2(filename, filename_end,
 					      pattern_start, pattern_end);
 	return first ? result : !result;
 }
 
 /**
+ * tomoyo_path_matches_pattern2 - Do pathname pattern matching.
+ *
+ * @f: The start of string to check.
+ * @p: The start of pattern to compare.
+ *
+ * Returns true if @f matches @p, false otherwise.
+ */
+static bool tomoyo_path_matches_pattern2(const char *f, const char *p)
+{
+	const char *f_delimiter;
+	const char *p_delimiter;
+
+	while (*f && *p) {
+		f_delimiter = strchr(f, '/');
+		if (!f_delimiter)
+			f_delimiter = f + strlen(f);
+		p_delimiter = strchr(p, '/');
+		if (!p_delimiter)
+			p_delimiter = p + strlen(p);
+		if (*p == '\\' && *(p + 1) == '{')
+			goto recursive;
+		if (!tomoyo_file_matches_pattern(f, f_delimiter, p,
+						 p_delimiter))
+			return false;
+		f = f_delimiter;
+		if (*f)
+			f++;
+		p = p_delimiter;
+		if (*p)
+			p++;
+	}
+	/* Ignore trailing "\*" and "\@" in @pattern. */
+	while (*p == '\\' &&
+	       (*(p + 1) == '*' || *(p + 1) == '@'))
+		p += 2;
+	return !*f && !*p;
+ recursive:
+	/*
+	 * The "\{" pattern is permitted only after '/' character.
+	 * This guarantees that below "*(p - 1)" is safe.
+	 * Also, the "\}" pattern is permitted only before '/' character
+	 * so that "\{" + "\}" pair will not break the "\-" operator.
+	 */
+	if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' ||
+	    *(p_delimiter - 1) != '}' || *(p_delimiter - 2) != '\\')
+		return false; /* Bad pattern. */
+	do {
+		/* Compare current component with pattern. */
+		if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2,
+						 p_delimiter - 2))
+			break;
+		/* Proceed to next component. */
+		f = f_delimiter;
+		if (!*f)
+			break;
+		f++;
+		/* Continue comparison. */
+		if (tomoyo_path_matches_pattern2(f, p_delimiter + 1))
+			return true;
+		f_delimiter = strchr(f, '/');
+	} while (f_delimiter);
+	return false; /* Not matched. */
+}
+
+/**
  * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern.
+ *
  * @filename: The filename to check.
  * @pattern:  The pattern to compare.
  *
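The hunk above adds tomoyo_path_matches_pattern2(), which walks both strings one '/'-separated component at a time and, on a "\{" token, keeps consuming consecutive components that match the enclosed pattern before retrying the remainder. A hypothetical illustration of the new "/\{dir\}/" operator, assuming it sits in the same file (the helper is static); recursive_dir_example() is a made-up name, and each '\' of a TOMOYO pattern is doubled in the C literals:

/*
 * Hypothetical illustration, not part of the commit: what the new
 * "/\{dir\}/" operator accepts.
 */
static void recursive_dir_example(void)
{
	const char *p = "var/www/\\{html\\}/\\*.css";

	/* One or more repetitions of the "html" component match: */
	tomoyo_path_matches_pattern2("var/www/html/site.css", p);      /* true */
	tomoyo_path_matches_pattern2("var/www/html/html/site.css", p); /* true */
	/* Zero repetitions, or a different component, do not: */
	tomoyo_path_matches_pattern2("var/www/site.css", p);           /* false */
	tomoyo_path_matches_pattern2("var/www/cgi/site.css", p);       /* false */
}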
@@ -615,24 +679,24 @@ static bool tomoyo_file_matches_to_pattern(const char *filename,
  * The following patterns are available.
  *   \\     \ itself.
  *   \ooo   Octal representation of a byte.
- *   \*     More than or equals to 0 character other than '/'.
- *   \@     More than or equals to 0 character other than '/' or '.'.
+ *   \*     Zero or more repetitions of characters other than '/'.
+ *   \@     Zero or more repetitions of characters other than '/' or '.'.
  *   \?     1 byte character other than '/'.
- *   \$     More than or equals to 1 decimal digit.
+ *   \$     One or more repetitions of decimal digits.
  *   \+     1 decimal digit.
- *   \X     More than or equals to 1 hexadecimal digit.
+ *   \X     One or more repetitions of hexadecimal digits.
  *   \x     1 hexadecimal digit.
- *   \A     More than or equals to 1 alphabet character.
+ *   \A     One or more repetitions of alphabet characters.
  *   \a     1 alphabet character.
+ *
  *   \-     Subtraction operator.
+ *
+ *   /\{dir\}/   '/' + 'One or more repetitions of dir/' (e.g. /dir/ /dir/dir/
+ *               /dir/dir/dir/ ).
  */
 bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
 				 const struct tomoyo_path_info *pattern)
 {
-	/*
-	if (!filename || !pattern)
-		return false;
-	*/
 	const char *f = filename->name;
 	const char *p = pattern->name;
 	const int len = pattern->const_len;
@@ -640,37 +704,15 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
 	/* If @pattern doesn't contain pattern, I can use strcmp(). */
 	if (!pattern->is_patterned)
 		return !tomoyo_pathcmp(filename, pattern);
-	/* Dont compare if the number of '/' differs. */
-	if (filename->depth != pattern->depth)
+	/* Don't compare directory and non-directory. */
+	if (filename->is_dir != pattern->is_dir)
 		return false;
 	/* Compare the initial length without patterns. */
 	if (strncmp(f, p, len))
 		return false;
 	f += len;
 	p += len;
-	/* Main loop. Compare each directory component. */
-	while (*f && *p) {
-		const char *f_delimiter = strchr(f, '/');
-		const char *p_delimiter = strchr(p, '/');
-		if (!f_delimiter)
-			f_delimiter = f + strlen(f);
-		if (!p_delimiter)
-			p_delimiter = p + strlen(p);
-		if (!tomoyo_file_matches_to_pattern(f, f_delimiter,
-						    p, p_delimiter))
-			return false;
-		f = f_delimiter;
-		if (*f)
-			f++;
-		p = p_delimiter;
-		if (*p)
-			p++;
-	}
-	/* Ignore trailing "\*" and "\@" in @pattern. */
-	while (*p == '\\' &&
-	       (*(p + 1) == '*' || *(p + 1) == '@'))
-		p += 2;
-	return !*f && !*p;
+	return tomoyo_path_matches_pattern2(f, p);
 }
 
 /**
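With the depth field gone (see the common.h hunk below), the exported matcher now only refuses to compare a directory against a non-directory and hands everything after the constant prefix to tomoyo_path_matches_pattern2(). A minimal sketch of driving the documented grammar through the public entry point, reusing the "/tmp/sh-thd-\$" example from the removed comment; grammar_example() is made up, and tomoyo_fill_path_info() is assumed to populate hash, const_len, is_dir and is_patterned as it does for saved names:

/* Minimal sketch, not part of the commit. */
static bool grammar_example(void)
{
	struct tomoyo_path_info filename = { .name = "/tmp/sh-thd-123456" };
	struct tomoyo_path_info pattern = { .name = "/tmp/sh-thd-\\$" };

	tomoyo_fill_path_info(&filename);
	tomoyo_fill_path_info(&pattern);
	/* "\$" consumes one or more decimal digits, so this returns true. */
	return tomoyo_path_matches_pattern(&filename, &pattern);
}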
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 31df541911f7..92169d29b2db 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -56,9 +56,6 @@ struct tomoyo_page_buffer {
  * (5) "is_patterned" is a bool which is true if "name" contains wildcard
  *     characters, false otherwise. This allows TOMOYO to use "hash" and
  *     strcmp() for string comparison if "is_patterned" is false.
- * (6) "depth" is calculated using the number of "/" characters in "name".
- *     This allows TOMOYO to avoid comparing two pathnames which never match
- *     (e.g. whether "/var/www/html/index.html" matches "/tmp/sh-thd-\$").
  */
 struct tomoyo_path_info {
 	const char *name;
@@ -66,7 +63,6 @@ struct tomoyo_path_info {
 	u16 const_len;     /* = tomoyo_const_part_length(name) */
 	bool is_dir;       /* = tomoyo_strendswith(name, "/") */
 	bool is_patterned; /* = tomoyo_path_contains_pattern(name) */
-	u16 depth;         /* = tomoyo_path_depth(name) */
 };
 
 /*
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 5f2e33263371..917f564cdab1 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -13,6 +13,8 @@
 #include <linux/mount.h>
 #include <linux/mnt_namespace.h>
 #include <linux/fs_struct.h>
+#include <linux/hash.h>
+
 #include "common.h"
 #include "realpath.h"
 
@@ -263,7 +265,8 @@ static unsigned int tomoyo_quota_for_savename;
  * table. Frequency of appending strings is very low. So we don't need
  * large (e.g. 64k) hash size. 256 will be sufficient.
  */
-#define TOMOYO_MAX_HASH 256
+#define TOMOYO_HASH_BITS  8
+#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
 
 /*
  * tomoyo_name_entry is a structure which is used for linking
@@ -315,6 +318,7 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name)
 	struct tomoyo_free_memory_block_list *fmb;
 	int len;
 	char *cp;
+	struct list_head *head;
 
 	if (!name)
 		return NULL;
@@ -325,9 +329,10 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name)
 		return NULL;
 	}
 	hash = full_name_hash((const unsigned char *) name, len - 1);
+	head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
+
 	mutex_lock(&lock);
-	list_for_each_entry(ptr, &tomoyo_name_list[hash % TOMOYO_MAX_HASH],
-			    list) {
+	list_for_each_entry(ptr, head, list) {
 		if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
 			goto out;
 	}
@@ -365,7 +370,7 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name)
 	tomoyo_fill_path_info(&ptr->entry);
 	fmb->ptr += len;
 	fmb->len -= len;
-	list_add_tail(&ptr->list, &tomoyo_name_list[hash % TOMOYO_MAX_HASH]);
+	list_add_tail(&ptr->list, head);
 	if (fmb->len == 0) {
 		list_del(&fmb->list);
 		kfree(fmb);
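The realpath.c hunks above switch the tomoyo_save_name() bucket selection from "hash % TOMOYO_MAX_HASH" to hash_long(), which folds the full 32-bit full_name_hash() value down to TOMOYO_HASH_BITS bits rather than keeping only the low byte, and cache the bucket head in a local pointer. An illustrative sketch of the new bucket computation, not part of the commit; tomoyo_name_bucket() is a made-up helper name:

/* Illustrative sketch, not part of the commit. */
#include <linux/dcache.h>	/* full_name_hash() */
#include <linux/hash.h>		/* hash_long()      */
#include <linux/list.h>
#include <linux/string.h>

#define TOMOYO_HASH_BITS	8
#define TOMOYO_MAX_HASH		(1u << TOMOYO_HASH_BITS)

static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];

static struct list_head *tomoyo_name_bucket(const char *name)
{
	unsigned int hash = full_name_hash((const unsigned char *) name,
					   strlen(name));

	return &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
}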
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 64b859925c0b..7717e01fc071 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -131,7 +131,7 @@ static int snd_pdacf_probe(struct pcmcia_device *link)
 		return err;
 	}
 
-	snd_card_set_dev(card, &handle_to_dev(link));
+	snd_card_set_dev(card, &link->dev);
 
 	pdacf->index = i;
 	card_list[i] = card;
@@ -142,12 +142,10 @@ static int snd_pdacf_probe(struct pcmcia_device *link)
 	link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
 	link->io.NumPorts1 = 16;
 
-	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT | IRQ_FORCED_PULSE;
+	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_FORCED_PULSE;
 	// link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
 
-	link->irq.IRQInfo1 = 0 /* | IRQ_LEVEL_ID */;
 	link->irq.Handler = pdacf_interrupt;
-	link->irq.Instance = pdacf;
 	link->conf.Attributes = CONF_ENABLE_IRQ;
 	link->conf.IntType = INT_MEMORY_AND_IO;
 	link->conf.ConfigIndex = 1;
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 1492744ad67f..7be3b3357045 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -161,11 +161,9 @@ static int snd_vxpocket_new(struct snd_card *card, int ibl,
 	link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
 	link->io.NumPorts1 = 16;
 
-	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
 
-	link->irq.IRQInfo1 = IRQ_LEVEL_ID;
 	link->irq.Handler = &snd_vx_irq_handler;
-	link->irq.Instance = chip;
 
 	link->conf.Attributes = CONF_ENABLE_IRQ;
 	link->conf.IntType = INT_MEMORY_AND_IO;
@@ -244,7 +242,7 @@ static int vxpocket_config(struct pcmcia_device *link)
 	if (ret)
 		goto failed;
 
-	chip->dev = &handle_to_dev(link);
+	chip->dev = &link->dev;
 	snd_card_set_dev(chip->card, chip->dev);
 
 	if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0)